From 6cc0719181a7aa8883855140541e7892250e66af Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 22 Jun 2006 14:47:18 -0700 Subject: [PATCH] suspend_console() warning fix kernel/power/main.c: In function 'suspend_prepare': kernel/power/main.c:89: warning: implicit declaration of function 'suspend_console' kernel/power/main.c: In function 'suspend_finish': kernel/power/main.c:137: warning: implicit declaration of function 'resume_console' Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/power/main.c b/kernel/power/main.c index 0a907f0dc56b..cdf0f07af92f 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -15,7 +15,7 @@ #include #include #include - +#include <linux/console.h> #include "power.h"
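The warning itself is ordinary C mechanics: when no declaration of a function is in scope, the compiler assumes an implicit "int" function and warns; pulling in the header that carries the prototype (here <linux/console.h>, which declares suspend_console() and resume_console()) is the whole fix. A minimal standalone illustration, with a hypothetical add() standing in for the console functions, not kernel code:

/* Compile with: gcc -std=gnu89 -Wall implicit.c
 * gcc prints: warning: implicit declaration of function 'add'
 * because add() is used before any prototype has been seen.
 * (Newer C dialects make this an outright error.) */
#include <stdio.h>

int main(void)
{
	printf("%d\n", add(1, 2));	/* implicitly assumed: int add() */
	return 0;
}

/* The definition happens to match the implicit assumption, so the
 * program still links and runs; a prototype above main(), normally
 * provided by a header, silences the warning. */
int add(int a, int b)
{
	return a + b;
}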
From d6277db4ab271862ed599da08d78961c70f00002 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 23 Jun 2006 02:03:18 -0700 Subject: [PATCH] swsusp: rework memory shrinker MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rework the swsusp's memory shrinker in the following way: - Simplify balance_pgdat() by removing all of the swsusp-related code from it. - Make shrink_all_memory() use shrink_slab() and a new function shrink_all_zones() which calls shrink_active_list() and shrink_inactive_list() directly for each zone in a way that's optimized for suspend. In shrink_all_memory() we try to free exactly as many pages as the caller asks for, preferably in one shot, starting from easier targets. If slab caches are huge, they are most likely to have enough pages to reclaim. The inactive lists are next (the zones with more inactive pages go first) etc. Each time shrink_all_memory() attempts to shrink the active and inactive lists for each zone in 5 passes. In the first pass, only the inactive lists are taken into consideration. In the next two passes the active lists are also shrunk, but mapped pages are not reclaimed. In the last two passes the active and inactive lists are shrunk and mapped pages are reclaimed as well. The aim of this is to alter the reclaim logic to choose the best pages to keep on resume and improve the responsiveness of the resumed system. Signed-off-by: Rafael J. Wysocki Signed-off-by: Con Kolivas Signed-off-by: Adrian Bunk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/swsusp.c | 10 ++- mm/vmscan.c | 219 +++++++++++++++++++++++++++++++++++++------------- 2 files changed, 172 insertions(+), 57 deletions(-) diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index c4016cbbd3e0..f9238faf76e4 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -175,6 +175,12 @@ void free_all_swap_pages(int swap, struct bitmap_page *bitmap) */ #define SHRINK_BITE 10000 +static inline unsigned long __shrink_memory(long tmp) +{ + if (tmp > SHRINK_BITE) + tmp = SHRINK_BITE; + return shrink_all_memory(tmp); +} int swsusp_shrink_memory(void) { @@ -195,12 +201,12 @@ int swsusp_shrink_memory(void) if (!is_highmem(zone)) tmp -= zone->free_pages; if (tmp > 0) { - tmp = shrink_all_memory(SHRINK_BITE); + tmp = __shrink_memory(tmp); if (!tmp) return -ENOMEM; pages += tmp; } else if (size > image_size / PAGE_SIZE) { - tmp = shrink_all_memory(SHRINK_BITE); + tmp = __shrink_memory(size - (image_size / PAGE_SIZE)); pages += tmp; } printk("\b%c", p[i++%4]); diff --git a/mm/vmscan.c b/mm/vmscan.c index 440a733fe2e9..46be8a02280e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -61,6 +61,8 @@ struct scan_control { * In this context, it doesn't matter that we scan the * whole list at once. */ int swap_cluster_max; + + int swappiness; }; /* @@ -741,7 +743,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, * A 100% value of vm_swappiness overrides this algorithm * altogether. */ - swap_tendency = mapped_ratio / 2 + distress + vm_swappiness; + swap_tendency = mapped_ratio / 2 + distress + sc->swappiness; /* * Now use this metric to decide whether to start moving mapped @@ -957,6 +959,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask) .may_writepage = !laptop_mode, .swap_cluster_max = SWAP_CLUSTER_MAX, .may_swap = 1, + .swappiness = vm_swappiness, }; inc_page_state(allocstall); @@ -1021,10 +1024,6 @@ out: * For kswapd, balance_pgdat() will work across all this node's zones until * they are all at pages_high. * - * If `nr_pages' is non-zero then it is the number of pages which are to be - * reclaimed, regardless of the zone occupancies. This is a software suspend - * special. - * * Returns the number of pages which were actually freed. * * There is special handling here for zones which are full of pinned pages. * the page allocator fallback scheme to ensure that aging of pages is balanced * across the zones. */ -static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages, - int order) +static unsigned long balance_pgdat(pg_data_t *pgdat, int order) { - unsigned long to_free = nr_pages; int all_zones_ok; int priority; int i; @@ -1055,7 +1052,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages, struct scan_control sc = { .gfp_mask = GFP_KERNEL, .may_swap = 1, - .swap_cluster_max = nr_pages ?
nr_pages : SWAP_CLUSTER_MAX, + .swap_cluster_max = SWAP_CLUSTER_MAX, + .swappiness = vm_swappiness, }; loop_again: @@ -1082,31 +1080,26 @@ loop_again: all_zones_ok = 1; - if (nr_pages == 0) { - /* - * Scan in the highmem->dma direction for the highest - * zone which needs scanning - */ - for (i = pgdat->nr_zones - 1; i >= 0; i--) { - struct zone *zone = pgdat->node_zones + i; + /* + * Scan in the highmem->dma direction for the highest + * zone which needs scanning + */ + for (i = pgdat->nr_zones - 1; i >= 0; i--) { + struct zone *zone = pgdat->node_zones + i; - if (!populated_zone(zone)) - continue; + if (!populated_zone(zone)) + continue; - if (zone->all_unreclaimable && - priority != DEF_PRIORITY) - continue; + if (zone->all_unreclaimable && priority != DEF_PRIORITY) + continue; - if (!zone_watermark_ok(zone, order, - zone->pages_high, 0, 0)) { - end_zone = i; - goto scan; - } + if (!zone_watermark_ok(zone, order, zone->pages_high, + 0, 0)) { + end_zone = i; + goto scan; } - goto out; - } else { - end_zone = pgdat->nr_zones - 1; } + goto out; scan: for (i = 0; i <= end_zone; i++) { struct zone *zone = pgdat->node_zones + i; @@ -1133,11 +1126,9 @@ scan: if (zone->all_unreclaimable && priority != DEF_PRIORITY) continue; - if (nr_pages == 0) { /* Not software suspend */ - if (!zone_watermark_ok(zone, order, - zone->pages_high, end_zone, 0)) - all_zones_ok = 0; - } + if (!zone_watermark_ok(zone, order, zone->pages_high, + end_zone, 0)) + all_zones_ok = 0; zone->temp_priority = priority; if (zone->prev_priority > priority) zone->prev_priority = priority; @@ -1162,8 +1153,6 @@ scan: total_scanned > nr_reclaimed + nr_reclaimed / 2) sc.may_writepage = 1; } - if (nr_pages && to_free > nr_reclaimed) - continue; /* swsusp: need to do more work */ if (all_zones_ok) break; /* kswapd: all done */ /* @@ -1179,7 +1168,7 @@ scan: * matches the direct reclaim path behaviour in terms of impact * on zone->*_priority. */ - if ((nr_reclaimed >= SWAP_CLUSTER_MAX) && !nr_pages) + if (nr_reclaimed >= SWAP_CLUSTER_MAX) break; } out: @@ -1261,7 +1250,7 @@ static int kswapd(void *p) } finish_wait(&pgdat->kswapd_wait, &wait); - balance_pgdat(pgdat, 0, order); + balance_pgdat(pgdat, order); } return 0; } @@ -1290,35 +1279,154 @@ void wakeup_kswapd(struct zone *zone, int order) #ifdef CONFIG_PM /* - * Try to free `nr_pages' of memory, system-wide. Returns the number of freed - * pages. + * Helper function for shrink_all_memory(). 
Tries to reclaim 'nr_pages' pages + * from LRU lists system-wide, for given pass and priority, and returns the + * number of reclaimed pages + * + * For pass > 3 we also try to shrink the LRU lists that contain a few pages + */ +static unsigned long shrink_all_zones(unsigned long nr_pages, int prio, + int pass, struct scan_control *sc) +{ + struct zone *zone; + unsigned long nr_to_scan, ret = 0; + + for_each_zone(zone) { + + if (!populated_zone(zone)) + continue; + + if (zone->all_unreclaimable && prio != DEF_PRIORITY) + continue; + + /* For pass = 0 we don't shrink the active list */ + if (pass > 0) { + zone->nr_scan_active += (zone->nr_active >> prio) + 1; + if (zone->nr_scan_active >= nr_pages || pass > 3) { + zone->nr_scan_active = 0; + nr_to_scan = min(nr_pages, zone->nr_active); + shrink_active_list(nr_to_scan, zone, sc); + } + } + + zone->nr_scan_inactive += (zone->nr_inactive >> prio) + 1; + if (zone->nr_scan_inactive >= nr_pages || pass > 3) { + zone->nr_scan_inactive = 0; + nr_to_scan = min(nr_pages, zone->nr_inactive); + ret += shrink_inactive_list(nr_to_scan, zone, sc); + if (ret >= nr_pages) + return ret; + } + } + + return ret; +} + +/* + * Try to free `nr_pages' of memory, system-wide, and return the number of + * freed pages. + * + * Rather than trying to age LRUs the aim is to preserve the overall + * LRU order by reclaiming preferentially + * inactive > active > active referenced > active mapped + */ unsigned long shrink_all_memory(unsigned long nr_pages) { - pg_data_t *pgdat; - unsigned long nr_to_free = nr_pages; + unsigned long lru_pages, nr_slab; unsigned long ret = 0; - unsigned retry = 2; - struct reclaim_state reclaim_state = { - .reclaimed_slab = 0, + int pass; + struct reclaim_state reclaim_state; + struct zone *zone; + struct scan_control sc = { + .gfp_mask = GFP_KERNEL, + .may_swap = 0, + .swap_cluster_max = nr_pages, + .may_writepage = 1, + .swappiness = vm_swappiness, }; current->reclaim_state = &reclaim_state; -repeat: - for_each_online_pgdat(pgdat) { - unsigned long freed; - freed = balance_pgdat(pgdat, nr_to_free, 0); - ret += freed; - nr_to_free -= freed; - if ((long)nr_to_free <= 0) + lru_pages = 0; + for_each_zone(zone) + lru_pages += zone->nr_active + zone->nr_inactive; + + nr_slab = read_page_state(nr_slab); + /* If slab caches are huge, it's better to hit them first */ + while (nr_slab >= lru_pages) { + reclaim_state.reclaimed_slab = 0; + shrink_slab(nr_pages, sc.gfp_mask, lru_pages); + if (!reclaim_state.reclaimed_slab) break; + + ret += reclaim_state.reclaimed_slab; + if (ret >= nr_pages) + goto out; + + nr_slab -= reclaim_state.reclaimed_slab; } - if (retry-- && ret < nr_pages) { - blk_congestion_wait(WRITE, HZ/5); - goto repeat; + + /* + * We try to shrink LRUs in 5 passes: + * 0 = Reclaim from inactive_list only + * 1 = Reclaim from active list but don't reclaim mapped + * 2 = 2nd pass of type 1 + * 3 = Reclaim mapped (normal reclaim) + * 4 = 2nd pass of type 3 + */ + for (pass = 0; pass < 5; pass++) { + int prio; + + /* Needed for shrinking slab caches later on */ + if (!lru_pages) + for_each_zone(zone) { + lru_pages += zone->nr_active; + lru_pages += zone->nr_inactive; + } + + /* Force reclaiming mapped pages in the passes #3 and #4 */ + if (pass > 2) { + sc.may_swap = 1; + sc.swappiness = 100; + } + + for (prio = DEF_PRIORITY; prio >= 0; prio--) { + unsigned long nr_to_scan = nr_pages - ret; + + sc.nr_mapped = read_page_state(nr_mapped); + sc.nr_scanned = 0; + + ret += shrink_all_zones(nr_to_scan, prio, pass, &sc); + if (ret >= nr_pages) + goto out;
+ reclaim_state.reclaimed_slab = 0; + shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages); + ret += reclaim_state.reclaimed_slab; + if (ret >= nr_pages) + goto out; + + if (sc.nr_scanned && prio < DEF_PRIORITY - 2) + blk_congestion_wait(WRITE, HZ / 10); + } + + lru_pages = 0; } + + /* + * If ret = 0, we could not shrink LRUs, but there may be something + * in slab caches + */ + if (!ret) + do { + reclaim_state.reclaimed_slab = 0; + shrink_slab(nr_pages, sc.gfp_mask, lru_pages); + ret += reclaim_state.reclaimed_slab; + } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0); + +out: current->reclaim_state = NULL; + return ret; } #endif @@ -1416,6 +1524,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) .swap_cluster_max = max_t(unsigned long, nr_pages, SWAP_CLUSTER_MAX), .gfp_mask = gfp_mask, + .swappiness = vm_swappiness, }; disable_swap_token();
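In outline, the reclaim sequence this patch introduces looks like the condensed, standalone sketch below. The reclaim primitive is a stub and the numbers are invented; only the pass/priority control flow mirrors the new shrink_all_memory():

/*
 * Standalone sketch of the 5-pass structure (plain C, not kernel
 * code).  Pass 0 touches only inactive lists, passes 1-2 add the
 * active lists without reclaiming mapped pages, and passes 3-4
 * reclaim mapped pages too.
 */
#include <stdio.h>

#define DEF_PRIORITY 12

/* Stub standing in for shrink_all_zones()/shrink_slab(). */
static unsigned long reclaim_some(unsigned long want, int prio, int pass)
{
	(void)pass;
	return want >> (prio / 4 + 1);	/* pretend we made some progress */
}

static unsigned long shrink_all(unsigned long nr_pages)
{
	unsigned long ret = 0;
	int pass, prio;

	for (pass = 0; pass < 5; pass++) {
		/* in the real code, pass > 2 makes mapped pages fair game */
		for (prio = DEF_PRIORITY; prio >= 0; prio--) {
			ret += reclaim_some(nr_pages - ret, prio, pass);
			if (ret >= nr_pages)
				return ret;	/* freed enough, stop early */
		}
	}
	return ret;
}

int main(void)
{
	printf("freed %lu of 1000 requested pages\n", shrink_all(1000));
	return 0;
}

Each pass starts from the cheapest pages to give back, so an image that fits after pass 0 never disturbs the working set that matters after resume.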
From ce4ab0012b32c1a4a1d6e934aeb73bf3151c48d9 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Fri, 23 Jun 2006 02:04:44 -0700 Subject: [PATCH] swsusp: add architecture special saveable pages support 1. Add architecture specific pages save/restore support. Next two patches will use this to save/restore 'ACPI NVS' pages. 2. Allow reserved pages 'nosave'. This could avoid save/restore BIOS reserved pages. Signed-off-by: Shaohua Li Cc: Pavel Machek Cc: "Rafael J. Wysocki" Cc: Nigel Cunningham Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/suspend.h | 1 + kernel/power/power.h | 4 ++ kernel/power/snapshot.c | 110 +++++++++++++++++++++++++++++++++++++++++++++++- kernel/power/swsusp.c | 18 ++------ 4 files changed, 118 insertions(+), 15 deletions(-) diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 96e31aa64cc7..e82cb10fb3ea 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -71,6 +71,7 @@ struct saved_context; void __save_processor_state(struct saved_context *ctxt); void __restore_processor_state(struct saved_context *ctxt); unsigned long get_safe_page(gfp_t gfp_mask); +int swsusp_add_arch_pages(unsigned long start, unsigned long end); /* * XXX: We try to keep some more pages free so that I/O operations succeed diff --git a/kernel/power/power.h b/kernel/power/power.h index f06f12f21767..c81f0ed3eeba 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -105,6 +105,10 @@ extern struct bitmap_page *alloc_bitmap(unsigned int nr_bits); extern unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap); extern void free_all_swap_pages(int swap, struct bitmap_page *bitmap); +extern unsigned int count_special_pages(void); +extern int save_special_mem(void); +extern int restore_special_mem(void); + extern int swsusp_check(void); extern int swsusp_shrink_memory(void); extern void swsusp_free(void); diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 3eeedbb13b78..7f511d89c667 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -39,6 +39,88 @@ static unsigned int nr_copy_pages; static unsigned int nr_meta_pages; static unsigned long *buffer; +struct arch_saveable_page { + unsigned long start; + unsigned long end; + char *data; + struct arch_saveable_page *next; +}; +static struct arch_saveable_page *arch_pages; + +int swsusp_add_arch_pages(unsigned long start, unsigned long end) +{ + struct arch_saveable_page *tmp; + + while (start < end) { + tmp = kzalloc(sizeof(struct arch_saveable_page), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + tmp->start = start; + tmp->end = ((start >> PAGE_SHIFT) + 1) << PAGE_SHIFT; + if (tmp->end > end) + tmp->end = end; + tmp->next = arch_pages; + start = tmp->end; + arch_pages = tmp; + } + return 0; +} + +static unsigned int count_arch_pages(void) +{ + unsigned int count = 0; + struct arch_saveable_page *tmp = arch_pages; + while (tmp) { + count++; + tmp = tmp->next; + } + return count; +} + +static int save_arch_mem(void) +{ + char *kaddr; + struct arch_saveable_page *tmp = arch_pages; + int offset; + + pr_debug("swsusp: Saving arch specific memory"); + while (tmp) { + tmp->data = (char *)__get_free_page(GFP_ATOMIC); + if (!tmp->data) + return -ENOMEM; + offset = tmp->start - (tmp->start & PAGE_MASK); + /* arch pages might not have a 'struct page' */ + kaddr = kmap_atomic_pfn(tmp->start >> PAGE_SHIFT, KM_USER0); + memcpy(tmp->data + offset, kaddr + offset, + tmp->end - tmp->start); + kunmap_atomic(kaddr, KM_USER0); + + tmp = tmp->next; + } + return 0; +} + +static int restore_arch_mem(void) +{ + char *kaddr; + struct arch_saveable_page *tmp = arch_pages; + int offset; + + while (tmp) { + if (!tmp->data) + continue; + offset = tmp->start - (tmp->start & PAGE_MASK); + kaddr = kmap_atomic_pfn(tmp->start >> PAGE_SHIFT, KM_USER0); + memcpy(kaddr + offset, tmp->data + offset, + tmp->end - tmp->start); + kunmap_atomic(kaddr, KM_USER0); + free_page((long)tmp->data); + tmp->data = NULL; + tmp = tmp->next; + } + return 0; +} + #ifdef CONFIG_HIGHMEM unsigned int count_highmem_pages(void) { @@ -150,8 +232,35 @@ int restore_highmem(void) } return 0; } +#else +static unsigned int count_highmem_pages(void) {return 0;} +static int save_highmem(void) {return 0;} +static int restore_highmem(void) {return 0;} #endif +unsigned int count_special_pages(void) +{ + return count_arch_pages() + count_highmem_pages(); +} + +int save_special_mem(void) +{ + int ret; + ret = save_arch_mem(); + if (!ret) + ret = save_highmem(); + return ret; +} + +int restore_special_mem(void) +{ + int ret; + ret = restore_arch_mem(); + if (!ret) + ret = restore_highmem(); + return ret; +} + static int pfn_is_nosave(unsigned long pfn) { unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; @@ -177,7 +286,6 @@ static int saveable(struct zone *zone, unsigned long *zone_pfn) return 0; page = pfn_to_page(pfn); - BUG_ON(PageReserved(page) && PageNosave(page)); if (PageNosave(page)) return 0; if (PageReserved(page) && pfn_is_nosave(pfn)) diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index f9238faf76e4..78b6e71b0813 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -62,16 +62,6 @@ unsigned long image_size = 500 * 1024 * 1024; int in_suspend __nosavedata = 0; -#ifdef CONFIG_HIGHMEM -unsigned int count_highmem_pages(void); -int save_highmem(void); -int restore_highmem(void); -#else -static int save_highmem(void) { return 0; } -static int restore_highmem(void) { return 0; } -static unsigned int count_highmem_pages(void) { return 0; } -#endif - /** * The following functions are used for tracing the allocated * swap pages, so that they can be freed in case of an error. @@ -192,7 +182,7 @@ int swsusp_shrink_memory(void) printk("Shrinking memory... ");
"); do { - size = 2 * count_highmem_pages(); + size = 2 * count_special_pages(); size += size / 50 + count_data_pages(); size += (size + PBES_PER_PAGE - 1) / PBES_PER_PAGE + PAGES_FOR_IO; @@ -234,7 +224,7 @@ int swsusp_suspend(void) goto Enable_irqs; } - if ((error = save_highmem())) { + if ((error = save_special_mem())) { printk(KERN_ERR "swsusp: Not enough free pages for highmem\n"); goto Restore_highmem; } @@ -245,7 +235,7 @@ int swsusp_suspend(void) /* Restore control flow magically appears here */ restore_processor_state(); Restore_highmem: - restore_highmem(); + restore_special_mem(); device_power_up(); Enable_irqs: local_irq_enable(); @@ -271,7 +261,7 @@ int swsusp_resume(void) */ swsusp_free(); restore_processor_state(); - restore_highmem(); + restore_special_mem(); touch_softlockup_watchdog(); device_power_up(); local_irq_enable(); -- cgit v1.2.3 From a938c356d5b007fe6d28251c0ddbf6c11d0d92b5 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 23 Jun 2006 02:04:46 -0700 Subject: [PATCH] swsusp: take lowmem reserves into account swsusp allocates memory from the normal zone, so it cannot use lowmem reserve pages from the lower zones. Therefore it should not count these pages as available to it. Signed-off-by: Rafael J. Wysocki Cc: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/swsusp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel/power') diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 78b6e71b0813..f0ee4e7780d6 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -188,8 +188,10 @@ int swsusp_shrink_memory(void) PAGES_FOR_IO; tmp = size; for_each_zone (zone) - if (!is_highmem(zone)) + if (!is_highmem(zone) && populated_zone(zone)) { tmp -= zone->free_pages; + tmp += zone->lowmem_reserve[ZONE_NORMAL]; + } if (tmp > 0) { tmp = __shrink_memory(tmp); if (!tmp) -- cgit v1.2.3 From 7bff24e255ee11ecbc304315a252fcbd84f9ffce Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Fri, 23 Jun 2006 02:04:47 -0700 Subject: [PATCH] kernel/power/snapshot.c: cleanups - make needlessly global functions static - make dummy functions static inline Signed-off-by: Adrian Bunk Acked-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/snapshot.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'kernel/power') diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 7f511d89c667..513eef3391a0 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -122,7 +122,7 @@ static int restore_arch_mem(void) } #ifdef CONFIG_HIGHMEM -unsigned int count_highmem_pages(void) +static unsigned int count_highmem_pages(void) { struct zone *zone; unsigned long zone_pfn; @@ -199,7 +199,7 @@ static int save_highmem_zone(struct zone *zone) return 0; } -int save_highmem(void) +static int save_highmem(void) { struct zone *zone; int res = 0; @@ -216,7 +216,7 @@ int save_highmem(void) return 0; } -int restore_highmem(void) +static int restore_highmem(void) { printk("swsusp: Restoring Highmem\n"); while (highmem_copy) { @@ -233,9 +233,9 @@ int restore_highmem(void) return 0; } #else -static unsigned int count_highmem_pages(void) {return 0;} -static int save_highmem(void) {return 0;} -static int restore_highmem(void) {return 0;} +static inline unsigned int count_highmem_pages(void) {return 0;} +static inline int save_highmem(void) {return 0;} +static inline int restore_highmem(void) {return 0;} #endif unsigned int 
From 7bff24e255ee11ecbc304315a252fcbd84f9ffce Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Fri, 23 Jun 2006 02:04:47 -0700 Subject: [PATCH] kernel/power/snapshot.c: cleanups - make needlessly global functions static - make dummy functions static inline Signed-off-by: Adrian Bunk Acked-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/snapshot.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 7f511d89c667..513eef3391a0 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -122,7 +122,7 @@ static int restore_arch_mem(void) } #ifdef CONFIG_HIGHMEM -unsigned int count_highmem_pages(void) +static unsigned int count_highmem_pages(void) { struct zone *zone; unsigned long zone_pfn; @@ -199,7 +199,7 @@ static int save_highmem_zone(struct zone *zone) return 0; } -int save_highmem(void) +static int save_highmem(void) { struct zone *zone; int res = 0; @@ -216,7 +216,7 @@ int save_highmem(void) return 0; } -int restore_highmem(void) +static int restore_highmem(void) { printk("swsusp: Restoring Highmem\n"); while (highmem_copy) { @@ -233,9 +233,9 @@ int restore_highmem(void) return 0; } #else -static unsigned int count_highmem_pages(void) {return 0;} -static int save_highmem(void) {return 0;} -static int restore_highmem(void) {return 0;} +static inline unsigned int count_highmem_pages(void) {return 0;} +static inline int save_highmem(void) {return 0;} +static inline int restore_highmem(void) {return 0;} #endif unsigned int count_special_pages(void) @@ -482,7 +482,8 @@ unsigned long get_safe_page(gfp_t gfp_mask) * On each page we set up a list of struct_pbe elements. */ -struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed) +static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, + int safe_needed) { unsigned int num; struct pbe *pblist, *pbe;
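The "static inline" dummies follow a common kernel idiom: when a feature is configured out, callers keep calling the same name and the stub disappears entirely at compile time, so no caller needs #ifdefs of its own. A standalone sketch of the pattern (hypothetical stub, not the kernel source):

/* Build with and without -DCONFIG_HIGHMEM to see both sides. */
#include <stdio.h>

#ifdef CONFIG_HIGHMEM
static unsigned int count_highmem_pages(void)
{
	return 1234;	/* real counting would happen here */
}
#else
/* static inline: the no-op stub costs nothing and generates no
 * out-of-line object code in translation units that include it */
static inline unsigned int count_highmem_pages(void) { return 0; }
#endif

int main(void)
{
	printf("highmem pages: %u\n", count_highmem_pages());
	return 0;
}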
From 968808b8956e332e556b1eae9b4f7df77518f53b Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 23 Jun 2006 02:04:48 -0700 Subject: [PATCH] swsusp: use less memory during resume MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make swsusp allocate only as much memory as needed to store the image data and metadata during resume. Without this patch swsusp additionally allocates many page frames that will conflict with the "original" locations of the image data and are considered as "unsafe", treating them as "eaten" pages (ie. allocated but unusable). The patch makes swsusp allocate as many pages as it'll need to store the data read from the image in one shot, creating a list of allocated "safe" pages, and use the observation that all pages allocated by it are marked with the PG_nosave and PG_nosave_free flags set. Namely, when it's about to load an image page, swsusp can check whether the page frame corresponding to the "original" location of this page has been allocated (ie. if the page frame has the PG_nosave and PG_nosave_free flags set) and if so, it can load the page directly into this page frame. Otherwise it uses an allocated "safe" page from the list to store the data that will be copied to their "original" location later on. This allows us to save many page copyings and page allocations during resume and in the future it may allow us to load images greater than 50% of the normal zone. Signed-off-by: Rafael J. Wysocki Acked-by: "Pavel Machek" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/power.h | 2 +- kernel/power/snapshot.c | 141 ++++++++++++++++++++++++++++-------------------- 2 files changed, 85 insertions(+), 58 deletions(-) diff --git a/kernel/power/power.h b/kernel/power/power.h index c81f0ed3eeba..98c41423f3b1 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -55,7 +55,7 @@ struct snapshot_handle { unsigned int page; unsigned int page_offset; unsigned int prev; - struct pbe *pbe; + struct pbe *pbe, *last_pbe; void *buffer; unsigned int buf_offset; }; diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 513eef3391a0..3d9284100b22 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -401,62 +401,29 @@ static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages) } } -/** - * On resume it is necessary to trace and eventually free the unsafe - * pages that have been allocated, because they are needed for I/O - * (on x86-64 we likely will "eat" these pages once again while - * creating the temporary page translation tables) - */ - -struct eaten_page { - struct eaten_page *next; - char padding[PAGE_SIZE - sizeof(void *)]; -}; - -static struct eaten_page *eaten_pages = NULL; - -static void release_eaten_pages(void) -{ - struct eaten_page *p, *q; - - p = eaten_pages; - while (p) { - q = p->next; - /* We don't want swsusp_free() to free this page again */ - ClearPageNosave(virt_to_page(p)); - free_page((unsigned long)p); - p = q; - } - eaten_pages = NULL; -} +static unsigned int unsafe_pages; /** * @safe_needed - on resume, for storing the PBE list and the image, * we can only use memory pages that do not conflict with the pages - * which had been used before suspend.
* * The unsafe pages are marked with the PG_nosave_free flag - * - * Allocated but unusable (ie eaten) memory pages should be marked - * so that swsusp_free() can release them + * and we count them using unsafe_pages */ static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed) { void *res; + + res = (void *)get_zeroed_page(gfp_mask); if (safe_needed) - do { + while (res && PageNosaveFree(virt_to_page(res))) { + /* The page is unsafe, mark it for swsusp_free() */ + SetPageNosave(virt_to_page(res)); + unsafe_pages++; res = (void *)get_zeroed_page(gfp_mask); - if (res && PageNosaveFree(virt_to_page(res))) { - /* This is for swsusp_free() */ - SetPageNosave(virt_to_page(res)); - ((struct eaten_page *)res)->next = eaten_pages; - eaten_pages = res; - } - } while (res && PageNosaveFree(virt_to_page(res))); - else - res = (void *)get_zeroed_page(gfp_mask); + } if (res) { SetPageNosave(virt_to_page(res)); SetPageNosaveFree(virt_to_page(res)); @@ -751,6 +718,8 @@ static int mark_unsafe_pages(struct pbe *pblist) return -EFAULT; } + unsafe_pages = 0; + return 0; } @@ -828,42 +797,99 @@ static inline struct pbe *unpack_orig_addresses(unsigned long *buf, } /** - * create_image - use metadata contained in the PBE list + * prepare_image - use metadata contained in the PBE list * pointed to by pagedir_nosave to mark the pages that will * be overwritten in the process of restoring the system - * memory state from the image and allocate memory for - * the image avoiding these pages + * memory state from the image ("unsafe" pages) and allocate + * memory for the image + * + * The idea is to allocate the PBE list first and then + * allocate as many pages as it's needed for the image data, + * but not to assign these pages to the PBEs initially. + * Instead, we just mark them as allocated and create a list + * of "safe" pages which will be used later */ -static int create_image(struct snapshot_handle *handle) +struct safe_page { + struct safe_page *next; + char padding[PAGE_SIZE - sizeof(void *)]; +}; + +static struct safe_page *safe_pages; + +static int prepare_image(struct snapshot_handle *handle) { int error = 0; - struct pbe *p, *pblist; + unsigned int nr_pages = nr_copy_pages; + struct pbe *p, *pblist = NULL; p = pagedir_nosave; error = mark_unsafe_pages(p); if (!error) { - pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1); + pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1); if (pblist) copy_page_backup_list(pblist, p); free_pagedir(p, 0); if (!pblist) error = -ENOMEM; } - if (!error) - error = alloc_data_pages(pblist, GFP_ATOMIC, 1); + safe_pages = NULL; + if (!error && nr_pages > unsafe_pages) { + nr_pages -= unsafe_pages; + while (nr_pages--) { + struct safe_page *ptr; + + ptr = (struct safe_page *)get_zeroed_page(GFP_ATOMIC); + if (!ptr) { + error = -ENOMEM; + break; + } + if (!PageNosaveFree(virt_to_page(ptr))) { + /* The page is "safe", add it to the list */ + ptr->next = safe_pages; + safe_pages = ptr; + } + /* Mark the page as allocated */ + SetPageNosave(virt_to_page(ptr)); + SetPageNosaveFree(virt_to_page(ptr)); + } + } if (!error) { - release_eaten_pages(); pagedir_nosave = pblist; } else { - pagedir_nosave = NULL; handle->pbe = NULL; - nr_copy_pages = 0; - nr_meta_pages = 0; + swsusp_free(); } return error; } +static void *get_buffer(struct snapshot_handle *handle) +{ + struct pbe *pbe = handle->pbe, *last = handle->last_pbe; + struct page *page = virt_to_page(pbe->orig_address); + + if (PageNosave(page) && PageNosaveFree(page)) { + /* + * We have allocated the "original" page frame and
we can + * use it directly to store the read page + */ + pbe->address = 0; + if (last && last->next) + last->next = NULL; + return (void *)pbe->orig_address; + } + /* + * The "original" page frame has not been allocated and we have to + * use a "safe" page frame to store the read page + */ + pbe->address = (unsigned long)safe_pages; + safe_pages = safe_pages->next; + if (last) + last->next = pbe; + handle->last_pbe = pbe; + return (void *)pbe->address; +} + /** * snapshot_write_next - used for writing the system memory snapshot. * @@ -908,15 +934,16 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) } else if (handle->prev <= nr_meta_pages) { handle->pbe = unpack_orig_addresses(buffer, handle->pbe); if (!handle->pbe) { - error = create_image(handle); + error = prepare_image(handle); if (error) return error; handle->pbe = pagedir_nosave; - handle->buffer = (void *)handle->pbe->address; + handle->last_pbe = NULL; + handle->buffer = get_buffer(handle); } } else { handle->pbe = handle->pbe->next; - handle->buffer = (void *)handle->pbe->address; + handle->buffer = get_buffer(handle); } handle->prev = handle->page; } From eb71c87a492b7090ff9e8ac46912c480a1687e38 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 24 Jun 2006 14:27:42 -0700 Subject: Add some basic resume trace facilities Considering that there isn't a lot of hw we can depend on during resume, this is about as good as it gets. This is x86-only for now, although the basic concept (and most of the code) will certainly work on almost any platform. Signed-off-by: Linus Torvalds --- arch/i386/kernel/vmlinux.lds.S | 7 ++ drivers/base/power/Makefile | 1 + drivers/base/power/trace.c | 228 +++++++++++++++++++++++++++++++++++++++++ include/asm-generic/rtc.h | 7 +- include/linux/resume-trace.h | 30 ++++++ kernel/power/Kconfig | 9 ++ 6 files changed, 279 insertions(+), 3 deletions(-) create mode 100644 drivers/base/power/trace.c create mode 100644 include/linux/resume-trace.h diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S index 8831303a473f..7512f39c9f25 100644 --- a/arch/i386/kernel/vmlinux.lds.S +++ b/arch/i386/kernel/vmlinux.lds.S @@ -37,6 +37,13 @@ SECTIONS RODATA + . = ALIGN(4); + __tracedata_start = .; + .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { + *(.tracedata) + } + __tracedata_end = .; + /* writeable */ .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */ *(.data) diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index ceeeba2c56c7..91f230939c1e 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -1,5 +1,6 @@ obj-y := shutdown.o obj-$(CONFIG_PM) += main.o suspend.o resume.o runtime.o sysfs.o +obj-$(CONFIG_PM_TRACE) += trace.o ifeq ($(CONFIG_DEBUG_DRIVER),y) EXTRA_CFLAGS += -DDEBUG diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c new file mode 100644 index 000000000000..a9ab30fefffc --- /dev/null +++ b/drivers/base/power/trace.c @@ -0,0 +1,228 @@ +/* + * drivers/base/power/trace.c + * + * Copyright (C) 2006 Linus Torvalds + * + * Trace facility for suspend/resume problems, when none of the + * devices may be working. + */ + +#include <linux/resume-trace.h> +#include <linux/rtc.h> + +#include <asm/rtc.h> + +#include "power.h" + +/* + * Horrid, horrid, horrid. + * + * It turns out that the _only_ piece of hardware that actually + * keeps its value across a hard boot (and, more importantly, the + * POST init sequence) is literally the realtime clock.
+ * + * Never mind that an RTC chip has 114 bytes (and often a whole + * other bank of an additional 128 bytes) of nice SRAM that is + * _designed_ to keep data - the POST will clear it. So we literally + * can just use the few bytes of actual time data, which means that + * we're really limited. + * + * It means, for example, that we can't use the seconds at all + * (since the time between the hang and the boot might be more + * than a minute), and we'd better not depend on the low bits of + * the minutes either. + * + * There are the wday fields etc, but I wouldn't guarantee those + * are dependable either. And if the date isn't valid, either the + * hw or POST will do strange things. + * + * So we're left with: + * - year: 0-99 + * - month: 0-11 + * - day-of-month: 1-28 + * - hour: 0-23 + * - min: (0-30)*2 + * + * Giving us a total range of 0-16128000 (0xf61800), ie less + * than 24 bits of actual data we can save across reboots. + * + * And if your box can't boot in less than three minutes, + * you're screwed. + * + * Now, almost 24 bits of data is pitifully small, so we need + * to be pretty dense if we want to use it for anything nice. + * What we do is that instead of saving off nice readable info, + * we save off _hashes_ of information that we can hopefully + * regenerate after the reboot. + * + * In particular, this means that we might be unlucky, and hit + * a case where we have a hash collision, and we end up not + * being able to tell for certain exactly which case happened. + * But that's hopefully unlikely. + * + * What we do is to take the bits we can fit, and split them + * into three parts (16*997*1009 = 16095568), and use the values + * for: + * - 0-15: user-settable + * - 0-996: file + line number + * - 0-1008: device + */ +#define USERHASH (16) +#define FILEHASH (997) +#define DEVHASH (1009) + +#define DEVSEED (7919) + +static unsigned int dev_hash_value; + +static int set_magic_time(unsigned int user, unsigned int file, unsigned int device) +{ + unsigned int n = user + USERHASH*(file + FILEHASH*device); + + // June 7th, 2006 + static struct rtc_time time = { + .tm_sec = 0, + .tm_min = 0, + .tm_hour = 0, + .tm_mday = 7, + .tm_mon = 5, // June - counting from zero + .tm_year = 106, + .tm_wday = 3, + .tm_yday = 160, + .tm_isdst = 1 + }; + + time.tm_year = (n % 100); + n /= 100; + time.tm_mon = (n % 12); + n /= 12; + time.tm_mday = (n % 28) + 1; + n /= 28; + time.tm_hour = (n % 24); + n /= 24; + time.tm_min = (n % 20) * 3; + n /= 20; + set_rtc_time(&time); + return n ? -1 : 0; +} + +static unsigned int read_magic_time(void) +{ + struct rtc_time time; + unsigned int val; + + get_rtc_time(&time); + printk("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n", + time.tm_hour, time.tm_min, time.tm_sec, + time.tm_mon, time.tm_mday, time.tm_year); + val = time.tm_year; /* 100 years */ + if (val > 100) + val -= 100; + val += time.tm_mon * 100; /* 12 months */ + val += (time.tm_mday-1) * 100 * 12; /* 28 month-days */ + val += time.tm_hour * 100 * 12 * 28; /* 24 hours */ + val += (time.tm_min / 3) * 100 * 12 * 28 * 24; /* 20 3-minute intervals */ + return val; +} + +/* + * This is just the sdbm hash function with a user-supplied + * seed and final size parameter. 
+ */ +static unsigned int hash_string(unsigned int seed, const char *data, unsigned int mod) +{ + unsigned char c; + while ((c = *data++) != 0) { + seed = (seed << 16) + (seed << 6) - seed + c; + } + return seed % mod; +} + +void set_trace_device(struct device *dev) +{ + dev_hash_value = hash_string(DEVSEED, dev->bus_id, DEVHASH); +} + +/* + * We could just take the "tracedata" index into the .tracedata + * section instead. Generating a hash of the data gives us a + * chance to work across kernel versions, and perhaps more + * importantly it also gives us valid/invalid check (ie we will + * likely not give totally bogus reports - if the hash matches, + * it's not any guarantee, but it's a high _likelihood_ that + * the match is valid). + */ +void generate_resume_trace(void *tracedata, unsigned int user) +{ + unsigned short lineno = *(unsigned short *)tracedata; + const char *file = *(const char **)(tracedata + 2); + unsigned int user_hash_value, file_hash_value; + + user_hash_value = user % USERHASH; + file_hash_value = hash_string(lineno, file, FILEHASH); + set_magic_time(user_hash_value, file_hash_value, dev_hash_value); +} + +extern char __tracedata_start, __tracedata_end; +static int show_file_hash(unsigned int value) +{ + int match; + char *tracedata; + + match = 0; + for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ; tracedata += 6) { + unsigned short lineno = *(unsigned short *)tracedata; + const char *file = *(const char **)(tracedata + 2); + unsigned int hash = hash_string(lineno, file, FILEHASH); + if (hash != value) + continue; + printk(" hash matches %s:%u\n", file, lineno); + match++; + } + return match; +} + +static int show_dev_hash(unsigned int value) +{ + int match = 0; + struct list_head * entry = dpm_active.prev; + + while (entry != &dpm_active) { + struct device * dev = to_device(entry); + unsigned int hash = hash_string(DEVSEED, dev->bus_id, DEVHASH); + if (hash == value) { + printk(" hash matches device %s\n", dev->bus_id); + match++; + } + entry = entry->prev; + } + return match; +} + +static unsigned int hash_value_early_read; + +static int early_resume_init(void) +{ + hash_value_early_read = read_magic_time(); + return 0; +} + +static int late_resume_init(void) +{ + unsigned int val = hash_value_early_read; + unsigned int user, file, dev; + + user = val % USERHASH; + val = val / USERHASH; + file = val % FILEHASH; + val = val / FILEHASH; + dev = val /* % DEVHASH */; + + printk(" Magic number: %d:%d:%d\n", user, file, dev); + show_file_hash(file); + show_dev_hash(dev); + return 0; +} + +core_initcall(early_resume_init); +late_initcall(late_resume_init); diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h index cef08db34ada..4087037a4225 100644 --- a/include/asm-generic/rtc.h +++ b/include/asm-generic/rtc.h @@ -114,6 +114,7 @@ static inline unsigned int get_rtc_time(struct rtc_time *time) /* Set the current date and time in the real time clock. 
*/ static inline int set_rtc_time(struct rtc_time *time) { + unsigned long flags; unsigned char mon, day, hrs, min, sec; unsigned char save_control, save_freq_select; unsigned int yrs; @@ -131,7 +132,7 @@ static inline int set_rtc_time(struct rtc_time *time) if (yrs > 255) /* They are unsigned */ return -EINVAL; - spin_lock_irq(&rtc_lock); + spin_lock_irqsave(&rtc_lock, flags); #ifdef CONFIG_MACH_DECSTATION real_yrs = yrs; leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) || @@ -152,7 +153,7 @@ static inline int set_rtc_time(struct rtc_time *time) * whether the chip is in binary mode or not. */ if (yrs > 169) { - spin_unlock_irq(&rtc_lock); + spin_unlock_irqrestore(&rtc_lock, flags); return -EINVAL; } @@ -187,7 +188,7 @@ static inline int set_rtc_time(struct rtc_time *time) CMOS_WRITE(save_control, RTC_CONTROL); CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); - spin_unlock_irq(&rtc_lock); + spin_unlock_irqrestore(&rtc_lock, flags); return 0; } diff --git a/include/linux/resume-trace.h b/include/linux/resume-trace.h new file mode 100644 index 000000000000..a376bd4ade39 --- /dev/null +++ b/include/linux/resume-trace.h @@ -0,0 +1,30 @@ +#ifndef RESUME_TRACE_H +#define RESUME_TRACE_H + +#ifdef CONFIG_PM_TRACE + +struct device; +extern void set_trace_device(struct device *); +extern void generate_resume_trace(void *tracedata, unsigned int user); + +#define TRACE_DEVICE(dev) set_trace_device(dev) +#define TRACE_RESUME(user) do { \ + void *tracedata; \ + asm volatile("movl $1f,%0\n" \ + ".section .tracedata,\"a\"\n" \ + "1:\t.word %c1\n" \ + "\t.long %c2\n" \ + ".previous" \ + :"=r" (tracedata) \ + : "i" (__LINE__), "i" (__FILE__)); \ + generate_resume_trace(tracedata, user); \ +} while (0) + +#else + +#define TRACE_DEVICE(dev) do { } while (0) +#define TRACE_RESUME(dev) do { } while (0) + +#endif + +#endif diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index ce0dfb8f4a4e..cdf315e794ff 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -36,6 +36,15 @@ config PM_DEBUG code. This is helpful when debugging and reporting various PM bugs, like suspend support. +config PM_TRACE + bool "Suspend/resume event tracing" + depends on PM && PM_DEBUG && X86 + default y + ---help--- + This enables some cheesy code to save the last PM event point in the + RTC across reboots, so that you can debug a machine that just hangs + during suspend (or more commonly, during resume). 
+ config SOFTWARE_SUSPEND bool "Software Suspend" depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)
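The arithmetic that squeezes the three hashes into the RTC fields is easy to check in isolation. A standalone round-trip of the mixed-radix packing used by set_magic_time() and late_resume_init(), with made-up hash values rather than real file or device hashes:

/*
 * user (0-15), file hash (0-996) and device hash (0-1008) share one
 * integer of at most 16*997*1009 - 1 = 16095567, which fits in the
 * roughly 24 bits recoverable from the RTC date/time fields.
 */
#include <stdio.h>

#define USERHASH 16
#define FILEHASH 997
#define DEVHASH  1009

int main(void)
{
	unsigned int user = 5, file = 123, dev = 456;

	/* encode, as in set_magic_time() */
	unsigned int n = user + USERHASH * (file + FILEHASH * dev);

	/* decode, as in late_resume_init() */
	unsigned int u = n % USERHASH;  n /= USERHASH;
	unsigned int f = n % FILEHASH;  n /= FILEHASH;
	unsigned int d = n;		/* already < DEVHASH */

	printf("decoded %u:%u:%u\n", u, f, d);	/* prints 5:123:456 */
	return 0;
}

On a hang, the decoded file hash is compared against every entry in the .tracedata section and the device hash against every suspended device, so a match points at the last file:line and device touched before the machine died.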
From 3b364b8d584b94777f8446a943b3c65e75e758f8 Mon Sep 17 00:00:00 2001 From: Andreas Mohr Date: Sun, 25 Jun 2006 05:47:56 -0700 Subject: [PATCH] constify parts of kernel/power/ Signed-off-by: Andreas Mohr Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/disk.c | 2 +- kernel/power/main.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/power/disk.c b/kernel/power/disk.c index 81d4d982f3f0..e13e74067845 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c @@ -231,7 +231,7 @@ static int software_resume(void) late_initcall(software_resume); -static char * pm_disk_modes[] = { +static const char * const pm_disk_modes[] = { [PM_DISK_FIRMWARE] = "firmware", [PM_DISK_PLATFORM] = "platform", [PM_DISK_SHUTDOWN] = "shutdown", diff --git a/kernel/power/main.c b/kernel/power/main.c index cdf0f07af92f..6d295c776794 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -145,7 +145,7 @@ static void suspend_finish(suspend_state_t state) -static char *pm_states[PM_SUSPEND_MAX] = { +static const char * const pm_states[PM_SUSPEND_MAX] = { [PM_SUSPEND_STANDBY] = "standby", [PM_SUSPEND_MEM] = "mem", #ifdef CONFIG_SOFTWARE_SUSPEND @@ -262,7 +262,7 @@ static ssize_t state_show(struct subsystem * subsys, char * buf) static ssize_t state_store(struct subsystem * subsys, const char * buf, size_t n) { suspend_state_t state = PM_SUSPEND_STANDBY; - char ** s; + const char * const *s; char *p; int error; int len; From 72cf2709bf8e0410800f118c4298bfbf8715b303 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 25 Jun 2006 10:04:15 -0700 Subject: Fix PM_TRACE dependency: works only on 32-bit x86 for now Not that x86-64 and other architecture support should be difficult to add (trivial fixups to the data format and add the proper linker script entry). Signed-off-by: Linus Torvalds --- kernel/power/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index cdf315e794ff..fc311a4673a2 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -38,7 +38,7 @@ config PM_DEBUG config PM_TRACE bool "Suspend/resume event tracing" - depends on PM && PM_DEBUG && X86 + depends on PM && PM_DEBUG && X86_32 default y ---help--- This enables some cheesy code to save the last PM event point in the From 3448097fccdce4ea8f0fcad4f37f502a8cd72e68 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 25 Jun 2006 18:41:00 -0700 Subject: Revert "swsusp special saveable pages support" commits This reverts commits 3e3318dee0878d42ed62a19c292a2ac284135db3 [PATCH] swsusp: x86_64 mark special saveable/unsaveable pages b6370d96e09944c6e3ae8d5743ca8a8ab1f79f6c [PATCH] swsusp: i386 mark special saveable/unsaveable pages ce4ab0012b32c1a4a1d6e934aeb73bf3151c48d9 [PATCH] swsusp: add architecture special saveable pages support because not only do they apparently cause page faults on x86, the infrastructure doesn't compile on powerpc. Signed-off-by: Linus Torvalds --- arch/i386/kernel/setup.c | 106 ------------------------------------ arch/x86_64/kernel/setup.c | 95 -------------------------------- include/linux/suspend.h | 1 - kernel/power/power.h | 4 -- kernel/power/snapshot.c | 112 ++------------------------------------- kernel/power/swsusp.c | 18 ++++++-- 6 files changed, 18 insertions(+), 318 deletions(-) diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index 6c1639836e06..6bef9273733e 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c @@ -48,7 +48,6 @@ #include #include #include -#include <linux/suspend.h> #include