author | Hugh Dickins <hugh.dickins@tiscali.co.uk> | 2009-12-14 17:58:59 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-15 08:53:17 -0800 |
commit | af8e3354b4bbd1ee5a3a55d11a5e1fe37e77f0ba (patch) | |
tree | 8dc0ece80878d00409d4662c5fd1e28cd7fbbdd8 /mm/internal.h | |
parent | 53f79acb6ecb648afd63e0f13deba167f1a934df (diff) | |
download | lwn-af8e3354b4bbd1ee5a3a55d11a5e1fe37e77f0ba.tar.gz lwn-af8e3354b4bbd1ee5a3a55d11a5e1fe37e77f0ba.zip | |
mm: CONFIG_MMU for PG_mlocked
Remove three degrees of obfuscation, left over from when we had
CONFIG_UNEVICTABLE_LRU. MLOCK_PAGES is CONFIG_HAVE_MLOCKED_PAGE_BIT is
CONFIG_HAVE_MLOCK is CONFIG_MMU. rmap.o (and memory-failure.o) are only
built when CONFIG_MMU, so don't need such conditions at all.
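To make the "three degrees" concrete, the chain looked roughly like the sketch below. This is a reconstruction from the equivalences stated in the message, not a verbatim quote of the old tree; the exact Kconfig wording and the file holding MLOCK_PAGES may differ slightly:

```c
/*
 * Sketch, reconstructed from the commit message: each symbol was just a
 * renaming of the previous one, so testing any of them tested CONFIG_MMU.
 *
 * mm/Kconfig, roughly:
 *
 *   config HAVE_MLOCK
 *	bool
 *	default y if MMU=y
 *
 *   config HAVE_MLOCKED_PAGE_BIT
 *	bool
 *	default y if HAVE_MLOCK=y
 *
 * and MLOCK_PAGES was then keyed off the deepest alias:
 */
#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
#define MLOCK_PAGES 1
#else
#define MLOCK_PAGES 0
#endif

/*
 * After the patch: mm/internal.h tests CONFIG_MMU directly (see the diff
 * below), and MMU-only objects such as rmap.o and memory-failure.o drop
 * the conditionals altogether, since they are only built when CONFIG_MMU=y.
 */
```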
Somehow, I feel no compulsion to remove the CONFIG_HAVE_MLOCK* lines from
169 defconfigs: leave those to evolve in due course.
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Nick Piggin <npiggin@suse.de>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/internal.h')
-rw-r--r-- | mm/internal.h | 26 |
1 file changed, 12 insertions, 14 deletions
```diff
diff --git a/mm/internal.h b/mm/internal.h
index 22ec8d2b0fb8..cb7d92d0a46d 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -63,17 +63,6 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
-#ifdef CONFIG_HAVE_MLOCK
-extern long mlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end);
-extern void munlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end);
-static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
-{
-	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
-}
-#endif
-
 /*
  * unevictable_migrate_page() called only from migrate_page_copy() to
  * migrate unevictable flag to new page.
@@ -86,7 +75,16 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
 		SetPageUnevictable(new);
 }
 
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
+extern long mlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+extern void munlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
+{
+	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
+}
+
 /*
  * Called only in fault path via page_evictable() for a new page
  * to determine if it's being mapped into a LOCKED vma.
@@ -144,7 +142,7 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 	}
 }
 
-#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#else /* !CONFIG_MMU */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
 	return 0;
@@ -153,7 +151,7 @@ static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
 
-#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#endif /* !CONFIG_MMU */
 
 /*
  * Return the mem_map entry representing the 'offset' subpage within
```
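The resulting header follows the usual kernel stub pattern: under CONFIG_MMU the helpers are real, under !CONFIG_MMU they are empty inlines, so common code can call them unconditionally. A minimal illustration, with a hypothetical caller invented for exposition (migrate_page_copy() in mm/migrate.c is the real caller of this kind):

```c
/*
 * Hypothetical caller, for illustration only: because the !CONFIG_MMU
 * branch of mm/internal.h supplies an empty inline stub, code shared
 * between MMU and nommu builds needs no #ifdef of its own -- on nommu
 * the call simply compiles away.
 */
static void copy_mlock_state(struct page *newpage, struct page *page)
{
	mlock_migrate_page(newpage, page);	/* no-op when !CONFIG_MMU */
}
```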