author     Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>    2012-10-08 16:29:51 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>       2012-10-09 16:22:27 +0900
commit     26234f36ef3ec7efcfa9acb181427849c1f9db7c
tree       00c84ab6794fe54c75586eb211d75d80bb038646   /mm/huge_memory.c
parent     420256ef02660af0acf28c12fe4b7d514ca88a4d
thp: introduce khugepaged_prealloc_page and khugepaged_alloc_page
They are used to abstract the difference between the NUMA-enabled and
NUMA-disabled cases and make the code more readable.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
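
To make the refactoring concrete, here is a condensed, caller-side sketch of
khugepaged_do_scan() as it looks after this patch. It is paraphrased from the
hunks below rather than quoted from the upstream file; mm-slot scanning and
the rest of the loop body are elided.

	/* Sketch only: the per-iteration #ifdef CONFIG_NUMA / #ifndef
	 * CONFIG_NUMA blocks are gone; one helper hides how the huge
	 * page is (pre)allocated for the next collapse attempt. */
	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;		/* give up for this scan round */

		cond_resched();

		if (unlikely(kthread_should_stop() || freezing(current)))
			break;

		/* ... scan mm slots; collapse_huge_page() obtains its page
		 * via khugepaged_alloc_page(hpage, mm, vma, address, node) ... */
	}

On NUMA kernels khugepaged_alloc_page() does the node-local allocation and
drops mmap_sem; on !NUMA kernels it simply hands back the page that
khugepaged_prealloc_page() allocated up front.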
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--   mm/huge_memory.c | 166
1 file changed, 98 insertions, 68 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9c4390f60c3e..f0e999379dd7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1827,28 +1827,34 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 	}
 }
 
-static void collapse_huge_page(struct mm_struct *mm,
-				   unsigned long address,
-				   struct page **hpage,
-				   struct vm_area_struct *vma,
-				   int node)
+static void khugepaged_alloc_sleep(void)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd, _pmd;
-	pte_t *pte;
-	pgtable_t pgtable;
-	struct page *new_page;
-	spinlock_t *ptl;
-	int isolated;
-	unsigned long hstart, hend;
+	wait_event_freezable_timeout(khugepaged_wait, false,
+			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+}
 
-	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-#ifndef CONFIG_NUMA
-	up_read(&mm->mmap_sem);
-	VM_BUG_ON(!*hpage);
-	new_page = *hpage;
-#else
+#ifdef CONFIG_NUMA
+static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+{
+	if (IS_ERR(*hpage)) {
+		if (!*wait)
+			return false;
+
+		*wait = false;
+		khugepaged_alloc_sleep();
+	} else if (*hpage) {
+		put_page(*hpage);
+		*hpage = NULL;
+	}
+
+	return true;
+}
+
+static struct page
+*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+		       struct vm_area_struct *vma, unsigned long address,
+		       int node)
+{
 	VM_BUG_ON(*hpage);
 	/*
 	 * Allocate the page while the vma is still valid and under
@@ -1860,7 +1866,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * mmap_sem in read mode is good idea also to allow greater
 	 * scalability.
 	 */
-	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
+	*hpage = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
 				      node, __GFP_OTHER_NODE);
 
 	/*
@@ -1868,15 +1874,81 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * preparation for taking it in write mode.
 	 */
 	up_read(&mm->mmap_sem);
-	if (unlikely(!new_page)) {
+	if (unlikely(!*hpage)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
-		return;
+		return NULL;
 	}
-	*hpage = new_page;
+	count_vm_event(THP_COLLAPSE_ALLOC);
+	return *hpage;
+}
+#else
+static struct page *khugepaged_alloc_hugepage(bool *wait)
+{
+	struct page *hpage;
+
+	do {
+		hpage = alloc_hugepage(khugepaged_defrag());
+		if (!hpage) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+			if (!*wait)
+				return NULL;
+
+			*wait = false;
+			khugepaged_alloc_sleep();
+		} else
+			count_vm_event(THP_COLLAPSE_ALLOC);
+	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
+
+	return hpage;
+}
+
+static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+{
+	if (!*hpage)
+		*hpage = khugepaged_alloc_hugepage(wait);
+
+	if (unlikely(!*hpage))
+		return false;
+
+	return true;
+}
+
+static struct page
+*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+		       struct vm_area_struct *vma, unsigned long address,
+		       int node)
+{
+	up_read(&mm->mmap_sem);
+	VM_BUG_ON(!*hpage);
+	return *hpage;
+}
 #endif
 
+static void collapse_huge_page(struct mm_struct *mm,
+				   unsigned long address,
+				   struct page **hpage,
+				   struct vm_area_struct *vma,
+				   int node)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd, _pmd;
+	pte_t *pte;
+	pgtable_t pgtable;
+	struct page *new_page;
+	spinlock_t *ptl;
+	int isolated;
+	unsigned long hstart, hend;
+
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+	/* release the mmap_sem read lock. */
+	new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
+	if (!new_page)
+		return;
+
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
 		return;
 
@@ -2215,34 +2287,6 @@ static int khugepaged_wait_event(void)
 		kthread_should_stop();
 }
 
-static void khugepaged_alloc_sleep(void)
-{
-	wait_event_freezable_timeout(khugepaged_wait, false,
-			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
-}
-
-#ifndef CONFIG_NUMA
-static struct page *khugepaged_alloc_hugepage(bool *wait)
-{
-	struct page *hpage;
-
-	do {
-		hpage = alloc_hugepage(khugepaged_defrag());
-		if (!hpage) {
-			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-			if (!*wait)
-				return NULL;
-
-			*wait = false;
-			khugepaged_alloc_sleep();
-		} else
-			count_vm_event(THP_COLLAPSE_ALLOC);
-	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
-
-	return hpage;
-}
-#endif
-
 static void khugepaged_do_scan(void)
 {
 	struct page *hpage = NULL;
@@ -2253,23 +2297,9 @@ static void khugepaged_do_scan(void)
 	barrier(); /* write khugepaged_pages_to_scan to local stack */
 
 	while (progress < pages) {
-#ifndef CONFIG_NUMA
-		if (!hpage)
-			hpage = khugepaged_alloc_hugepage(&wait);
-
-		if (unlikely(!hpage))
+		if (!khugepaged_prealloc_page(&hpage, &wait))
 			break;
-#else
-		if (IS_ERR(hpage)) {
-			if (!wait)
-				break;
-			wait = false;
-			khugepaged_alloc_sleep();
-		} else if (hpage) {
-			put_page(hpage);
-			hpage = NULL;
-		}
-#endif
+
 		cond_resched();
 
 		if (unlikely(kthread_should_stop() || freezing(current)))