author    | Chintan Pandya <cpandya@codeaurora.org> | 2018-06-07 17:06:46 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-06-07 17:34:35 -0700
commit    | 82a2e924ff2c6accbc840dfa46c42b24da457a31 (patch)
tree      | ac1421a481d921b4f3c0f748f450502e951da2b6 /mm
parent    | 08994b24673b6ae33ee40fc3b5e265c6762848e4 (diff)
mm: vmalloc: clean up vunmap to avoid pgtable ops twice
vunmap performs the page-table clear operation twice when
DEBUG_PAGEALLOC_ENABLE_DEFAULT is enabled, which is unintended. Clean up
the code so the range is unmapped only once.

As a performance gain, this saves a few microseconds per call. The ftrace
data below was captured while doing 1 MB of vmalloc/vfree on an ARM64-based
SoC *without* this patch applied. With the patch we save ~3 us, the cost of
the one extra vunmap_page_range() call (see the sketch after the trace data).
CPU DURATION FUNCTION CALLS
| | | | | | |
6) | __vunmap() {
6) | vmap_debug_free_range() {
6) 3.281 us | vunmap_page_range();
6) + 45.468 us | }
6) 2.760 us | vunmap_page_range();
6) ! 505.105 us | }
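Where the double clear comes from, concretely: before this patch,
vm_unmap_ram() first called vmap_debug_free_range() (which unmaps and
flushes immediately under debug_pagealloc), and the normal free path then
unmapped the same range again. The following standalone C sketch mocks the
kernel functions as printf stubs purely to illustrate that call sequence;
the names mirror mm/vmalloc.c, but the bodies are stand-ins, not kernel code.

#include <stdio.h>
#include <stdbool.h>

/* Stub: pretend pagealloc debugging is on, as with
 * DEBUG_PAGEALLOC_ENABLE_DEFAULT. */
static bool debug_pagealloc_enabled(void) { return true; }

/* Stub: in the kernel this walks and clears the page tables. */
static void vunmap_page_range(unsigned long start, unsigned long end)
{
	printf("vunmap_page_range(%#lx, %#lx)\n", start, end);
}

/* Stub: in the kernel this broadcasts a TLB flush for the range. */
static void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	printf("flush_tlb_kernel_range(%#lx, %#lx)\n", start, end);
}

/* Pre-patch debug hook: first page-table clear plus immediate flush. */
static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	if (debug_pagealloc_enabled()) {
		vunmap_page_range(start, end);
		flush_tlb_kernel_range(start, end);
	}
}

int main(void)
{
	unsigned long start = 0x10000000UL;
	unsigned long end = start + (1UL << 20);	/* 1 MB, as in the test */

	/* vm_unmap_ram() before the patch: */
	vmap_debug_free_range(start, end);	/* first clear (debug path) */
	vunmap_page_range(start, end);		/* second clear (normal free path) */
	return 0;
}

Run, it prints vunmap_page_range twice for the same range; the second walk
over already-cleared page tables is the ~3 us of redundant work measured above.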
[cpandya@codeaurora.org: v3]
Link: http://lkml.kernel.org/r/1525176960-18408-1-git-send-email-cpandya@codeaurora.org
Link: http://lkml.kernel.org/r/1523876342-10545-1-git-send-email-cpandya@codeaurora.org
Signed-off-by: Chintan Pandya <cpandya@codeaurora.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Florian Fainelli <f.fainelli@gmail.com>
Cc: Yisheng Xie <xieyisheng1@huawei.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Byungchul Park <byungchul.park@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/vmalloc.c | 29
1 file changed, 7 insertions(+), 22 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 63a5f502da08..12bd82e6554e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -603,26 +603,6 @@ static void unmap_vmap_area(struct vmap_area *va)
 	vunmap_page_range(va->va_start, va->va_end);
 }
 
-static void vmap_debug_free_range(unsigned long start, unsigned long end)
-{
-	/*
-	 * Unmap page tables and force a TLB flush immediately if pagealloc
-	 * debugging is enabled. This catches use after free bugs similarly to
-	 * those in linear kernel virtual address space after a page has been
-	 * freed.
-	 *
-	 * All the lazy freeing logic is still retained, in order to minimise
-	 * intrusiveness of this debugging feature.
-	 *
-	 * This is going to be *slow* (linear kernel virtual address debugging
-	 * doesn't do a broadcast TLB flush so it is a lot faster).
-	 */
-	if (debug_pagealloc_enabled()) {
-		vunmap_page_range(start, end);
-		flush_tlb_kernel_range(start, end);
-	}
-}
-
 /*
  * lazy_max_pages is the maximum amount of virtual address space we gather up
  * before attempting to purge with a TLB flush.
@@ -756,6 +736,9 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 {
 	flush_cache_vunmap(va->va_start, va->va_end);
 	unmap_vmap_area(va);
+	if (debug_pagealloc_enabled())
+		flush_tlb_kernel_range(va->va_start, va->va_end);
+
 	free_vmap_area_noflush(va);
 }
 
@@ -1053,6 +1036,10 @@ static void vb_free(const void *addr, unsigned long size)
 
 	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
 
+	if (debug_pagealloc_enabled())
+		flush_tlb_kernel_range((unsigned long)addr,
+					(unsigned long)addr + size);
+
 	spin_lock(&vb->lock);
 
 	/* Expand dirty range */
@@ -1142,7 +1129,6 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 	BUG_ON(!PAGE_ALIGNED(addr));
 
 	debug_check_no_locks_freed(mem, size);
-	vmap_debug_free_range(addr, addr+size);
 
 	if (likely(count <= VMAP_MAX_ALLOC)) {
 		vb_free(mem, size);
@@ -1499,7 +1485,6 @@ struct vm_struct *remove_vm_area(const void *addr)
 		va->flags |= VM_LAZY_FREE;
 		spin_unlock(&vmap_area_lock);
 
-		vmap_debug_free_range(va->va_start, va->va_end);
 		kasan_free_shadow(vm);
 		free_unmap_vmap_area(va);
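For quick reading, the net effect of the hunks above on the common free
path, condensed (surrounding context trimmed; the diff above is
authoritative):

/* free_unmap_vmap_area() after this patch, condensed from the diff. */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	unmap_vmap_area(va);		/* the single page-table clear */
	if (debug_pagealloc_enabled())	/* immediate flush kept for debugging */
		flush_tlb_kernel_range(va->va_start, va->va_end);

	free_vmap_area_noflush(va);
}

vb_free() gets the same treatment, and the two vmap_debug_free_range()
call sites disappear along with the helper itself.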