author     Linus Torvalds <torvalds@linux-foundation.org>   2020-08-07 11:39:33 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-08-07 11:39:33 -0700
commit     81e11336d97e7a4c25a65c302ef2bf9cd9808ed4 (patch)
tree       44e069e0bb26068137a1bda7ca5125c5b068b4ab /arch/arm64
parent     5631c5e0eb9035d92ceb20fcd9cdb7779a3f5cc7 (diff)
parent     912c05720f00d039103d356a59c37dc7c3995e01 (diff)
download   lwn-81e11336d97e7a4c25a65c302ef2bf9cd9808ed4.tar.gz
           lwn-81e11336d97e7a4c25a65c302ef2bf9cd9808ed4.zip
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
- a few MM hotfixes
- kthread, tools, scripts, ntfs and ocfs2
- some of MM
Subsystems affected by this patch series: kthread, tools, scripts, ntfs,
ocfs2 and mm (hotfixes, pagealloc, slab-generic, slab, slub, kcsan,
debug, pagecache, gup, swap, shmem, memcg, pagemap, mremap, mincore,
sparsemem, vmalloc, kasan, pagealloc, hugetlb and vmscan).
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (162 commits)
mm: vmscan: consistent update to pgrefill
mm/vmscan.c: fix typo
khugepaged: khugepaged_test_exit() check mmget_still_valid()
khugepaged: retract_page_tables() remember to test exit
khugepaged: collapse_pte_mapped_thp() protect the pmd lock
khugepaged: collapse_pte_mapped_thp() flush the right range
mm/hugetlb: fix calculation of adjust_range_if_pmd_sharing_possible
mm: thp: replace HTTP links with HTTPS ones
mm/page_alloc: fix memalloc_nocma_{save/restore} APIs
mm/page_alloc.c: skip setting nodemask when we are in interrupt
mm/page_alloc: fallbacks at most has 3 elements
mm/page_alloc: silence a KASAN false positive
mm/page_alloc.c: remove unnecessary end_bitidx for [set|get]_pfnblock_flags_mask()
mm/page_alloc.c: simplify pageblock bitmap access
mm/page_alloc.c: extract the common part in pfn_to_bitidx()
mm/page_alloc.c: replace the definition of NR_MIGRATETYPE_BITS with PB_migratetype_bits
mm/shuffle: remove dynamic reconfiguration
mm/memory_hotplug: document why shuffle_zone() is relevant
mm/page_alloc: remove nr_free_pagecache_pages()
mm: remove vm_total_pages
...
Diffstat (limited to 'arch/arm64')
-rw-r--r--   arch/arm64/include/asm/pgalloc.h   39
-rw-r--r--   arch/arm64/kernel/setup.c           2
-rw-r--r--   arch/arm64/kernel/smp.c             1
-rw-r--r--   arch/arm64/mm/hugetlbpage.c         1
-rw-r--r--   arch/arm64/mm/init.c                6
-rw-r--r--   arch/arm64/mm/ioremap.c             1
-rw-r--r--   arch/arm64/mm/mmu.c                59
7 files changed, 44 insertions(+), 65 deletions(-)
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 58e93583ddb6..3c6a7f5988b1 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -13,37 +13,13 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-#include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */
+#define __HAVE_ARCH_PGD_FREE
+#include <asm-generic/pgalloc.h>
 
 #define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
 
 #if CONFIG_PGTABLE_LEVELS > 2
 
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	gfp_t gfp = GFP_PGTABLE_USER;
-	struct page *page;
-
-	if (mm == &init_mm)
-		gfp = GFP_PGTABLE_KERNEL;
-
-	page = alloc_page(gfp);
-	if (!page)
-		return NULL;
-	if (!pgtable_pmd_page_ctor(page)) {
-		__free_page(page);
-		return NULL;
-	}
-	return page_address(page);
-}
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
-{
-	BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1));
-	pgtable_pmd_page_dtor(virt_to_page(pmdp));
-	free_page((unsigned long)pmdp);
-}
-
 static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
 {
 	set_pud(pudp, __pud(__phys_to_pud_val(pmdp) | prot));
@@ -62,17 +38,6 @@ static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
 
 #if CONFIG_PGTABLE_LEVELS > 3
 
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	return (pud_t *)__get_free_page(GFP_PGTABLE_USER);
-}
-
-static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
-{
-	BUG_ON((unsigned long)pudp & (PAGE_SIZE-1));
-	free_page((unsigned long)pudp);
-}
-
 static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot)
 {
 	set_p4d(p4dp, __p4d(__phys_to_p4d_val(pudp) | prot));
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index c793276ec7ad..87e81d29e6fb 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -276,7 +276,7 @@ arch_initcall(reserve_memblock_reserved_regions);
 
 u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
-void __init setup_arch(char **cmdline_p)
+void __init __no_sanitize_address setup_arch(char **cmdline_p)
 {
 	init_mm.start_code = (unsigned long) _text;
 	init_mm.end_code = (unsigned long) _etext;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index e43a8ff19f0f..8059d50bc8cb 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -43,7 +43,6 @@
 #include <asm/kvm_mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/numa.h>
-#include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/smp_plat.h>
 #include <asm/sections.h>
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index aa421bf4956e..55ecf6de9ff7 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -17,7 +17,6 @@
 #include <asm/mman.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
 
 /*
  * HugeTLB Support Matrix
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f8c19c6c8e71..481d22c32a2e 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -430,11 +430,9 @@ void __init bootmem_init(void)
 #endif
 
 	/*
-	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
-	 * done after the fixed reservations.
+	 * sparse_init() tries to allocate memory from memblock, so must be
+	 * done after the fixed reservations
 	 */
-	memblocks_present();
-
 	sparse_init();
 
 	zone_sizes_init(min, max);
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 9be71bee902c..b5e83c46b23e 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -16,7 +16,6 @@
 
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
 
 static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
 				      pgprot_t prot, void *caller)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 1df25f26571d..75df62fea1b6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -35,6 +35,7 @@
 #include <asm/mmu_context.h>
 #include <asm/ptdump.h>
 #include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
 
 #define NO_BLOCK_MAPPINGS	BIT(0)
 #define NO_CONT_MAPPINGS	BIT(1)
@@ -760,15 +761,20 @@ int kern_addr_valid(unsigned long addr)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-static void free_hotplug_page_range(struct page *page, size_t size)
+static void free_hotplug_page_range(struct page *page, size_t size,
+				    struct vmem_altmap *altmap)
 {
-	WARN_ON(PageReserved(page));
-	free_pages((unsigned long)page_address(page), get_order(size));
+	if (altmap) {
+		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
+	} else {
+		WARN_ON(PageReserved(page));
+		free_pages((unsigned long)page_address(page), get_order(size));
+	}
 }
 
 static void free_hotplug_pgtable_page(struct page *page)
 {
-	free_hotplug_page_range(page, PAGE_SIZE);
+	free_hotplug_page_range(page, PAGE_SIZE, NULL);
 }
 
 static bool pgtable_range_aligned(unsigned long start, unsigned long end,
@@ -791,7 +797,8 @@ static bool pgtable_range_aligned(unsigned long start, unsigned long end,
 }
 
 static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
-				    unsigned long end, bool free_mapped)
+				    unsigned long end, bool free_mapped,
+				    struct vmem_altmap *altmap)
 {
 	pte_t *ptep, pte;
 
@@ -805,12 +812,14 @@ static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
 		pte_clear(&init_mm, addr, ptep);
 		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 		if (free_mapped)
-			free_hotplug_page_range(pte_page(pte), PAGE_SIZE);
+			free_hotplug_page_range(pte_page(pte),
+						PAGE_SIZE, altmap);
 	} while (addr += PAGE_SIZE, addr < end);
 }
 
 static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
-				    unsigned long end, bool free_mapped)
+				    unsigned long end, bool free_mapped,
+				    struct vmem_altmap *altmap)
 {
 	unsigned long next;
 	pmd_t *pmdp, pmd;
@@ -833,16 +842,17 @@ static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
 			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 			if (free_mapped)
 				free_hotplug_page_range(pmd_page(pmd),
-							PMD_SIZE);
+							PMD_SIZE, altmap);
 			continue;
 		}
 		WARN_ON(!pmd_table(pmd));
-		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped);
+		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap);
 	} while (addr = next, addr < end);
 }
 
 static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
-				    unsigned long end, bool free_mapped)
+				    unsigned long end, bool free_mapped,
+				    struct vmem_altmap *altmap)
 {
 	unsigned long next;
 	pud_t *pudp, pud;
@@ -865,16 +875,17 @@ static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
 			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 			if (free_mapped)
 				free_hotplug_page_range(pud_page(pud),
-							PUD_SIZE);
+							PUD_SIZE, altmap);
 			continue;
 		}
 		WARN_ON(!pud_table(pud));
-		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped);
+		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap);
 	} while (addr = next, addr < end);
 }
 
 static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
-				    unsigned long end, bool free_mapped)
+				    unsigned long end, bool free_mapped,
+				    struct vmem_altmap *altmap)
 {
 	unsigned long next;
 	p4d_t *p4dp, p4d;
@@ -887,16 +898,24 @@ static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
 			continue;
 
 		WARN_ON(!p4d_present(p4d));
-		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped);
+		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap);
 	} while (addr = next, addr < end);
 }
 
 static void unmap_hotplug_range(unsigned long addr, unsigned long end,
-				bool free_mapped)
+				bool free_mapped, struct vmem_altmap *altmap)
 {
 	unsigned long next;
 	pgd_t *pgdp, pgd;
 
+	/*
+	 * altmap can only be used as vmemmap mapping backing memory.
+	 * In case the backing memory itself is not being freed, then
+	 * altmap is irrelevant. Warn about this inconsistency when
+	 * encountered.
+	 */
+	WARN_ON(!free_mapped && altmap);
+
 	do {
 		next = pgd_addr_end(addr, end);
 		pgdp = pgd_offset_k(addr);
@@ -905,7 +924,7 @@ static void unmap_hotplug_range(unsigned long addr, unsigned long end,
 			continue;
 
 		WARN_ON(!pgd_present(pgd));
-		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped);
+		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
 	} while (addr = next, addr < end);
 }
 
@@ -1069,7 +1088,7 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 		struct vmem_altmap *altmap)
 {
-	return vmemmap_populate_basepages(start, end, node);
+	return vmemmap_populate_basepages(start, end, node, altmap);
 }
 #else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
@@ -1101,7 +1120,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 		if (pmd_none(READ_ONCE(*pmdp))) {
 			void *p = NULL;
 
-			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
+			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
 			if (!p)
 				return -ENOMEM;
 
@@ -1119,7 +1138,7 @@ void vmemmap_free(unsigned long start, unsigned long end,
 #ifdef CONFIG_MEMORY_HOTPLUG
 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
 
-	unmap_hotplug_range(start, end, true);
+	unmap_hotplug_range(start, end, true, altmap);
 	free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
 #endif
 }
@@ -1410,7 +1429,7 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
 	WARN_ON(pgdir != init_mm.pgd);
 	WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));
 
-	unmap_hotplug_range(start, end, false);
+	unmap_hotplug_range(start, end, false, NULL);
 	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
 }
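
A note on the pgalloc.h hunk above: it relies on the convention that <asm-generic/pgalloc.h> compiles a generic helper only when the architecture has not claimed it with a __HAVE_ARCH_* macro. arm64 deletes its private pmd/pud helpers to inherit the generic ones, and defines __HAVE_ARCH_PGD_FREE before the include so its own pgd_free() stays in effect. Below is a minimal, self-contained sketch of that override convention; it is userspace C with stand-in bodies and simplified signatures, not the kernel's actual headers.

#include <stdio.h>
#include <stdlib.h>

/* The "arch header" claims pgd_free() before pulling in the generic one. */
#define __HAVE_ARCH_PGD_FREE

/* --- what the generic header morally does --- */
#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
static void *pmd_alloc_one(void)
{
	return calloc(1, 4096);		/* generic fallback allocator */
}
#endif

#ifndef __HAVE_ARCH_PGD_FREE
static void pgd_free(void *pgd)
{
	free(pgd);			/* generic fallback, skipped here */
}
#endif

/* --- the arch override kept alive by __HAVE_ARCH_PGD_FREE --- */
#ifdef __HAVE_ARCH_PGD_FREE
static void pgd_free(void *pgd)
{
	/* arch-specific teardown would go here */
	free(pgd);
}
#endif

int main(void)
{
	void *pmd = pmd_alloc_one();	/* generic: no arch override defined */
	void *pgd = calloc(1, 4096);

	printf("pmd=%p pgd=%p\n", pmd, pgd);
	free(pmd);
	pgd_free(pgd);			/* the arch version is the one compiled */
	return 0;
}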
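
Similarly, the mmu.c changes thread a struct vmem_altmap pointer down the hot-remove path so that vmemmap pages carved out of the hot-added range itself are credited back through vmem_altmap_free() instead of being handed to the page allocator with free_pages(). The sketch below illustrates only that accounting split; fake_altmap and its field are hypothetical stand-ins, not the kernel's types or API.

#include <stdio.h>

/* Hypothetical stand-in for struct vmem_altmap: it tracks how many pages
 * of device-backed memory are currently lent out as vmemmap. */
struct fake_altmap {
	unsigned long alloc;
};

static void fake_altmap_free(struct fake_altmap *altmap, unsigned long nr_pfns)
{
	/* pure bookkeeping: the backing memory belongs to the hot-added
	 * range and must not be returned to the buddy allocator */
	altmap->alloc -= nr_pfns;
}

static void free_page_range(struct fake_altmap *altmap, unsigned long nr_pfns)
{
	if (altmap)
		fake_altmap_free(altmap, nr_pfns);	/* device-backed vmemmap */
	else
		printf("freeing %lu pages to the page allocator\n", nr_pfns);
}

int main(void)
{
	struct fake_altmap a = { .alloc = 512 };

	free_page_range(&a, 512);	/* altmap case: counter drops to zero */
	printf("altmap.alloc now %lu\n", a.alloc);
	free_page_range(NULL, 512);	/* normally mapped pages */
	return 0;
}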