From 4f5b0c1789963477cc9a4d45b4b62d694665cceb Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Mon, 14 Dec 2020 19:09:59 -0800
Subject: arm, arm64: move free_unused_memmap() to generic mm

ARM and ARM64 free unused parts of the memory map just before the
initialization of the page allocator. To allow holes in the memory map
both architectures overload pfn_valid() and define HAVE_ARCH_PFN_VALID.

Allowing holes in the memory map for FLATMEM may be useful for small
machines, such as ARC and m68k, and will enable those architectures to
cease using DISCONTIGMEM and still support more than one memory bank.

Move the functions that free unused memory map to generic mm and enable
them when HAVE_ARCH_PFN_VALID=y.

Link: https://lkml.kernel.org/r/20201101170454.9567-10-rppt@kernel.org
Signed-off-by: Mike Rapoport
Acked-by: Catalin Marinas [arm64]
Cc: Alexey Dobriyan
Cc: Geert Uytterhoeven
Cc: Greg Ungerer
Cc: John Paul Adrian Glaubitz
Cc: Jonathan Corbet
Cc: Matt Turner
Cc: Meelis Roos
Cc: Michael Schmitz
Cc: Russell King
Cc: Tony Luck
Cc: Vineet Gupta
Cc: Will Deacon
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm64/mm/init.c | 68 ----------------------------------------------------
 1 file changed, 68 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 095540667f0f..3d8328277bec 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -430,71 +430,6 @@ void __init bootmem_init(void)
 	memblock_dump_all();
 }
 
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
-static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
-{
-	struct page *start_pg, *end_pg;
-	unsigned long pg, pgend;
-
-	/*
-	 * Convert start_pfn/end_pfn to a struct page pointer.
-	 */
-	start_pg = pfn_to_page(start_pfn - 1) + 1;
-	end_pg = pfn_to_page(end_pfn - 1) + 1;
-
-	/*
-	 * Convert to physical addresses, and round start upwards and end
-	 * downwards.
-	 */
-	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
-	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
-
-	/*
-	 * If there are free pages between these, free the section of the
-	 * memmap array.
-	 */
-	if (pg < pgend)
-		memblock_free(pg, pgend - pg);
-}
-
-/*
- * The mem_map array can get very big. Free the unused area of the memory map.
- */
-static void __init free_unused_memmap(void)
-{
-	unsigned long start, end, prev_end = 0;
-	int i;
-
-	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
-#ifdef CONFIG_SPARSEMEM
-		/*
-		 * Take care not to free memmap entries that don't exist due
-		 * to SPARSEMEM sections which aren't present.
-		 */
-		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
-#endif
-		/*
-		 * If we had a previous bank, and there is a space between the
-		 * current bank and the previous, free it.
-		 */
-		if (prev_end && prev_end < start)
-			free_memmap(prev_end, start);
-
-		/*
-		 * Align up here since the VM subsystem insists that the
-		 * memmap entries are valid from the bank end aligned to
-		 * MAX_ORDER_NR_PAGES.
-		 */
-		prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
-	}
-
-#ifdef CONFIG_SPARSEMEM
-	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
-		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
-#endif
-}
-#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
-
 /*
  * mem_init() marks the free areas in the mem_map and tells us how much memory
  * is free.  This is done after various parts of the system have claimed their
@@ -510,9 +445,6 @@ void __init mem_init(void)
 
 	set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
 
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
-	free_unused_memmap();
-#endif
 	/* this will put all unused low memory onto the freelists */
 	memblock_free_all();
--
cgit v1.2.3
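
The generic copy that replaces the code deleted above lives in mm/memblock.c
after this series. A minimal sketch of its shape, assuming the same
free_memmap() helper and bank-gap walk shown in the removed arm64 code
(illustration only, not the verbatim upstream implementation; the SPARSEMEM
section-alignment handling from the deleted code is kept upstream but omitted
here for brevity):

/*
 * mm/memblock.c (sketch): same walk as the ARM/arm64 code removed
 * above, but gated on the generic Kconfig symbol instead of being
 * duplicated per architecture.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
	    IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		return;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		/* free the struct pages covering the hole between banks */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * The buddy allocator requires a valid memmap up to the
		 * bank end aligned to MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
	}
}

memblock_free_all() calls this just before releasing free pages to the buddy
allocator, mirroring the call removed from arm64's mem_init() above.
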
From 5d6ad668f31625c6aa9ed8dc3bdb29561d2b1144 Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Mon, 14 Dec 2020 19:10:30 -0800
Subject: arch, mm: restore dependency of __kernel_map_pages() on DEBUG_PAGEALLOC

The design of DEBUG_PAGEALLOC presumes that __kernel_map_pages() must
never fail. With this assumption it wouldn't be safe to allow general
usage of this function.

Moreover, some architectures that implement __kernel_map_pages() have this
function guarded by #ifdef DEBUG_PAGEALLOC and some refuse to map/unmap
pages when page allocation debugging is disabled at runtime.

As all the users of __kernel_map_pages() were converted to use
debug_pagealloc_map_pages(), it is safe to make it available only when
DEBUG_PAGEALLOC is set.

Link: https://lkml.kernel.org/r/20201109192128.960-4-rppt@kernel.org
Signed-off-by: Mike Rapoport
Acked-by: David Hildenbrand
Acked-by: Kirill A. Shutemov
Cc: Albert Ou
Cc: Andy Lutomirski
Cc: Benjamin Herrenschmidt
Cc: Borislav Petkov
Cc: Catalin Marinas
Cc: Christian Borntraeger
Cc: Christoph Lameter
Cc: Dave Hansen
Cc: David Rientjes
Cc: "David S. Miller"
Cc: "Edgecombe, Rick P"
Cc: Heiko Carstens
Cc: "H. Peter Anvin"
Cc: Ingo Molnar
Cc: Joonsoo Kim
Cc: Len Brown
Cc: Michael Ellerman
Cc: Palmer Dabbelt
Cc: Paul Mackerras
Cc: Paul Walmsley
Cc: Pavel Machek
Cc: Pekka Enberg
Cc: Peter Zijlstra
Cc: Rafael J. Wysocki
Cc: "Rafael J. Wysocki"
Cc: Thomas Gleixner
Cc: Vasily Gorbik
Cc: Vlastimil Babka
Cc: Will Deacon
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/Kconfig                     |  3 +++
 arch/arm64/Kconfig               |  4 +---
 arch/arm64/mm/pageattr.c         |  8 ++++++--
 arch/powerpc/Kconfig             |  5 +----
 arch/riscv/Kconfig               |  4 +---
 arch/riscv/include/asm/pgtable.h |  2 --
 arch/riscv/mm/pageattr.c         |  2 ++
 arch/s390/Kconfig                |  4 +---
 arch/sparc/Kconfig               |  4 +---
 arch/x86/Kconfig                 |  4 +---
 arch/x86/mm/pat/set_memory.c     |  2 ++
 include/linux/mm.h               | 10 +++++++---
 12 files changed, 26 insertions(+), 26 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/Kconfig b/arch/Kconfig
index 8d5efff59cd8..cd4172a80123 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1047,6 +1047,9 @@ config ARCH_WANT_LD_ORPHAN_WARN
 config HAVE_ARCH_PFN_VALID
 	bool
 
+config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+	bool
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6f0751631ef1..f2dee598a921 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -71,6 +71,7 @@ config ARM64
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
 	select ARCH_USE_SYM_ANNOTATIONS
+	select ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	select ARCH_SUPPORTS_MEMORY_FAILURE
 	select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK
 	select ARCH_SUPPORTS_ATOMIC_RMW
@@ -1028,9 +1029,6 @@ config HOLES_IN_ZONE
 
 source "kernel/Kconfig.hz"
 
-config ARCH_SUPPORTS_DEBUG_PAGEALLOC
-	def_bool y
-
 config ARCH_SPARSEMEM_ENABLE
 	def_bool y
 	select SPARSEMEM_VMEMMAP_ENABLE
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 1b94f5b82654..439325532be1 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -155,7 +155,7 @@ int set_direct_map_invalid_noflush(struct page *page)
 		.clear_mask = __pgprot(PTE_VALID),
 	};
 
-	if (!rodata_full)
+	if (!debug_pagealloc_enabled() && !rodata_full)
 		return 0;
 
 	return apply_to_page_range(&init_mm,
@@ -170,7 +170,7 @@ int set_direct_map_default_noflush(struct page *page)
 		.clear_mask = __pgprot(PTE_RDONLY),
 	};
 
-	if (!rodata_full)
+	if (!debug_pagealloc_enabled() && !rodata_full)
 		return 0;
 
 	return apply_to_page_range(&init_mm,
@@ -178,6 +178,7 @@ int set_direct_map_default_noflush(struct page *page)
 				   PAGE_SIZE, change_page_range, &data);
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (!debug_pagealloc_enabled() && !rodata_full)
@@ -186,6 +187,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
 
 	set_memory_valid((unsigned long)page_address(page), numpages, enable);
 }
+#ifdef CONFIG_HIBERNATION
 /*
  * This function is used to determine if a linear map page has been marked as
  * not-valid. Walk the page table and check the PTE_VALID bit. This is based
@@ -232,3 +234,5 @@ bool kernel_page_present(struct page *page)
 	ptep = pte_offset_kernel(pmdp, addr);
 	return pte_valid(READ_ONCE(*ptep));
 }
+#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 5181872f9452..8e8f46bbd39c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -146,6 +146,7 @@ config PPC
 	select ARCH_MIGHT_HAVE_PC_SERIO
 	select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
 	select ARCH_SUPPORTS_ATOMIC_RMW
+	select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC32 || PPC_BOOK3S_64
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF if PPC64
 	select ARCH_USE_QUEUED_RWLOCKS if PPC_QUEUED_SPINLOCKS
@@ -356,10 +357,6 @@ config PPC_OF_PLATFORM_PCI
 	depends on PCI
 	depends on PPC64 # not supported on 32 bits yet
 
-config ARCH_SUPPORTS_DEBUG_PAGEALLOC
-	depends on PPC32 || PPC_BOOK3S_64
-	def_bool y
-
 config ARCH_SUPPORTS_UPROBES
 	def_bool y
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 44377fd7860e..9283c6f9ae2a 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -14,6 +14,7 @@ config RISCV
 	def_bool y
 	select ARCH_CLOCKSOURCE_INIT
 	select ARCH_SUPPORTS_ATOMIC_RMW
+	select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
 	select ARCH_HAS_BINFMT_FLAT
 	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select ARCH_HAS_DEBUG_VIRTUAL if MMU
@@ -153,9 +154,6 @@ config ARCH_SELECT_MEMORY_MODEL
 config ARCH_WANT_GENERAL_HUGETLB
 	def_bool y
 
-config ARCH_SUPPORTS_DEBUG_PAGEALLOC
-	def_bool y
-
 config SYS_SUPPORTS_HUGETLBFS
 	depends on MMU
 	def_bool y
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 183f1f4b2ae6..41a72861987c 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -461,8 +461,6 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 #define VMALLOC_START 0
 #define VMALLOC_END   TASK_SIZE
 
-static inline void __kernel_map_pages(struct page *page, int numpages, int enable) {}
-
 #endif /* !CONFIG_MMU */
 
 #define kern_addr_valid(addr)   (1) /* FIXME */
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 19fecb362d81..321b09d2e2ea 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -184,6 +184,7 @@ int set_direct_map_default_noflush(struct page *page)
 	return ret;
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (!debug_pagealloc_enabled())
@@ -196,3 +197,4 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
 	__set_memory((unsigned long)page_address(page), numpages,
 		     __pgprot(0), __pgprot(_PAGE_PRESENT));
 }
+#endif
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 4a2a12be04c9..991a850a6c0b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -35,9 +35,6 @@ config GENERIC_LOCKBREAK
 config PGSTE
 	def_bool y if KVM
 
-config ARCH_SUPPORTS_DEBUG_PAGEALLOC
-	def_bool y
-
 config AUDIT_ARCH
 	def_bool y
 
@@ -106,6 +103,7 @@ config S390
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 	select ARCH_STACKWALK
 	select ARCH_SUPPORTS_ATOMIC_RMW
+	select ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	select ARCH_SUPPORTS_NUMA_BALANCING
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index a6ca135442f9..2c729b8d097a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -88,6 +88,7 @@ config SPARC64
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_ARCH_AUDITSYSCALL
 	select ARCH_SUPPORTS_ATOMIC_RMW
+	select ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	select HAVE_NMI
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select ARCH_USE_QUEUED_RWLOCKS
@@ -148,9 +149,6 @@ config GENERIC_ISA_DMA
 	bool
 	default y if SPARC32
 
-config ARCH_SUPPORTS_DEBUG_PAGEALLOC
-	def_bool y if SPARC64
-
 config PGTABLE_LEVELS
 	default 4 if 64BIT
 	default 3
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 46393177ce73..384c91b057c5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -91,6 +91,7 @@ config X86
 	select ARCH_STACKWALK
 	select ARCH_SUPPORTS_ACPI
 	select ARCH_SUPPORTS_ATOMIC_RMW
+	select ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_QUEUED_RWLOCKS
@@ -331,9 +332,6 @@ config ZONE_DMA32
 config AUDIT_ARCH
 	def_bool y if X86_64
 
-config ARCH_SUPPORTS_DEBUG_PAGEALLOC
-	def_bool y
-
 config KASAN_SHADOW_OFFSET
 	hex
 	depends on KASAN
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 40baa90e74f4..bc9be96b777f 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2194,6 +2194,7 @@ int set_direct_map_default_noflush(struct page *page)
 	return __set_pages_p(page, 1);
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (PageHighMem(page))
@@ -2239,6 +2240,7 @@ bool kernel_page_present(struct page *page)
 	return (pte_val(*pte) & _PAGE_PRESENT);
 }
 #endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 
 int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
 				   unsigned numpages, unsigned long page_flags)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6b5df31387b5..7a67bdf3df5e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2931,7 +2931,11 @@ static inline bool debug_pagealloc_enabled_static(void)
 	return static_branch_unlikely(&_debug_pagealloc_enabled);
 }
 
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
+#ifdef CONFIG_DEBUG_PAGEALLOC
+/*
+ * To support DEBUG_PAGEALLOC architecture must ensure that
+ * __kernel_map_pages() never fails
+ */
 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
 
 static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
@@ -2949,13 +2953,13 @@ static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
 #ifdef CONFIG_HIBERNATION
 extern bool kernel_page_present(struct page *page);
 #endif /* CONFIG_HIBERNATION */
-#else /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
+#else /* CONFIG_DEBUG_PAGEALLOC */
 static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
 #ifdef CONFIG_HIBERNATION
 static inline bool kernel_page_present(struct page *page) { return true; }
 #endif /* CONFIG_HIBERNATION */
-#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 
 #ifdef __HAVE_ARCH_GATE_AREA
 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
--
cgit v1.2.3
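
With the wrappers above, core mm never calls __kernel_map_pages() directly,
and the calls compile away entirely when CONFIG_DEBUG_PAGEALLOC=n, which is
why the riscv pgtable.h stub can be deleted. A simplified sketch of the
page-allocator call sites this contract serves (signatures trimmed; checks,
poisoning, and unrelated logic elided -- not the verbatim mm/page_alloc.c
code):

/*
 * mm/page_alloc.c (sketch): with DEBUG_PAGEALLOC enabled at runtime,
 * pages are unmapped from the kernel direct map while they sit on the
 * free lists, so use-after-free accesses fault immediately.
 */
static bool free_pages_prepare(struct page *page, unsigned int order)
{
	/* ... sanity checks and poisoning elided ... */
	debug_pagealloc_unmap_pages(page, 1 << order);
	return true;
}

void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags)
{
	/* map the range back before handing the pages to the caller */
	debug_pagealloc_map_pages(page, 1 << order);
}

Because the wrappers check debug_pagealloc_enabled_static() before calling
__kernel_map_pages(), and because the new comment in mm.h requires that
__kernel_map_pages() never fail, these call sites need no error handling.
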
From 32a0de886eb3cb7e6990da27a9cdfa50baa8be64 Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Mon, 14 Dec 2020 19:10:35 -0800
Subject: arch, mm: make kernel_page_present() always available

For architectures that enable ARCH_HAS_SET_MEMORY, having the ability to
verify that a page is mapped in the kernel direct map can be useful
regardless of hibernation.

Add a RISC-V implementation of kernel_page_present(), update its forward
declarations and stubs to be part of the set_memory API, and remove the
ugly ifdefery in include/linux/mm.h around the current declarations of
kernel_page_present().

Link: https://lkml.kernel.org/r/20201109192128.960-5-rppt@kernel.org
Signed-off-by: Mike Rapoport
Acked-by: Kirill A. Shutemov
Cc: Albert Ou
Cc: Andy Lutomirski
Cc: Benjamin Herrenschmidt
Cc: Borislav Petkov
Cc: Catalin Marinas
Cc: Christian Borntraeger
Cc: Christoph Lameter
Cc: Dave Hansen
Cc: David Hildenbrand
Cc: David Rientjes
Cc: "David S. Miller"
Cc: "Edgecombe, Rick P"
Cc: Heiko Carstens
Cc: "H. Peter Anvin"
Cc: Ingo Molnar
Cc: Joonsoo Kim
Cc: Len Brown
Cc: Michael Ellerman
Cc: Palmer Dabbelt
Cc: Paul Mackerras
Cc: Paul Walmsley
Cc: Pavel Machek
Cc: Pekka Enberg
Cc: Peter Zijlstra
Cc: Rafael J. Wysocki
Cc: "Rafael J. Wysocki"
Cc: Thomas Gleixner
Cc: Vasily Gorbik
Cc: Vlastimil Babka
Cc: Will Deacon
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm64/include/asm/cacheflush.h |  1 +
 arch/arm64/mm/pageattr.c            |  4 +---
 arch/riscv/include/asm/set_memory.h |  1 +
 arch/riscv/mm/pageattr.c            | 29 +++++++++++++++++++++++++++++
 arch/x86/include/asm/set_memory.h   |  1 +
 arch/x86/mm/pat/set_memory.c        |  4 +---
 include/linux/mm.h                  |  7 -------
 include/linux/set_memory.h          |  5 +++++
 8 files changed, 39 insertions(+), 13 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 9384fd8fc13c..45217f21f1fe 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -140,6 +140,7 @@ int set_memory_valid(unsigned long addr, int numpages, int enable);
 
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
+bool kernel_page_present(struct page *page);
 
 #include <asm-generic/cacheflush.h>
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 439325532be1..92eccaf595c8 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -186,8 +186,8 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
 
 	set_memory_valid((unsigned long)page_address(page), numpages, enable);
 }
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 
-#ifdef CONFIG_HIBERNATION
 /*
  * This function is used to determine if a linear map page has been marked as
  * not-valid. Walk the page table and check the PTE_VALID bit. This is based
@@ -234,5 +234,3 @@ bool kernel_page_present(struct page *page)
 	ptep = pte_offset_kernel(pmdp, addr);
 	return pte_valid(READ_ONCE(*ptep));
 }
-#endif /* CONFIG_HIBERNATION */
-#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index 4c5bae7ca01c..d690b08dff2a 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -24,6 +24,7 @@ static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
 
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
+bool kernel_page_present(struct page *page);
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 321b09d2e2ea..87ba5a68bbb8 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -198,3 +198,32 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
 		     __pgprot(0), __pgprot(_PAGE_PRESENT));
 }
 #endif
+
+bool kernel_page_present(struct page *page)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+	pgd_t *pgd;
+	pud_t *pud;
+	p4d_t *p4d;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	pgd = pgd_offset_k(addr);
+	if (!pgd_present(*pgd))
+		return false;
+
+	p4d = p4d_offset(pgd, addr);
+	if (!p4d_present(*p4d))
+		return false;
+
+	pud = pud_offset(p4d, addr);
+	if (!pud_present(*pud))
+		return false;
+
+	pmd = pmd_offset(pud, addr);
+	if (!pmd_present(*pmd))
+		return false;
+
+	pte = pte_offset_kernel(pmd, addr);
+	return pte_present(*pte);
+}
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 5948218f35c5..4352f08bfbb5 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -82,6 +82,7 @@ int set_pages_rw(struct page *page, int numpages);
 
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
+bool kernel_page_present(struct page *page);
 
 extern int kernel_set_to_readonly;
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index bc9be96b777f..16f878c26667 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2226,8 +2226,8 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
 
 	arch_flush_lazy_mmu_mode();
 }
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 
-#ifdef CONFIG_HIBERNATION
 bool kernel_page_present(struct page *page)
 {
 	unsigned int level;
@@ -2239,8 +2239,6 @@ bool kernel_page_present(struct page *page)
 	pte = lookup_address((unsigned long)page_address(page), &level);
 	return (pte_val(*pte) & _PAGE_PRESENT);
 }
-#endif /* CONFIG_HIBERNATION */
-#endif /* CONFIG_DEBUG_PAGEALLOC */
 
 int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
 				   unsigned numpages, unsigned long page_flags)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7a67bdf3df5e..5ec79757eeae 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2949,16 +2949,9 @@ static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
 	if (debug_pagealloc_enabled_static())
 		__kernel_map_pages(page, numpages, 0);
 }
-
-#ifdef CONFIG_HIBERNATION
-extern bool kernel_page_present(struct page *page);
-#endif /* CONFIG_HIBERNATION */
 #else /* CONFIG_DEBUG_PAGEALLOC */
 static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
-#ifdef CONFIG_HIBERNATION
-static inline bool kernel_page_present(struct page *page) { return true; }
-#endif /* CONFIG_HIBERNATION */
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
 #ifdef __HAVE_ARCH_GATE_AREA
 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 860e0f843c12..fe1aa4e54680 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -23,6 +23,11 @@ static inline int set_direct_map_default_noflush(struct page *page)
 {
 	return 0;
 }
+
+static inline bool kernel_page_present(struct page *page)
+{
+	return true;
+}
 #endif
 
 #ifndef set_mce_nospec
--
cgit v1.2.3
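
After this patch kernel_page_present() resolves in one of two ways: an
architecture implementation declared in the arch set_memory.h headers (x86,
arm64, and now RISC-V), or the generic include/linux/set_memory.h stub that
simply returns true, since without ARCH_HAS_SET_DIRECT_MAP the direct map has
no holes to report. A sketch of the kind of caller this simplifies, modelled
on hibernation's safe page copy (hibernate_map_page() and
hibernate_unmap_page() come from an earlier patch in this series):

/*
 * kernel/power/snapshot.c (sketch): the caller needs no
 * CONFIG_HIBERNATION or CONFIG_DEBUG_PAGEALLOC ifdefs -- on
 * architectures that cannot unmap direct-map pages the generic stub
 * returns true and only the first branch is ever taken.
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		hibernate_map_page(s_page);
		do_copy_page(dst, page_address(s_page));
		hibernate_unmap_page(s_page);
	}
}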