author     Zhang Jianhua <chris.zjh@huawei.com>   2023-08-04 15:56:15 +0800
committer  Will Deacon <will@kernel.org>          2023-08-04 17:19:44 +0100
commit     4e0bacd65e72f7f9b1b60e48e85e6d4187474644 (patch)
tree       4d942f6e70525e99058be084454dfe391e41ec6a
parent     42501f6d4d5d4e5b69fa083193c8247918e8c393 (diff)
arm64: fix build warning for ARM64_MEMSTART_SHIFT
When building with W=1, the following warning occurs.
arch/arm64/include/asm/kernel-pgtable.h:129:41: error: "PUD_SHIFT" is not defined, evaluates to 0 [-Werror=undef]
129 | #define ARM64_MEMSTART_SHIFT PUD_SHIFT
| ^~~~~~~~~
arch/arm64/include/asm/kernel-pgtable.h:142:5: note: in expansion of macro ‘ARM64_MEMSTART_SHIFT’
142 | #if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
| ^~~~~~~~~~~~~~~~~~~~
The generic PUD_SHIFT is defined in include/asm-generic/pgtable-nopud.h, but
the #ifndef __ASSEMBLY__ guard in that header makes it unavailable to assembly
files. Whenever a .S file includes <asm/kernel-pgtable.h>, the build warning
occurs. Move the ARM64_MEMSTART_SHIFT and ARM64_MEMSTART_ALIGN macros to
arch/arm64/mm/init.c, the only place they are used, to avoid this issue.
Signed-off-by: Zhang Jianhua <chris.zjh@huawei.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20230804075615.3334756-1-chris.zjh@huawei.com
Signed-off-by: Will Deacon <will@kernel.org>
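
The failure mode can be reproduced outside the kernel tree. Below is a minimal
sketch (a hypothetical header, wundef-demo.h, with an assumed PUD_SHIFT value
of 30 and the literal 27 standing in for SECTION_SIZE_BITS) mimicking the
#ifndef __ASSEMBLY__ guard from include/asm-generic/pgtable-nopud.h: in a
preprocessor #if, an undefined identifier silently evaluates to 0, which is
exactly what -Wundef (promoted to an error by W=1's -Werror=undef) reports.

/* wundef-demo.h -- hypothetical header mimicking the guard in
 * include/asm-generic/pgtable-nopud.h: PUD_SHIFT is visible only to
 * C code, because .S files are preprocessed with __ASSEMBLY__ defined.
 */
#ifndef __ASSEMBLY__
#define PUD_SHIFT	30	/* assumed value for a 4K-granule config */
#endif

#define ARM64_MEMSTART_SHIFT	PUD_SHIFT

/*
 * When __ASSEMBLY__ is defined, PUD_SHIFT is not defined here, so the
 * #if below evaluates it as 0 and -Wundef reports it:
 *
 *   $ gcc -Wundef -D__ASSEMBLY__ -E wundef-demo.h
 *   warning: "PUD_SHIFT" is not defined, evaluates to 0 [-Wundef]
 */
#if ARM64_MEMSTART_SHIFT < 27	/* 27 stands in for SECTION_SIZE_BITS */
#define ARM64_MEMSTART_ALIGN	(1UL << 27)
#else
#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
#endif

Moving the macros into arch/arm64/mm/init.c sidesteps this entirely: init.c is
only ever compiled as C, so the #if never runs with __ASSEMBLY__ defined.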
-rw-r--r--  arch/arm64/include/asm/kernel-pgtable.h | 27 ---------------------------
-rw-r--r--  arch/arm64/mm/init.c                    | 27 +++++++++++++++++++++++++++
2 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 577773870b66..85d26143faa5 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -118,31 +118,4 @@
 #define SWAPPER_RX_MMUFLAGS	(SWAPPER_RW_MMUFLAGS | PTE_RDONLY)
 #endif
 
-/*
- * To make optimal use of block mappings when laying out the linear
- * mapping, round down the base of physical memory to a size that can
- * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
- * (64k granule), or a multiple that can be mapped using contiguous bits
- * in the page tables: 32 * PMD_SIZE (16k granule)
- */
-#if defined(CONFIG_ARM64_4K_PAGES)
-#define ARM64_MEMSTART_SHIFT		PUD_SHIFT
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define ARM64_MEMSTART_SHIFT		CONT_PMD_SHIFT
-#else
-#define ARM64_MEMSTART_SHIFT		PMD_SHIFT
-#endif
-
-/*
- * sparsemem vmemmap imposes an additional requirement on the alignment of
- * memstart_addr, due to the fact that the base of the vmemmap region
- * has a direct correspondence, and needs to appear sufficiently aligned
- * in the virtual address space.
- */
-#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
-#define ARM64_MEMSTART_ALIGN	(1UL << SECTION_SIZE_BITS)
-#else
-#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
-#endif
-
 #endif	/* __ASM_KERNEL_PGTABLE_H */
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index d31c3a9290c5..4fcb88a445ef 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -73,6 +73,33 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
 
 #define DEFAULT_CRASH_KERNEL_LOW_SIZE	(128UL << 20)
 
+/*
+ * To make optimal use of block mappings when laying out the linear
+ * mapping, round down the base of physical memory to a size that can
+ * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
+ * (64k granule), or a multiple that can be mapped using contiguous bits
+ * in the page tables: 32 * PMD_SIZE (16k granule)
+ */
+#if defined(CONFIG_ARM64_4K_PAGES)
+#define ARM64_MEMSTART_SHIFT		PUD_SHIFT
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define ARM64_MEMSTART_SHIFT		CONT_PMD_SHIFT
+#else
+#define ARM64_MEMSTART_SHIFT		PMD_SHIFT
+#endif
+
+/*
+ * sparsemem vmemmap imposes an additional requirement on the alignment of
+ * memstart_addr, due to the fact that the base of the vmemmap region
+ * has a direct correspondence, and needs to appear sufficiently aligned
+ * in the virtual address space.
+ */
+#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
+#define ARM64_MEMSTART_ALIGN	(1UL << SECTION_SIZE_BITS)
+#else
+#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
+#endif
+
 static int __init reserve_crashkernel_low(unsigned long long low_size)
 {
 	unsigned long long low_base;
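
For a concrete sense of what the relocated block computes, here is a
standalone userspace sketch, assuming a 4K-granule, 4-level configuration
(PUD_SHIFT = 30 is the usual value there; SECTION_SIZE_BITS = 27 is likewise
an assumed stand-in for the sparsemem section size exponent):

#include <stdio.h>

/* Assumed values for a 4K-granule, 4-level arm64 configuration. */
#define PUD_SHIFT		30
#define SECTION_SIZE_BITS	27

#define ARM64_MEMSTART_SHIFT	PUD_SHIFT

/* Same selection logic as the block moved into init.c above. */
#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
#define ARM64_MEMSTART_ALIGN	(1UL << SECTION_SIZE_BITS)
#else
#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
#endif

int main(void)
{
	/* 1UL << 30 = 1 GiB: the base of physical memory is rounded down
	 * to this alignment so the linear map can use PUD-sized blocks. */
	printf("ARM64_MEMSTART_ALIGN = %lu MiB\n",
	       ARM64_MEMSTART_ALIGN >> 20);
	return 0;
}

Under these assumptions it prints "ARM64_MEMSTART_ALIGN = 1024 MiB": with 4K
pages the shift wins over the section size, so memstart is aligned to 1 GiB
and the linear mapping can be built from PUD-level block mappings.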