author	Peter Zijlstra <peterz@infradead.org>	2018-09-19 13:24:41 +0200
committer	Ingo Molnar <mingo@kernel.org>	2019-04-03 10:32:47 +0200
commit	96bc9567cbe112e9320250f01b9c060c882e8619 (patch)
tree	7948ca705fc25538b784b4f28dd62a2c5ec88b97
parent	8b6dd0c47894e2c190560914318aa4181bc8c2f2 (diff)
asm-generic/tlb, arch: Invert CONFIG_HAVE_RCU_TABLE_INVALIDATE
Make issuing a TLB invalidate for page-table pages the normal case.

The reason is twofold:

 - too many invalidates is safer than too few,
 - most architectures use the linux page-tables natively and would thus
   require this.

Make it an opt-out, instead of an opt-in.

No change in behavior intended.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
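The effect is easiest to see in the mm/mmu_gather.c guard this patch inverts. A condensed before/after sketch, paraphrasing the diff below rather than quoting it:

	/* Before: an architecture had to opt IN to get the invalidate. */
	#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
		tlb_flush_mmu_tlbonly(tlb);
	#endif

	/* After: the invalidate is the default; an architecture opts OUT. */
	#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE
		tlb_flush_mmu_tlbonly(tlb);
	#endif

Accordingly, arm64 and x86 drop the old opt-in select, while powerpc and sparc, which previously did not select it, now select HAVE_RCU_TABLE_NO_INVALIDATE, preserving behavior on all four architectures.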
-rw-r--r--	arch/Kconfig	2
-rw-r--r--	arch/arm64/Kconfig	1
-rw-r--r--	arch/powerpc/Kconfig	1
-rw-r--r--	arch/sparc/Kconfig	1
-rw-r--r--	arch/x86/Kconfig	1
-rw-r--r--	include/asm-generic/tlb.h	9
-rw-r--r--	mm/mmu_gather.c	2
7 files changed, 9 insertions, 8 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index cdc7f3d5d278..04b3e8b94cfe 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -383,7 +383,7 @@ config HAVE_ARCH_JUMP_LABEL_RELATIVE
 config HAVE_RCU_TABLE_FREE
 	bool
 
-config HAVE_RCU_TABLE_INVALIDATE
+config HAVE_RCU_TABLE_NO_INVALIDATE
 	bool
 
 config HAVE_MMU_GATHER_PAGE_SIZE
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7e34b9eba5de..78d9fafac983 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -149,7 +149,6 @@ config ARM64
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RCU_TABLE_FREE
-	select HAVE_RCU_TABLE_INVALIDATE
 	select HAVE_RSEQ
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a7aa4feabc09..8e1e2abf17eb 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -218,6 +218,7 @@ config PPC
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_RCU_TABLE_FREE if SMP
+	select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
 	select HAVE_MMU_GATHER_PAGE_SIZE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 40f8f4f73fe8..db79290ed6d5 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -63,6 +63,7 @@ config SPARC64
 	select HAVE_KRETPROBES
 	select HAVE_KPROBES
 	select HAVE_RCU_TABLE_FREE if SMP
+	select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select HAVE_DYNAMIC_FTRACE
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5ad92419be19..b0f30d86c23f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -183,7 +183,6 @@ config X86
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_RCU_TABLE_FREE if PARAVIRT
-	select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
 	select HAVE_FUNCTION_ARG_ACCESS_API
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 1c861989b704..81799e6a4304 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -135,11 +135,12 @@
  * When used, an architecture is expected to provide __tlb_remove_table()
  * which does the actual freeing of these pages.
  *
- * HAVE_RCU_TABLE_INVALIDATE
+ * HAVE_RCU_TABLE_NO_INVALIDATE
  *
- * This makes HAVE_RCU_TABLE_FREE call tlb_flush_mmu_tlbonly() before freeing
- * the page-table pages. Required if you use HAVE_RCU_TABLE_FREE and your
- * architecture uses the Linux page-tables natively.
+ * This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
+ * freeing the page-table pages. This can be avoided if you use
+ * HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux
+ * page-tables natively.
  *
  * MMU_GATHER_NO_RANGE
  *
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 14dfc97155e4..2a5322d52b0a 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -157,7 +157,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
  */
 static inline void tlb_table_invalidate(struct mmu_gather *tlb)
 {
-#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
+#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE
 	/*
	 * Invalidate page-table caches used by hardware walkers. Then we still
	 * need to RCU-sched wait while freeing the pages because software
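The final hunk is cut off above. For reference, a sketch of how the whole function reads after this patch, reconstructed from the visible context (the closing lines are inferred, not part of the quoted diff):

	static inline void tlb_table_invalidate(struct mmu_gather *tlb)
	{
	#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE
		/*
		 * Invalidate page-table caches used by hardware walkers. Then we still
		 * need to RCU-sched wait while freeing the pages because software
		 * walkers can still be in-flight.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	#endif
	}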