author    Peter Zijlstra <peterz@infradead.org>  2020-02-03 17:37:08 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-02-04 03:05:26 +0000
commit    580a586c409ab3040b7284a19cd9e281692c40c7
tree      a5122b8864be12bd79e342fa5412d7320dc6cc29  /include/asm-generic/tlb.h
parent    3af4bd033759c4dab4f0ff594f0aa1e8d182b9d7
asm-generic/tlb: rename HAVE_MMU_GATHER_NO_GATHER
Towards a more consistent naming scheme.

Link: http://lkml.kernel.org/r/20200116064531.483522-9-aneesh.kumar@linux.ibm.com
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-generic/tlb.h')
-rw-r--r--  include/asm-generic/tlb.h | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 53befa5acb27..ca0fe75b5355 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -143,6 +143,16 @@
* MMU_GATHER_NO_RANGE
*
* Use this if your architecture lacks an efficient flush_tlb_range().
+ *
+ * MMU_GATHER_NO_GATHER
+ *
+ * If the option is set the mmu_gather will not track individual pages for
+ * delayed page free anymore. A platform that enables the option needs to
+ * provide its own implementation of the __tlb_remove_page_size() function to
+ * free pages.
+ *
+ * This is useful if your architecture already flushes TLB entries in the
+ * various ptep_get_and_clear() functions.
*/
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
@@ -202,7 +212,7 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
* If we can't allocate a page to make a big batch of page pointers
* to work on, then just handle a few from the on-stack structure.
@@ -277,7 +287,7 @@ struct mmu_gather {
unsigned int batch_count;
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
struct mmu_gather_batch *active;
struct mmu_gather_batch local;
struct page *__pages[MMU_GATHER_BUNDLE];
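
The comment added by this patch documents that an architecture selecting
MMU_GATHER_NO_GATHER must supply its own __tlb_remove_page_size() to free
pages, since the generic batching code is compiled out. Below is a minimal
sketch of what such an architecture-side definition can look like, assuming
the architecture's ptep_get_and_clear() variants already flush the TLB so
pages can be released immediately (modelled on the kind of definition s390
keeps in its asm/tlb.h, shown here purely as an illustration; it is not part
of this commit):

/*
 * Illustrative only: with MMU_GATHER_NO_GATHER the generic mmu_gather does
 * not accumulate pages, so the architecture frees each page itself.  The
 * TLB entry is assumed to be gone already (flushed by ptep_get_and_clear()),
 * so the page can be released right away.  Relies on
 * free_page_and_swap_cache() from <linux/swap.h>.
 */
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	free_page_and_swap_cache(page);
	return false;	/* no batch to fill, never force a flush */
}

Returning false tells the caller that no tlb_flush_mmu() is needed before
removing further pages, which matches the "nothing is batched" model this
option selects.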