author | Becky Bruce <beckyb@kernel.crashing.org> | 2011-06-28 09:54:48 +0000
---|---|---
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2011-09-20 09:19:40 +1000
commit | 41151e77a4d96ea138cede6d84c955aa4769ce74 (patch) |
tree | 2d997b77b9adf406a2fd30326bff688577d2e64f /arch/powerpc/include/asm |
parent | 7df5659eefad9b6d457ccdee016bd78bd064cfc0 (diff) |
powerpc: Hugetlb for BookE
Enable hugepages on Freescale BookE processors. This allows the kernel to
use huge TLB entries to map pages, which can greatly reduce the number of
TLB misses and the amount of TLB thrashing experienced by applications with
large memory footprints. Care should be taken when using this on FSL
processors, as the number of large TLB entries supported by the core is low
(16-64) on current processors.
The supported set of hugepage sizes includes 4m, 16m, 64m, 256m, and 1g.
Page sizes larger than the max zone size are called "gigantic" pages and
must be allocated on the command line (and cannot be deallocated).
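As a concrete illustration of that last point (the specific sizes and counts below are made up, not taken from this commit), gigantic pages are reserved at boot with the standard hugetlb parameters on the kernel command line:

```
hugepagesz=1g hugepages=2 hugepagesz=256m hugepages=4
```

Smaller huge page sizes can typically still be adjusted after boot via /proc/sys/vm/nr_hugepages or the per-size sysfs counters.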
This is currently only fully implemented for Freescale 32-bit BookE
processors, but there is some infrastructure in the code for
64-bit BookE.
Signed-off-by: Becky Bruce <beckyb@kernel.crashing.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r-- | arch/powerpc/include/asm/hugetlb.h | 63
-rw-r--r-- | arch/powerpc/include/asm/mmu-book3e.h | 7
-rw-r--r-- | arch/powerpc/include/asm/mmu-hash64.h | 3
-rw-r--r-- | arch/powerpc/include/asm/mmu.h | 18
-rw-r--r-- | arch/powerpc/include/asm/page.h | 31
-rw-r--r-- | arch/powerpc/include/asm/page_64.h | 11
-rw-r--r-- | arch/powerpc/include/asm/pte-book3e.h | 3
7 files changed, 112 insertions, 24 deletions
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 5856a66ab404..86004930a78e 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -1,15 +1,60 @@
 #ifndef _ASM_POWERPC_HUGETLB_H
 #define _ASM_POWERPC_HUGETLB_H
 
+#ifdef CONFIG_HUGETLB_PAGE
 #include <asm/page.h>
 
+extern struct kmem_cache *hugepte_cache;
+extern void __init reserve_hugetlb_gpages(void);
+
+static inline pte_t *hugepd_page(hugepd_t hpd)
+{
+	BUG_ON(!hugepd_ok(hpd));
+	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+}
+
+static inline unsigned int hugepd_shift(hugepd_t hpd)
+{
+	return hpd.pd & HUGEPD_SHIFT_MASK;
+}
+
+static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
+				    unsigned pdshift)
+{
+	/*
+	 * On 32-bit, we have multiple higher-level table entries that point to
+	 * the same hugepte.  Just use the first one since they're all
+	 * identical.  So for that case, idx=0.
+	 */
+	unsigned long idx = 0;
+
+	pte_t *dir = hugepd_page(*hpdp);
+#ifdef CONFIG_PPC64
+	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
+#endif
+
+	return dir + idx;
+}
+
 pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
 				 unsigned long addr, unsigned *shift);
 
 void flush_dcache_icache_hugepage(struct page *page);
 
+#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
 int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 			   unsigned long len);
+#else
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+					 unsigned long addr,
+					 unsigned long len)
+{
+	return 0;
+}
+#endif
+
+void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte);
+void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 
 void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 			    unsigned long end, unsigned long floor,
@@ -50,8 +95,11 @@ static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 					    unsigned long addr, pte_t *ptep)
 {
-	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
-	return __pte(old);
+#ifdef CONFIG_PPC64
+	return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
+#else
+	return __pte(pte_update(ptep, ~0UL, 0));
+#endif
 }
 
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
@@ -93,4 +141,15 @@ static inline void arch_release_hugepage(struct page *page)
 {
 }
 
+#else /* ! CONFIG_HUGETLB_PAGE */
+static inline void reserve_hugetlb_gpages(void)
+{
+	pr_err("Cannot reserve gpages without hugetlb enabled\n");
+}
+static inline void flush_hugetlb_page(struct vm_area_struct *vma,
+				      unsigned long vmaddr)
+{
+}
+#endif
+
 #endif /* _ASM_POWERPC_HUGETLB_H */
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 3ea0f9a259d8..0260ea5ec3c2 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -66,6 +66,7 @@
 #define MAS2_M			0x00000004
 #define MAS2_G			0x00000002
 #define MAS2_E			0x00000001
+#define MAS2_WIMGE_MASK		0x0000001f
 #define MAS2_EPN_MASK(size)		(~0 << (size + 10))
 #define MAS2_VAL(addr, size, flags)	((addr) & MAS2_EPN_MASK(size) | (flags))
 
@@ -80,6 +81,7 @@
 #define MAS3_SW			0x00000004
 #define MAS3_UR			0x00000002
 #define MAS3_SR			0x00000001
+#define MAS3_BAP_MASK		0x0000003f
 #define MAS3_SPSIZE		0x0000003e
 #define MAS3_SPSIZE_SHIFT	1
 
@@ -212,6 +214,11 @@ typedef struct {
 	unsigned int	id;
 	unsigned int	active;
 	unsigned long	vdso_base;
+#ifdef CONFIG_PPC_MM_SLICES
+	u64 low_slices_psize;   /* SLB page size encodings */
+	u64 high_slices_psize;  /* 4 bits per slice for now */
+	u16 user_psize;         /* page size index */
+#endif
 } mm_context_t;
 
 /* Page size definitions, common between 32 and 64-bit
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index b445e0af4c2b..db645ec842bd 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -262,8 +262,7 @@ extern void hash_failure_debug(unsigned long ea, unsigned long access,
 extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			     unsigned long pstart, unsigned long prot,
 			     int psize, int ssize);
-extern void add_gpage(unsigned long addr, unsigned long page_size,
-		      unsigned long number_of_pages);
+extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
 extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
 extern void hpte_init_native(void);
 
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 698b30638681..f0145522cfba 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -175,14 +175,16 @@ extern u64 ppc64_rma_size;
 #define MMU_PAGE_64K_AP	3	/* "Admixed pages" (hash64 only) */
 #define MMU_PAGE_256K	4
 #define MMU_PAGE_1M	5
-#define MMU_PAGE_8M	6
-#define MMU_PAGE_16M	7
-#define MMU_PAGE_256M	8
-#define MMU_PAGE_1G	9
-#define MMU_PAGE_16G	10
-#define MMU_PAGE_64G	11
-#define MMU_PAGE_COUNT	12
-
+#define MMU_PAGE_4M	6
+#define MMU_PAGE_8M	7
+#define MMU_PAGE_16M	8
+#define MMU_PAGE_64M	9
+#define MMU_PAGE_256M	10
+#define MMU_PAGE_1G	11
+#define MMU_PAGE_16G	12
+#define MMU_PAGE_64G	13
+
+#define MMU_PAGE_COUNT	14
 
 #if defined(CONFIG_PPC_STD_MMU_64)
 /* 64-bit classic hash table MMU */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 2cd664ef0a5e..dd9c4fd038e0 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -36,6 +36,18 @@
 
 #define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)
 
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_HUGETLB_PAGE
+extern unsigned int HPAGE_SHIFT;
+#else
+#define HPAGE_SHIFT PAGE_SHIFT
+#endif
+#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
+#endif
+
 /* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
 #define __HAVE_ARCH_GATE_AREA		1
 
@@ -158,6 +170,24 @@ extern phys_addr_t kernstart_addr;
 #define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
 #endif
 
+/*
+ * Use the top bit of the higher-level page table entries to indicate whether
+ * the entries we point to contain hugepages.  This works because we know that
+ * the page tables live in kernel space.  If we ever decide to support having
+ * page tables at arbitrary addresses, this breaks and will have to change.
+ */
+#ifdef CONFIG_PPC64
+#define PD_HUGE 0x8000000000000000
+#else
+#define PD_HUGE 0x80000000
+#endif
+
+/*
+ * Some number of bits at the level of the page table that points to
+ * a hugepte are used to encode the size.  This masks those bits.
+ */
+#define HUGEPD_SHIFT_MASK 0x3f
+
 #ifndef __ASSEMBLY__
 
 #undef STRICT_MM_TYPECHECKS
@@ -243,7 +273,6 @@ typedef unsigned long pgprot_t;
 #endif
 
 typedef struct { signed long pd; } hugepd_t;
-#define HUGEPD_SHIFT_MASK 0x3f
 
 #ifdef CONFIG_HUGETLB_PAGE
 static inline int hugepd_ok(hugepd_t hpd)
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 9356262fd3cc..fb40ede6bc0d 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -64,17 +64,6 @@ extern void copy_page(void *to, void *from);
 /* Log 2 of page table size */
 extern u64 ppc64_pft_size;
 
-/* Large pages size */
-#ifdef CONFIG_HUGETLB_PAGE
-extern unsigned int HPAGE_SHIFT;
-#else
-#define HPAGE_SHIFT PAGE_SHIFT
-#endif
-#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
-#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
-#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
-#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
-
 #endif /* __ASSEMBLY__ */
 
 #ifdef CONFIG_PPC_MM_SLICES
diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h
index 082d515930a2..0156702ba24e 100644
--- a/arch/powerpc/include/asm/pte-book3e.h
+++ b/arch/powerpc/include/asm/pte-book3e.h
@@ -72,6 +72,9 @@
 #define PTE_RPN_SHIFT	(24)
 #endif
 
+#define PTE_WIMGE_SHIFT	(19)
+#define PTE_BAP_SHIFT	(2)
+
 /* On 32-bit, we never clear the top part of the PTE */
 #ifdef CONFIG_PPC32
 #define _PTE_NONE_MASK	0xffffffff00000000ULL
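For readers unfamiliar with the hugepd encoding used above, here is a minimal user-space sketch of how hugepd_page() and hugepd_shift() recover the hugepte-page pointer and the page-size shift from a single higher-level page-table word, using the PD_HUGE and HUGEPD_SHIFT_MASK values added to page.h. It is illustrative only, not kernel code: the mk_hugepd() helper, the sample kernel address, and the plain stdint types are assumptions made for the example, and the 64-bit constants are used throughout.

```c
/* hugepd_demo.c -- illustrative only, NOT kernel code.
 * Mirrors the bit layout used by hugepd_page()/hugepd_shift() in the patch:
 * the low 6 bits (HUGEPD_SHIFT_MASK) of a higher-level page-table word hold
 * the hugepage size shift, and PD_HUGE (the top bit) is ORed back in when
 * recovering the kernel pointer to the hugepte page.
 */
#include <stdint.h>
#include <stdio.h>

#define PD_HUGE           0x8000000000000000ULL
#define HUGEPD_SHIFT_MASK 0x3fULL

typedef struct { int64_t pd; } hugepd_t;

/* Hypothetical encode helper (not part of the patch): drop the top bit of
 * the page-aligned pointer and stash the size shift in the low bits. */
static hugepd_t mk_hugepd(uint64_t hugepte_page, unsigned int shift)
{
	hugepd_t hpd = { .pd = (int64_t)((hugepte_page & ~PD_HUGE) | shift) };
	return hpd;
}

/* These two mirror the kernel helpers added to hugetlb.h. */
static uint64_t hugepd_page(hugepd_t hpd)
{
	return ((uint64_t)hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE;
}

static unsigned int hugepd_shift(hugepd_t hpd)
{
	return (unsigned int)((uint64_t)hpd.pd & HUGEPD_SHIFT_MASK);
}

int main(void)
{
	/* Made-up kernel address of a hugepte page describing 16M pages. */
	uint64_t kaddr = 0xc000000012340000ULL;
	hugepd_t hpd = mk_hugepd(kaddr, 24);	/* shift 24 == 16M */

	printf("pd    = 0x%016llx\n", (unsigned long long)hpd.pd);
	printf("page  = 0x%016llx\n", (unsigned long long)hugepd_page(hpd));
	printf("shift = %u (%llu bytes)\n", hugepd_shift(hpd),
	       1ULL << hugepd_shift(hpd));
	return 0;
}
```

In the kernel, such a word lives directly in a PGD/PUD/PMD slot, and hugepd_ok() (referenced in the hunk above) is what tells it apart from a pointer to an ordinary page table.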