author     Christoffer Dall <christoffer.dall@linaro.org>	2012-11-01 17:14:45 +0100
committer  Christoffer Dall <christoffer.dall@linaro.org>	2013-10-17 17:06:20 -0700
commit     ad361f093c1e31d0b43946210a32ab4ff5c49850 (patch)
tree       2bfef51798c8e6a916c9a961dd293fd919b0258a /arch/arm64/include/asm/kvm_mmu.h
parent     86ed81aa2e1ce05a4e7f0819f0dfc34e8d8fb910 (diff)
KVM: ARM: Support hugetlbfs backed huge pages
Support huge pages in KVM/ARM and KVM/ARM64. The pud_huge check on the unmap path may feel a bit silly, as pud_huge is always defined to false there, but the compiler should be smart enough to optimize the dead branch away.

Note: this deals only with VMAs marked as huge, which are allocated by users through hugetlbfs. Transparent huge pages can only be detected by looking at the underlying pages (or the page tables themselves), and this patch so far simply maps those on a page-by-page level in the Stage-2 page tables.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
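For context (this view is limited to kvm_mmu.h), the mmu.c side of the patch uses the helpers added below roughly as follows. This is a condensed, illustrative sketch of the Stage-2 fault path, not a verbatim hunk; locals such as hugetlb, writable, memcache and pfn abbreviate state set up earlier in the fault handler:

	/* In the Stage-2 fault handler: a hugetlbfs-backed VMA gets one
	 * block mapping at PMD level; anything else gets a 4K PTE. */
	if (hugetlb) {
		pmd_t new_pmd = pmd_mkhuge(pfn_pmd(pfn, PAGE_S2));
		if (writable) {
			kvm_set_s2pmd_writable(&new_pmd);
			kvm_set_pfn_dirty(pfn);
		}
		/* flush the icache for the whole huge page in one call */
		coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE);
		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
	} else {
		pte_t new_pte = pfn_pte(pfn, PAGE_S2);
		if (writable) {
			kvm_set_s2pte_writable(&new_pte);
			kvm_set_pfn_dirty(pfn);
		}
		coherent_icache_guest_page(kvm, hva, PAGE_SIZE);
		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
	}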
Diffstat (limited to 'arch/arm64/include/asm/kvm_mmu.h')
-rw-r--r--	arch/arm64/include/asm/kvm_mmu.h | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index efe609c6a3c9..680f74e67497 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -91,6 +91,7 @@ int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
 #define kvm_set_pte(ptep, pte)	set_pte(ptep, pte)
+#define kvm_set_pmd(pmdp, pmd)	set_pmd(pmdp, pmd)
 
 static inline bool kvm_is_write_fault(unsigned long esr)
 {
@@ -116,13 +117,18 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
 	pte_val(*pte) |= PTE_S2_RDWR;
 }
 
+static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+{
+	pmd_val(*pmd) |= PMD_S2_RDWR;
+}
+
 struct kvm;
 
-static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
+					      unsigned long size)
 {
 	if (!icache_is_aliasing()) {		/* PIPT */
-		unsigned long hva = gfn_to_hva(kvm, gfn);
-		flush_icache_range(hva, hva + PAGE_SIZE);
+		flush_icache_range(hva, hva + size);
 	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
 		/* any kind of VIPT cache */
 		__flush_icache_all();
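On the pud_huge() remark in the commit message: since the helper is defined to return 0 on these configurations, the branch in the unmap path is provably dead and any optimizing compiler drops it. A standalone (user-space) C sketch of the same pattern; fake_pud_huge() and unmap_level() are invented names for illustration only:

#include <stdio.h>

/* Stands in for pud_huge() on a config without PUD-level huge pages:
 * a constant-false predicate the compiler can see through. */
static inline int fake_pud_huge(unsigned long pud)
{
	return 0;
}

static void unmap_level(unsigned long pud)
{
	if (fake_pud_huge(pud)) {
		/* Dead code: eliminated entirely at -O2, so the
		 * "silly" check costs nothing at run time. */
		puts("clear huge PUD");
	} else {
		puts("descend to PMD level");
	}
}

int main(void)
{
	unmap_level(0);
	return 0;
}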