author     Punit Agrawal <punit.agrawal@arm.com>  2018-12-11 17:10:35 +0000
committer  Marc Zyngier <marc.zyngier@arm.com>    2018-12-18 15:14:47 +0000
commit     6396b852e46e562f4742ed0a9042b537eb26b8aa (patch)
tree       7746e50553ff09b19ff1fabe800b4f912291bca0 /virt
parent     3f58bf63455588a260b6abc8761c48bc8977b919 (diff)
KVM: arm/arm64: Re-factor setting the Stage 2 entry to exec on fault
The Stage 2 fault handler marks a page as executable if it is handling an
execution fault, or if it was a permission fault, in which case the
executable bit needs to be preserved.

The logic to decide if the page should be marked executable is duplicated
for PMD and PTE entries. To avoid creating another copy when support for
PUD hugepages is introduced, refactor the code to share the checks needed
to mark a page table entry as executable.

Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
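In short, the duplicated per-entry checks collapse into a single needs_exec
decision that both the PMD and PTE paths reuse. Roughly, the shared pattern
after this patch looks like the following sketch (identifiers taken from the
diff below; the surrounding user_mem_abort() code is elided):

        /*
         * Decide once whether the new stage 2 entry may be executable:
         * either we are handling an execution fault, or a permission
         * fault on an entry that already had XN cleared.
         */
        needs_exec = exec_fault ||
                (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));

        if (vma_pagesize == PMD_SIZE) {
                /* ... build new_pmd as before ... */
                if (needs_exec)
                        new_pmd = kvm_s2pmd_mkexec(new_pmd);
        } else {
                /* ... build new_pte as before ... */
                if (needs_exec)
                        new_pte = kvm_s2pte_mkexec(new_pte);
        }

A later patch adding PUD hugepage support can then apply the same needs_exec
result to a PUD entry without duplicating the check a third time.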
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/mmu.c  28
1 file changed, 15 insertions, 13 deletions
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 59595207c5e1..6912529946fb 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1475,7 +1475,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  unsigned long fault_status)
 {
 	int ret;
-	bool write_fault, exec_fault, writable, force_pte = false;
+	bool write_fault, writable, force_pte = false;
+	bool exec_fault, needs_exec;
 	unsigned long mmu_seq;
 	gfn_t gfn = fault_ipa >> PAGE_SHIFT;
 	struct kvm *kvm = vcpu->kvm;
@@ -1598,19 +1599,25 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (exec_fault)
 		invalidate_icache_guest_page(pfn, vma_pagesize);
 
+	/*
+	 * If we took an execution fault we have made the
+	 * icache/dcache coherent above and should now let the s2
+	 * mapping be executable.
+	 *
+	 * Write faults (!exec_fault && FSC_PERM) are orthogonal to
+	 * execute permissions, and we preserve whatever we have.
+	 */
+	needs_exec = exec_fault ||
+		(fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
+
 	if (vma_pagesize == PMD_SIZE) {
 		pmd_t new_pmd = pfn_pmd(pfn, mem_type);
 		new_pmd = pmd_mkhuge(new_pmd);
 		if (writable)
 			new_pmd = kvm_s2pmd_mkwrite(new_pmd);
 
-		if (exec_fault) {
+		if (needs_exec)
 			new_pmd = kvm_s2pmd_mkexec(new_pmd);
-		} else if (fault_status == FSC_PERM) {
-			/* Preserve execute if XN was already cleared */
-			if (stage2_is_exec(kvm, fault_ipa))
-				new_pmd = kvm_s2pmd_mkexec(new_pmd);
-		}
 
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
@@ -1621,13 +1628,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			mark_page_dirty(kvm, gfn);
 		}
 
-		if (exec_fault) {
+		if (needs_exec)
 			new_pte = kvm_s2pte_mkexec(new_pte);
-		} else if (fault_status == FSC_PERM) {
-			/* Preserve execute if XN was already cleared */
-			if (stage2_is_exec(kvm, fault_ipa))
-				new_pte = kvm_s2pte_mkexec(new_pte);
-		}
 
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
 	}