author    Fuad Tabba <tabba@google.com>  2020-04-01 15:03:10 +0100
committer Marc Zyngier <maz@kernel.org>  2020-05-16 15:05:01 +0100
commit    656012c731fcfd0f770007366e2b952a613745f2 (patch)
tree      c177174e71b3d0bd8a27be90337f0292c6b162de /arch/arm64/kvm/mmu.c
parent    ce6f8f02f9f6786355fa6c79d88b839639dd75d8 (diff)
KVM: Fix spelling in code comments
Fix spelling and typos (e.g., repeated words) in comments.

Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200401140310.29701-1-tabba@google.com
Diffstat (limited to 'arch/arm64/kvm/mmu.c')
-rw-r--r--  arch/arm64/kvm/mmu.c | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index e3b9ee268823..29d8f24df944 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -784,7 +784,7 @@ static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
mutex_lock(&kvm_hyp_pgd_mutex);
/*
- * This assumes that we we have enough space below the idmap
+ * This assumes that we have enough space below the idmap
* page to allocate our VAs. If not, the check below will
* kick. A potential alternative would be to detect that
* overflow and switch to an allocation above the idmap.
@@ -964,7 +964,7 @@ static void stage2_unmap_memslot(struct kvm *kvm,
* stage2_unmap_vm - Unmap Stage-2 RAM mappings
* @kvm: The struct kvm pointer
*
- * Go through the memregions and unmap any reguler RAM
+ * Go through the memregions and unmap any regular RAM
* backing memory already mapped to the VM.
*/
void stage2_unmap_vm(struct kvm *kvm)
@@ -2262,7 +2262,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
{
/*
* At this point memslot has been committed and there is an
- * allocated dirty_bitmap[], dirty pages will be be tracked while the
+ * allocated dirty_bitmap[], dirty pages will be tracked while the
* memory slot is write protected.
*/
if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)