summaryrefslogtreecommitdiff
path: root/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
blob: 0f728f05ea82f7dc8051ed812cfde79639ae2385 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM paravirtual feature disablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <linux/stringify.h>
#include <stdint.h>

#include "apic.h"
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

/* VMCALL and VMMCALL are both 3-byte opcodes. */
#define HYPERCALL_INSN_SIZE	3

/*
 * Set by the host before running the guest and mirrored into guest memory
 * via sync_global_to_guest(); true when KVM_X86_QUIRK_FIX_HYPERCALL_INSN
 * has been disabled for the VM under test.
 */
static bool quirk_disabled;

/*
 * Guest #UD handler: make the faulting hypercall appear to have "returned"
 * -EFAULT (the hypercall return value lives in RAX) and skip over the
 * 3-byte instruction so guest execution resumes after it.
 */
static void guest_ud_handler(struct ex_regs *regs)
{
	regs->rax = -EFAULT;
	regs->rip += HYPERCALL_INSN_SIZE;
}

/* Raw opcode bytes: VMCALL (Intel) and VMMCALL (AMD). */
static const uint8_t vmx_vmcall[HYPERCALL_INSN_SIZE]  = { 0x0f, 0x01, 0xc1 };
static const uint8_t svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 };

/* Defined by the asm label below; lets C code read and patch the bytes. */
extern uint8_t hypercall_insn[HYPERCALL_INSN_SIZE];
/*
 * Issue a KVM_HC_SCHED_YIELD hypercall targeting @apic_id and return the
 * hypercall's RAX.  The instruction slot is deliberately filled with 0xcc
 * (INT3) padding and MUST be patched at runtime (see guest_main()) before
 * this is called; the padding guarantees the test never silently executes
 * a stale/native hypercall by accident.
 */
static uint64_t do_sched_yield(uint8_t apic_id)
{
	uint64_t ret;

	asm volatile("hypercall_insn:\n\t"
		     ".byte 0xcc,0xcc,0xcc\n\t"
		     : "=a"(ret)
		     : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id)
		     : "memory");

	return ret;
}

static void guest_main(void)
{
	const uint8_t *native_hypercall_insn;
	const uint8_t *other_hypercall_insn;
	uint64_t ret;

	if (host_cpu_is_intel) {
		native_hypercall_insn = vmx_vmcall;
		other_hypercall_insn  = svm_vmmcall;
	} else if (host_cpu_is_amd) {
		native_hypercall_insn = svm_vmmcall;
		other_hypercall_insn  = vmx_vmcall;
	} else {
		GUEST_ASSERT(0);
		/* unreachable */
		return;
	}

	memcpy(hypercall_insn, other_hypercall_insn, HYPERCALL_INSN_SIZE);

	ret = do_sched_yield(GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID)));

	/*
	 * If the quirk is disabled, verify that guest_ud_handler() "returned"
	 * -EFAULT and that KVM did NOT patch the hypercall.  If the quirk is
	 * enabled, verify that the hypercall succeeded and that KVM patched in
	 * the "right" hypercall.
	 */
	if (quirk_disabled) {
		GUEST_ASSERT(ret == (uint64_t)-EFAULT);
		GUEST_ASSERT(!memcmp(other_hypercall_insn, hypercall_insn,
			     HYPERCALL_INSN_SIZE));
	} else {
		GUEST_ASSERT(!ret);
		GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn,
			     HYPERCALL_INSN_SIZE));
	}

	GUEST_DONE();
}

/*
 * Run the vCPU once and dispatch on the resulting ucall.  Returns normally
 * on UCALL_DONE, or after printing the message carried by a UCALL_SYNC;
 * any other ucall (or a guest assertion failure) fails the test.
 */
static void enter_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct ucall uc;

	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		pr_info("%s: %016lx\n", (const char *)uc.args[2], uc.args[3]);
		break;
	case UCALL_DONE:
		return;
	case UCALL_ABORT:
		/*
		 * NOTE(review): no break — relies on REPORT_GUEST_ASSERT()
		 * not returning; otherwise this would fall through to the
		 * default failure path below.
		 */
		REPORT_GUEST_ASSERT(uc);
	default:
		TEST_FAIL("Unhandled ucall: %ld\nexit_reason: %u (%s)",
			  uc.cmd, run->exit_reason, exit_reason_str(run->exit_reason));
	}
}

/*
 * Create a VM running guest_main() with a #UD handler installed, optionally
 * disable KVM_X86_QUIRK_FIX_HYPERCALL_INSN, and run the guest to completion.
 *
 * Fixes vs. original: the VM was never freed (one VM leaked per invocation,
 * and this runs twice), and the same VM handle was referenced inconsistently
 * as both @vm and vcpu->vm.
 */
static void test_fix_hypercall(bool disable_quirk)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);
	vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);

	if (disable_quirk)
		vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
			      KVM_X86_QUIRK_FIX_HYPERCALL_INSN);

	/* Mirror the quirk setting into the guest's copy of the global. */
	quirk_disabled = disable_quirk;
	sync_global_to_guest(vm, quirk_disabled);

	/* Identity-map the xAPIC so the guest can read its APIC ID. */
	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);

	enter_guest(vcpu);

	kvm_vm_free(vm);
}

int main(void)
{
	/* Skip unless KVM supports toggling the FIX_HYPERCALL_INSN quirk. */
	TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);

	/* Exercise both the quirk-enabled (patch) and -disabled (#UD) paths. */
	test_fix_hypercall(false);
	test_fix_hypercall(true);
}