path: root/arch/s390/kvm/gmap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Guest memory management for KVM/s390
 *
 * Copyright IBM Corp. 2008, 2020, 2024
 *
 *    Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com>
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>
 *               David Hildenbrand <david@redhat.com>
 *               Janosch Frank <frankja@linux.vnet.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/pgtable.h>
#include <linux/pagemap.h>

#include <asm/lowcore.h>
#include <asm/gmap.h>
#include <asm/uv.h>

#include "gmap.h"

/**
 * gmap_make_secure() - make one guest page secure
 * @gmap: the guest gmap
 * @gaddr: the guest address that needs to be made secure
 * @uvcb: the UVCB specifying which operation needs to be performed
 *
 * Context: needs to be called with kvm->srcu held.
 * Return: 0 on success, < 0 in case of error.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct kvm *kvm = gmap->private;
	unsigned long vmaddr;

	lockdep_assert_held(&kvm->srcu);

	vmaddr = gfn_to_hva(kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return -EFAULT;
	return make_hva_secure(gmap->mm, vmaddr, uvcb);
}
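
/*
 * A minimal usage sketch: gmap_make_secure() must be called from within
 * the kvm->srcu read-side critical section, for example:
 *
 *	int idx = srcu_read_lock(&kvm->srcu);
 *
 *	rc = gmap_make_secure(gmap, gaddr, &uvcb);
 *	srcu_read_unlock(&kvm->srcu, idx);
 */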

int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
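
/*
 * A minimal sketch, assuming the usual kvm->arch.gmap backpointer to the
 * guest address space: a handler for a secure-storage access intercept
 * would resolve a single 4k guest page roughly like this, with kvm->srcu
 * already held:
 *
 *	rc = gmap_convert_to_secure(kvm->arch.gmap, gaddr & PAGE_MASK);
 *
 * where -EFAULT indicates that no host mapping backs gaddr.
 */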

/**
 * __gmap_destroy_page() - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @page: the page to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 *
 * Context: must be called while holding the mmap lock of gmap->mm
 */
static int __gmap_destroy_page(struct gmap *gmap, struct page *page)
{
	struct folio *folio = page_folio(page);
	int rc;

	/*
	 * See gmap_make_secure(): large folios cannot be secure. Small
	 * folio implies FW_LEVEL_PTE.
	 */
	if (folio_test_large(folio))
		return -EFAULT;

	rc = uv_destroy_folio(folio);
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU is
	 * still stuck at the beginning of the handler. At some point the
	 * second CPU will be able to progress, but it will no longer be
	 * able to destroy the page. In that case we do not want to
	 * terminate the process; instead, we try to export the page.
	 */
	if (rc)
		rc = uv_convert_from_secure_folio(folio);

	return rc;
}

/**
 * gmap_destroy_page() - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 *
 * Context: may sleep.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
	struct page *page;
	int rc = 0;

	mmap_read_lock(gmap->mm);
	page = gfn_to_page(gmap->private, gpa_to_gfn(gaddr));
	if (page)
		rc = __gmap_destroy_page(gmap, page);
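	/* kvm_release_page_clean() is a no-op when page is NULL */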
	kvm_release_page_clean(page);
	mmap_read_unlock(gmap->mm);
	return rc;
}
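
/*
 * A minimal sketch of a hypothetical caller: tearing down one secure
 * guest page by guest address and reporting failures could look like:
 *
 *	rc = gmap_destroy_page(kvm->arch.gmap, gaddr);
 *	if (rc)
 *		pr_warn("destroy/export of guest page %lx failed: %d\n",
 *			gaddr, rc);
 */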