summary | refs | log | tree | commit | diff
path: root/arch/s390/kvm/gmap.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/s390/kvm/gmap.c')
-rw-r--r--  arch/s390/kvm/gmap.c | 103
1 files changed, 6 insertions, 97 deletions
diff --git a/arch/s390/kvm/gmap.c b/arch/s390/kvm/gmap.c
index 02adf151d4de..6d8944d1b4a0 100644
--- a/arch/s390/kvm/gmap.c
+++ b/arch/s390/kvm/gmap.c
@@ -23,116 +23,25 @@
#include "gmap.h"
/**
- * should_export_before_import - Determine whether an export is needed
- * before an import-like operation
- * @uvcb: the Ultravisor control block of the UVC to be performed
- * @mm: the mm of the process
- *
- * Returns whether an export is needed before every import-like operation.
- * This is needed for shared pages, which don't trigger a secure storage
- * exception when accessed from a different guest.
- *
- * Although considered as one, the Unpin Page UVC is not an actual import,
- * so it is not affected.
- *
- * No export is needed also when there is only one protected VM, because the
- * page cannot belong to the wrong VM in that case (there is no "other VM"
- * it can belong to).
- *
- * Return: true if an export is needed before every import, otherwise false.
- */
-static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
-{
- /*
- * The misc feature indicates, among other things, that importing a
- * shared page from a different protected VM will automatically also
- * transfer its ownership.
- */
- if (uv_has_feature(BIT_UV_FEAT_MISC))
- return false;
- if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
- return false;
- return atomic_read(&mm->context.protected_count) > 1;
-}
-
-static int __gmap_make_secure(struct gmap *gmap, struct page *page, void *uvcb)
-{
- struct folio *folio = page_folio(page);
- int rc;
-
- /*
- * Secure pages cannot be huge and userspace should not combine both.
- * In case userspace does it anyway this will result in an -EFAULT for
- * the unpack. The guest is thus never reaching secure mode.
- * If userspace plays dirty tricks and decides to map huge pages at a
- * later point in time, it will receive a segmentation fault or
- * KVM_RUN will return -EFAULT.
- */
- if (folio_test_hugetlb(folio))
- return -EFAULT;
- if (folio_test_large(folio)) {
- mmap_read_unlock(gmap->mm);
- rc = kvm_s390_wiggle_split_folio(gmap->mm, folio, true);
- mmap_read_lock(gmap->mm);
- if (rc)
- return rc;
- folio = page_folio(page);
- }
-
- if (!folio_trylock(folio))
- return -EAGAIN;
- if (should_export_before_import(uvcb, gmap->mm))
- uv_convert_from_secure(folio_to_phys(folio));
- rc = make_folio_secure(folio, uvcb);
- folio_unlock(folio);
-
- /*
- * In theory a race is possible and the folio might have become
- * large again before the folio_trylock() above. In that case, no
- * action is performed and -EAGAIN is returned; the callers will
- * have to try again later.
- * In most cases this implies running the VM again, getting the same
- * exception again, and make another attempt in this function.
- * This is expected to happen extremely rarely.
- */
- if (rc == -E2BIG)
- return -EAGAIN;
- /* The folio has too many references, try to shake some off */
- if (rc == -EBUSY) {
- mmap_read_unlock(gmap->mm);
- kvm_s390_wiggle_split_folio(gmap->mm, folio, false);
- mmap_read_lock(gmap->mm);
- return -EAGAIN;
- }
-
- return rc;
-}
-
-/**
* gmap_make_secure() - make one guest page secure
* @gmap: the guest gmap
* @gaddr: the guest address that needs to be made secure
* @uvcb: the UVCB specifying which operation needs to be performed
*
* Context: needs to be called with kvm->srcu held.
- * Return: 0 on success, < 0 in case of error (see __gmap_make_secure()).
+ * Return: 0 on success, < 0 in case of error.
*/
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
struct kvm *kvm = gmap->private;
- struct page *page;
- int rc = 0;
+ unsigned long vmaddr;
lockdep_assert_held(&kvm->srcu);
- page = gfn_to_page(kvm, gpa_to_gfn(gaddr));
- mmap_read_lock(gmap->mm);
- if (page)
- rc = __gmap_make_secure(gmap, page, uvcb);
- kvm_release_page_clean(page);
- mmap_read_unlock(gmap->mm);
-
- return rc;
+ vmaddr = gfn_to_hva(kvm, gpa_to_gfn(gaddr));
+ if (kvm_is_error_hva(vmaddr))
+ return -EFAULT;
+ return make_hva_secure(gmap->mm, vmaddr, uvcb);
}
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)