
[Xen-devel] [PATCH V3] x86/altp2m: clean up p2m_{get/set}_suppress_ve()



Move p2m_{get/set}_suppress_ve() to p2m.c, replace the incorrect
ASSERT() in p2m-pt.c with a proper error return (a guest can run in
shadow mode even on a system with virt exceptions, which would
trigger the ASSERT()), move the VMX-isms (the
cpu_has_vmx_virt_exceptions checks) to p2m_ept_{get/set}_entry(), and
fix the locking code in p2m_get_suppress_ve(), which could previously
return -ESRCH without dropping the locks it had taken.

Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>

---
Changes since V2:
 - Rebased on staging, double-checked the patch.
---
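Note for reviewers: the locking problem in p2m_get_suppress_ve() is the
early return on an invalid MFN, which left the gfn lock (and, for an
altp2m view, the p2m lock) held. A simplified sketch of the error path
before and after, condensed from the hunks below (not the literal code):

    /* Before (mem_access.c): */
    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, suppress_ve);
    if ( !mfn_valid(mfn) )
        return -ESRCH;              /* host_p2m/ap2m still locked */

    /* After (p2m.c): */
    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, suppress_ve);
    if ( !mfn_valid(mfn) )
        rc = -ESRCH;                /* fall through to the unlock path */

    if ( ap2m )
        p2m_unlock(ap2m);
    gfn_unlock(host_p2m, gfn, 0);
    return rc;

Funnelling both the success and failure cases through the same exit
path keeps the unlock logic in one place, matching what
p2m_set_suppress_ve() already does.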
 xen/arch/x86/mm/mem_access.c | 102 -------------------------------------------
 xen/arch/x86/mm/p2m-ept.c    |  11 +++++
 xen/arch/x86/mm/p2m-pt.c     |   3 +-
 xen/arch/x86/mm/p2m.c        |  90 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 103 insertions(+), 103 deletions(-)

diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index d9e64fc..3d50fe0 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -514,108 +514,6 @@ void arch_p2m_set_access_required(struct domain *d, bool access_required)
 #endif
 }
 
-#ifdef CONFIG_HVM
-/*
- * Set/clear the #VE suppress bit for a page.  Only available on VMX.
- */
-int p2m_set_suppress_ve(struct domain *d, gfn_t gfn, bool suppress_ve,
-                        unsigned int altp2m_idx)
-{
-    struct p2m_domain *host_p2m = p2m_get_hostp2m(d);
-    struct p2m_domain *ap2m = NULL;
-    struct p2m_domain *p2m;
-    mfn_t mfn;
-    p2m_access_t a;
-    p2m_type_t t;
-    int rc;
-
-    if ( !cpu_has_vmx_virt_exceptions )
-        return -EOPNOTSUPP;
-
-    /* #VE should be enabled for this vcpu. */
-    if ( gfn_eq(vcpu_altp2m(current).veinfo_gfn, INVALID_GFN) )
-        return -ENXIO;
-
-    if ( altp2m_idx > 0 )
-    {
-        if ( altp2m_idx >= MAX_ALTP2M ||
-             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
-            return -EINVAL;
-
-        p2m = ap2m = d->arch.altp2m_p2m[altp2m_idx];
-    }
-    else
-        p2m = host_p2m;
-
-    gfn_lock(host_p2m, gfn, 0);
-
-    if ( ap2m )
-        p2m_lock(ap2m);
-
-    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, NULL);
-    if ( !mfn_valid(mfn) )
-    {
-        rc = -ESRCH;
-        goto out;
-    }
-
-    rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, t, a, suppress_ve);
-
-out:
-    if ( ap2m )
-        p2m_unlock(ap2m);
-
-    gfn_unlock(host_p2m, gfn, 0);
-
-    return rc;
-}
-
-int p2m_get_suppress_ve(struct domain *d, gfn_t gfn, bool *suppress_ve,
-                        unsigned int altp2m_idx)
-{
-    struct p2m_domain *host_p2m = p2m_get_hostp2m(d);
-    struct p2m_domain *ap2m = NULL;
-    struct p2m_domain *p2m;
-    mfn_t mfn;
-    p2m_access_t a;
-    p2m_type_t t;
-
-    if ( !cpu_has_vmx_virt_exceptions )
-        return -EOPNOTSUPP;
-
-    /* #VE should be enabled for this vcpu. */
-    if ( gfn_eq(vcpu_altp2m(current).veinfo_gfn, INVALID_GFN) )
-        return -ENXIO;
-
-    if ( altp2m_idx > 0 )
-    {
-        if ( altp2m_idx >= MAX_ALTP2M ||
-             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
-            return -EINVAL;
-
-        p2m = ap2m = d->arch.altp2m_p2m[altp2m_idx];
-    }
-    else
-        p2m = host_p2m;
-
-    gfn_lock(host_p2m, gfn, 0);
-
-    if ( ap2m )
-        p2m_lock(ap2m);
-
-    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, suppress_ve);
-    if ( !mfn_valid(mfn) )
-        return -ESRCH;
-
-    if ( ap2m )
-        p2m_unlock(ap2m);
-
-    gfn_unlock(host_p2m, gfn, 0);
-
-    return 0;
-}
-#endif
-
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 1ff4f14..d376966 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -697,6 +697,17 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
     struct domain *d = p2m->domain;
 
     ASSERT(ept);
+
+    if ( !sve )
+    {
+        if ( !cpu_has_vmx_virt_exceptions )
+            return -EOPNOTSUPP;
+
+        /* #VE should be enabled for this vcpu. */
+        if ( gfn_eq(vcpu_altp2m(current).veinfo_gfn, INVALID_GFN) )
+            return -ENXIO;
+    }
+
     /*
      * the caller must make sure:
      * 1. passing valid gfn and mfn at order boundary.
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 40bfc76..33dd129 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -501,7 +501,8 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
     unsigned int flags, iommu_old_flags = 0;
     unsigned long old_mfn = mfn_x(INVALID_MFN);
 
-    ASSERT(sve != 0);
+    if ( !sve )
+        return -EOPNOTSUPP;
 
     if ( tb_init_done )
     {
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index ed2e8da..d6a8810 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2764,6 +2764,96 @@ out:
         rcu_unlock_domain(fdom);
     return rc;
 }
+
+#ifdef CONFIG_HVM
+/*
+ * Set/clear the #VE suppress bit for a page.  Only available on VMX.
+ */
+int p2m_set_suppress_ve(struct domain *d, gfn_t gfn, bool suppress_ve,
+                        unsigned int altp2m_idx)
+{
+    struct p2m_domain *host_p2m = p2m_get_hostp2m(d);
+    struct p2m_domain *ap2m = NULL;
+    struct p2m_domain *p2m;
+    mfn_t mfn;
+    p2m_access_t a;
+    p2m_type_t t;
+    int rc;
+
+    if ( altp2m_idx > 0 )
+    {
+        if ( altp2m_idx >= MAX_ALTP2M ||
+             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
+            return -EINVAL;
+
+        p2m = ap2m = d->arch.altp2m_p2m[altp2m_idx];
+    }
+    else
+        p2m = host_p2m;
+
+    gfn_lock(host_p2m, gfn, 0);
+
+    if ( ap2m )
+        p2m_lock(ap2m);
+
+    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, NULL);
+    if ( !mfn_valid(mfn) )
+    {
+        rc = -ESRCH;
+        goto out;
+    }
+
+    rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, t, a, suppress_ve);
+
+out:
+    if ( ap2m )
+        p2m_unlock(ap2m);
+
+    gfn_unlock(host_p2m, gfn, 0);
+
+    return rc;
+}
+
+int p2m_get_suppress_ve(struct domain *d, gfn_t gfn, bool *suppress_ve,
+                        unsigned int altp2m_idx)
+{
+    struct p2m_domain *host_p2m = p2m_get_hostp2m(d);
+    struct p2m_domain *ap2m = NULL;
+    struct p2m_domain *p2m;
+    mfn_t mfn;
+    p2m_access_t a;
+    p2m_type_t t;
+    int rc = 0;
+
+    if ( altp2m_idx > 0 )
+    {
+        if ( altp2m_idx >= MAX_ALTP2M ||
+             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
+            return -EINVAL;
+
+        p2m = ap2m = d->arch.altp2m_p2m[altp2m_idx];
+    }
+    else
+        p2m = host_p2m;
+
+    gfn_lock(host_p2m, gfn, 0);
+
+    if ( ap2m )
+        p2m_lock(ap2m);
+
+    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, suppress_ve);
+    if ( !mfn_valid(mfn) )
+        rc = -ESRCH;
+
+    if ( ap2m )
+        p2m_unlock(ap2m);
+
+    gfn_unlock(host_p2m, gfn, 0);
+
+    return rc;
+}
+#endif
+
 /*
  * Local variables:
  * mode: C
-- 
2.7.4


 

