
[Xen-devel] [PATCH 09/11] x86/altp2m: define and implement alternate p2m HVMOP types.



Signed-off-by: Ed White <edmund.h.white@xxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c          | 217 ++++++++++++++++++++++++++++++++++++++++
 xen/include/public/hvm/hvm_op.h |  68 +++++++++++++
 2 files changed, 285 insertions(+)
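
(Not part of the patch, for illustration only.)  A minimal sketch of how an
HVM guest might exercise the new ops on itself once this series is applied:
switch altp2m on for the domain, then register a #VE info page for the
current vcpu.  It assumes a Linux-style HYPERVISOR_hvm_op() wrapper and that
the HVMOP_altp2m_* additions from the hvm_op.h hunk below are visible to the
guest; enable_altp2m_self() and ve_info_pfn are hypothetical names.

    /* Illustrative only -- not part of this patch.  ve_info_pfn is the guest
     * pfn of an already-allocated, zeroed #VE info page. */
    #include <xen/interface/xen.h>          /* DOMID_SELF */
    #include <xen/interface/hvm/hvm_op.h>   /* HVMOP_altp2m_* (hunk below) */
    #include <asm/xen/hypercall.h>          /* HYPERVISOR_hvm_op() */

    static int enable_altp2m_self(unsigned long ve_info_pfn)
    {
        struct xen_hvm_altp2m_domain_state ds = {
            .domid = DOMID_SELF,
            .state = 1,                     /* turn alternate p2m support on */
        };
        struct xen_hvm_altp2m_vcpu_enable_notify ve = {
            .pfn = ve_info_pfn,             /* #VE info area for this vcpu */
        };
        int rc;

        rc = HYPERVISOR_hvm_op(HVMOP_altp2m_set_domain_state, &ds);
        if ( rc )
            return rc;

        /* Only valid once altp2m is active for the domain. */
        return HYPERVISOR_hvm_op(HVMOP_altp2m_vcpu_enable_notify, &ve);
    }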

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e6f64a3..afe16bf 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -6145,6 +6145,223 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
         break;
     }
 
+    case HVMOP_altp2m_get_domain_state:
+    {
+        struct xen_hvm_altp2m_domain_state a;
+        struct domain *d;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        d = rcu_lock_domain_by_any_id(a.domid);
+        if ( d == NULL )
+            return -ESRCH;
+
+        rc = -EINVAL;
+        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() )
+            goto param_fail9;
+
+        a.state = altp2mhvm_active(d);
+        rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+
+    param_fail9:
+        rcu_unlock_domain(d);
+        break;
+    }
+
+    case HVMOP_altp2m_set_domain_state:
+    {
+        struct xen_hvm_altp2m_domain_state a;
+        struct domain *d;
+        struct vcpu *v;
+        bool_t ostate;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        d = rcu_lock_domain_by_any_id(a.domid);
+        if ( d == NULL )
+            return -ESRCH;
+
+        rc = -EINVAL;
+        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
+             nestedhvm_enabled(d) )
+            goto param_fail10;
+
+        ostate = d->arch.altp2m_active;
+        d->arch.altp2m_active = !!a.state;
+
+        /* If the alternate p2m state has changed, handle appropriately */
+        if ( d->arch.altp2m_active != ostate )
+        {
+            if ( !ostate && !p2m_init_altp2m_by_id(d, 0) )
+                goto param_fail10;
+
+            for_each_vcpu( d, v )
+                if ( !ostate )
+                    altp2mhvm_vcpu_initialise(v);
+                else
+                    altp2mhvm_vcpu_destroy(v);
+
+            if ( ostate )
+                p2m_flush_altp2m(d);
+        }
+
+        rc = 0;
+
+    param_fail10:
+        rcu_unlock_domain(d);
+        break;
+    }
+
+    case HVMOP_altp2m_vcpu_enable_notify:
+    {
+        struct vcpu *curr = current;
+        struct xen_hvm_altp2m_vcpu_enable_notify a;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        if ( !is_hvm_domain(curr_d) || !hvm_altp2m_supported() ||
+             !curr_d->arch.altp2m_active || vcpu_altp2mhvm(curr).veinfo )
+            return -EINVAL;
+
+        vcpu_altp2mhvm(curr).veinfo = a.pfn;
+        ahvm_vcpu_update_vmfunc_ve(curr);
+        rc = 0;
+
+        break;
+    }
+
+    case HVMOP_altp2m_create_p2m:
+    {
+        struct xen_hvm_altp2m_view a;
+        struct domain *d;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        d = rcu_lock_domain_by_any_id(a.domid);
+        if ( d == NULL )
+            return -ESRCH;
+
+        rc = -EINVAL;
+        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
+             !d->arch.altp2m_active )
+            goto param_fail11;
+
+        if ( !p2m_init_next_altp2m(d, &a.view) )
+            goto param_fail11;
+
+        p2m_set_altp2m_mem_access(d, a.view, ~0ul, a.hvmmem_default_access);
+
+        rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+
+    param_fail11:
+        rcu_unlock_domain(d);
+        break;
+    }
+
+    case HVMOP_altp2m_destroy_p2m:
+    {
+        struct xen_hvm_altp2m_view a;
+        struct domain *d;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        d = rcu_lock_domain_by_any_id(a.domid);
+        if ( d == NULL )
+            return -ESRCH;
+
+        rc = -EINVAL;
+        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
+             !d->arch.altp2m_active )
+            goto param_fail12;
+
+        if ( p2m_destroy_altp2m_by_id(d, a.view) )
+            rc = 0;
+
+    param_fail12:
+        rcu_unlock_domain(d);
+        break;
+    }
+
+    case HVMOP_altp2m_switch_p2m:
+    {
+        struct xen_hvm_altp2m_view a;
+        struct domain *d;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        d = rcu_lock_domain_by_any_id(a.domid);
+        if ( d == NULL )
+            return -ESRCH;
+
+        rc = -EINVAL;
+        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
+             !d->arch.altp2m_active )
+            goto param_fail13;
+
+        if ( p2m_switch_domain_altp2m_by_id(d, a.view) )
+            rc = 0;
+
+    param_fail13:
+        rcu_unlock_domain(d);
+        break;
+    }
+
+    case HVMOP_altp2m_set_mem_access:
+    {
+        struct xen_hvm_altp2m_set_mem_access a;
+        struct domain *d;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        d = rcu_lock_domain_by_any_id(a.domid);
+        if ( d == NULL )
+            return -ESRCH;
+
+        rc = -EINVAL;
+        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
+             !d->arch.altp2m_active )
+            goto param_fail14;
+
+        if ( p2m_set_altp2m_mem_access(d, a.view, a.pfn, a.hvmmem_access) )
+            rc = 0;
+
+    param_fail14:
+        rcu_unlock_domain(d);
+        break;
+    }
+
+    case HVMOP_altp2m_change_pfn:
+    {
+        struct xen_hvm_altp2m_change_pfn a;
+        struct domain *d;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        d = rcu_lock_domain_by_any_id(a.domid);
+        if ( d == NULL )
+            return -ESRCH;
+
+        rc = -EINVAL;
+        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
+             !d->arch.altp2m_active )
+            goto param_fail15;
+
+        if ( p2m_change_altp2m_pfn(d, a.view, a.old_pfn, a.new_pfn) )
+            rc = 0;
+
+    param_fail15:
+        rcu_unlock_domain(d);
+        break;
+    }
+
     default:
     {
         gdprintk(XENLOG_DEBUG, "Bad HVM op %ld.\n", op);
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index eeb0a60..ea542ec 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -369,6 +369,74 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
 
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
+/* Set/get the altp2m state for a domain */
+#define HVMOP_altp2m_set_domain_state     23
+#define HVMOP_altp2m_get_domain_state     24
+struct xen_hvm_altp2m_domain_state {
+    /* Domain to be updated or queried */
+    domid_t domid;
+    /* IN (set) or OUT (get) variable: altp2m on (1) / off (0) */
+    uint8_t state;
+};
+typedef struct xen_hvm_altp2m_domain_state xen_hvm_altp2m_domain_state_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_domain_state_t);
+
+/* Set the current VCPU to receive altp2m event notifications */
+#define HVMOP_altp2m_vcpu_enable_notify   25
+struct xen_hvm_altp2m_vcpu_enable_notify {
+    /* #VE info area pfn */
+    uint64_t pfn;
+};
+typedef struct xen_hvm_altp2m_vcpu_enable_notify xen_hvm_altp2m_vcpu_enable_notify_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_enable_notify_t);
+
+/* Create a new view */
+#define HVMOP_altp2m_create_p2m   26
+/* Destroy a view */
+#define HVMOP_altp2m_destroy_p2m  27
+/* Switch view for an entire domain */
+#define HVMOP_altp2m_switch_p2m   28
+struct xen_hvm_altp2m_view {
+    /* Domain to be updated */
+    domid_t domid;
+    /* IN/OUT variable */
+    uint16_t view;
+    /* Create view only: default access type */
+    uint16_t hvmmem_default_access; /* xenmem_access_t */
+};
+typedef struct xen_hvm_altp2m_view xen_hvm_altp2m_view_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_view_t);
+
+/* Notify that a page of memory is to have specific access types */
+#define HVMOP_altp2m_set_mem_access 29
+struct xen_hvm_altp2m_set_mem_access {
+    /* Domain to be updated. */
+    domid_t domid;
+    /* view */
+    uint16_t view;
+    /* Access type */
+    uint16_t hvmmem_access; /* xenmem_access_t */
+    /* pfn */
+    uint64_t pfn;
+};
+typedef struct xen_hvm_altp2m_set_mem_access xen_hvm_altp2m_set_mem_access_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_set_mem_access_t);
+
+/* Change a p2m entry to map a different pfn */
+#define HVMOP_altp2m_change_pfn 30
+struct xen_hvm_altp2m_change_pfn {
+    /* Domain to be updated. */
+    domid_t domid;
+    /* view */
+    uint16_t view;
+    /* old pfn */
+    uint64_t old_pfn;
+    /* new pfn, -1 means revert */
+    uint64_t new_pfn;
+};
+typedef struct xen_hvm_altp2m_change_pfn xen_hvm_altp2m_change_pfn_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_change_pfn_t);
+
 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
 
 /*
-- 
1.9.1
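
(Not part of the patch, for illustration only.)  An equally hypothetical
sketch of the privileged side of the interface defined above: create a new
view with a permissive default, make one pfn execute-only in it, then switch
the whole target domain to that view.  hvmop() is a stand-in for whatever
HVMOP hypercall path the caller has (HYPERVISOR_hvm_op() in a kernel agent,
or a privcmd-based wrapper in the toolstack); the XENMEM_access_* values come
from the existing public memory.h as seen by tools.

    /* Illustrative only -- hvmop() is a stand-in for the caller's HVMOP
     * hypercall path. */
    #include <xen/xen.h>           /* domid_t */
    #include <xen/hvm/hvm_op.h>    /* structs and HVMOP_altp2m_* from above */
    #include <xen/memory.h>        /* xenmem_access_t values */

    extern int hvmop(unsigned int op, void *arg);   /* hypothetical */

    static int restrict_pfn_in_new_view(domid_t domid, uint64_t pfn)
    {
        struct xen_hvm_altp2m_view view = {
            .domid = domid,
            .hvmmem_default_access = XENMEM_access_rwx,
        };
        struct xen_hvm_altp2m_set_mem_access ma;
        int rc;

        /* Xen picks the view id and returns it in view.view. */
        rc = hvmop(HVMOP_altp2m_create_p2m, &view);
        if ( rc )
            return rc;

        /* Make a single pfn execute-only in the new view. */
        ma.domid = domid;
        ma.view = view.view;
        ma.hvmmem_access = XENMEM_access_x;
        ma.pfn = pfn;
        rc = hvmop(HVMOP_altp2m_set_mem_access, &ma);
        if ( rc )
            return rc;

        /* Switch every vcpu of the domain to the restricted view. */
        return hvmop(HVMOP_altp2m_switch_p2m, &view);
    }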

