[Xen-devel] [PATCH v3 11/13] x86/altp2m: define and implement alternate p2m HVMOP types.
Signed-off-by: Ed White <edmund.h.white@xxxxxxxxx>
---
xen/arch/x86/hvm/hvm.c | 201 ++++++++++++++++++++++++++++++++++++++++
xen/include/public/hvm/hvm_op.h | 69 ++++++++++++++
2 files changed, 270 insertions(+)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index d2d90c8..0d81050 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -6447,6 +6447,207 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
}
+ case HVMOP_altp2m_get_domain_state:
+ {
+ struct xen_hvm_altp2m_domain_state a;
+ struct domain *d;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ d = rcu_lock_domain_by_any_id(a.domid);
+ if ( d == NULL )
+ return -ESRCH;
+
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) && hvm_altp2m_supported() )
+ {
+ a.state = altp2m_active(d);
+ rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+ }
+
+ rcu_unlock_domain(d);
+ break;
+ }
+
+ case HVMOP_altp2m_set_domain_state:
+ {
+ struct xen_hvm_altp2m_domain_state a;
+ struct domain *d;
+ struct vcpu *v;
+ bool_t ostate;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ d = rcu_lock_domain_by_any_id(a.domid);
+ if ( d == NULL )
+ return -ESRCH;
+
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
+ !nestedhvm_enabled(d) )
+ {
+ ostate = d->arch.altp2m_active;
+ d->arch.altp2m_active = !!a.state;
+
+ rc = 0;
+
+ /* If the alternate p2m state has changed, handle appropriately */
+ if ( d->arch.altp2m_active != ostate )
+ {
+ if ( ostate || !(rc = p2m_init_altp2m_by_id(d, 0)) )
+ {
+ for_each_vcpu( d, v )
+ {
+ if ( !ostate )
+ altp2m_vcpu_initialise(v);
+ else
+ altp2m_vcpu_destroy(v);
+ }
+
+ if ( ostate )
+ p2m_flush_altp2m(d);
+ }
+ }
+ }
+
+ rcu_unlock_domain(d);
+ break;
+ }
+
+ case HVMOP_altp2m_vcpu_enable_notify:
+ {
+ struct domain *curr_d = current->domain;
+ struct vcpu *curr = current;
+ struct xen_hvm_altp2m_vcpu_enable_notify a;
+ p2m_type_t p2mt;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ if ( !is_hvm_domain(curr_d) || !hvm_altp2m_supported() ||
+ !curr_d->arch.altp2m_active ||
+             gfn_x(vcpu_altp2m(curr).veinfo_gfn) != INVALID_GFN )
+ return -EINVAL;
+
+ if ( mfn_x(get_gfn_query_unlocked(curr_d, a.gfn, &p2mt)) ==
+ INVALID_MFN )
+ return -EINVAL;
+
+ vcpu_altp2m(curr).veinfo_gfn = _gfn(a.gfn);
+ ap2m_vcpu_update_vmfunc_ve(curr);
+ rc = 0;
+
+ break;
+ }
+
+ case HVMOP_altp2m_create_p2m:
+ {
+ struct xen_hvm_altp2m_view a;
+ struct domain *d;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ d = rcu_lock_domain_by_any_id(a.domid);
+ if ( d == NULL )
+ return -ESRCH;
+
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
+ d->arch.altp2m_active &&
+ !(rc = p2m_init_next_altp2m(d, &a.view)) )
+ rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+
+ rcu_unlock_domain(d);
+ break;
+ }
+
+ case HVMOP_altp2m_destroy_p2m:
+ {
+ struct xen_hvm_altp2m_view a;
+ struct domain *d;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ d = rcu_lock_domain_by_any_id(a.domid);
+ if ( d == NULL )
+ return -ESRCH;
+
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
+ d->arch.altp2m_active )
+ rc = p2m_destroy_altp2m_by_id(d, a.view);
+
+ rcu_unlock_domain(d);
+ break;
+ }
+
+ case HVMOP_altp2m_switch_p2m:
+ {
+ struct xen_hvm_altp2m_view a;
+ struct domain *d;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ d = rcu_lock_domain_by_any_id(a.domid);
+ if ( d == NULL )
+ return -ESRCH;
+
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
+ d->arch.altp2m_active )
+ rc = p2m_switch_domain_altp2m_by_id(d, a.view);
+
+ rcu_unlock_domain(d);
+ break;
+ }
+
+ case HVMOP_altp2m_set_mem_access:
+ {
+ struct xen_hvm_altp2m_set_mem_access a;
+ struct domain *d;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ d = rcu_lock_domain_by_any_id(a.domid);
+ if ( d == NULL )
+ return -ESRCH;
+
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
+ d->arch.altp2m_active )
+            rc = p2m_set_altp2m_mem_access(d, a.view, _gfn(a.gfn),
+                                           a.hvmmem_access);
+
+ rcu_unlock_domain(d);
+ break;
+ }
+
+ case HVMOP_altp2m_change_gfn:
+ {
+ struct xen_hvm_altp2m_change_gfn a;
+ struct domain *d;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ d = rcu_lock_domain_by_any_id(a.domid);
+ if ( d == NULL )
+ return -ESRCH;
+
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
+ d->arch.altp2m_active )
+            rc = p2m_change_altp2m_gfn(d, a.view, _gfn(a.old_gfn),
+                                       _gfn(a.new_gfn));
+
+ rcu_unlock_domain(d);
+ break;
+ }
+
default:
{
gdprintk(XENLOG_DEBUG, "Bad HVM op %ld.\n", op);
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 9b84e84..3fa7b47 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -396,6 +396,75 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_evtchn_upcall_vector_t);
#endif /* defined(__i386__) || defined(__x86_64__) */
+/* Set/get the altp2m state for a domain */
+#define HVMOP_altp2m_set_domain_state 24
+#define HVMOP_altp2m_get_domain_state 25
+struct xen_hvm_altp2m_domain_state {
+ /* Domain to be updated or queried */
+ domid_t domid;
+ /* IN or OUT variable on/off */
+ uint8_t state;
+};
+typedef struct xen_hvm_altp2m_domain_state xen_hvm_altp2m_domain_state_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_domain_state_t);
+
+/* Set the current VCPU to receive altp2m event notifications */
+#define HVMOP_altp2m_vcpu_enable_notify 26
+struct xen_hvm_altp2m_vcpu_enable_notify {
+ /* #VE info area gfn */
+ uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_vcpu_enable_notify
+    xen_hvm_altp2m_vcpu_enable_notify_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_enable_notify_t);
+
+/* Create a new view */
+#define HVMOP_altp2m_create_p2m 27
+/* Destroy a view */
+#define HVMOP_altp2m_destroy_p2m 28
+/* Switch view for an entire domain */
+#define HVMOP_altp2m_switch_p2m 29
+struct xen_hvm_altp2m_view {
+ /* Domain to be updated */
+ domid_t domid;
+ /* IN/OUT variable */
+ uint16_t view;
+ /* Create view only: default access type
+ * NOTE: currently ignored */
+ uint16_t hvmmem_default_access; /* xenmem_access_t */
+};
+typedef struct xen_hvm_altp2m_view xen_hvm_altp2m_view_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_view_t);
+
+/* Notify that a page of memory is to have specific access types */
+#define HVMOP_altp2m_set_mem_access 30
+struct xen_hvm_altp2m_set_mem_access {
+ /* Domain to be updated. */
+ domid_t domid;
+ /* view */
+ uint16_t view;
+ /* Memory type */
+ uint16_t hvmmem_access; /* xenmem_access_t */
+ /* gfn */
+ uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_set_mem_access xen_hvm_altp2m_set_mem_access_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_set_mem_access_t);
+
+/* Change a p2m entry to have a different gfn->mfn mapping */
+#define HVMOP_altp2m_change_gfn 31
+struct xen_hvm_altp2m_change_gfn {
+ /* Domain to be updated. */
+ domid_t domid;
+ /* view */
+ uint16_t view;
+ /* old gfn */
+ uint64_t old_gfn;
+ /* new gfn, INVALID_GFN (~0UL) means revert */
+ uint64_t new_gfn;
+};
+typedef struct xen_hvm_altp2m_change_gfn xen_hvm_altp2m_change_gfn_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_change_gfn_t);
+
#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
/*
--
1.9.1
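
A minimal usage sketch of the new HVMOPs from the guest's point of view.
This is illustrative only and not part of the patch: it assumes the usual
guest-side HYPERVISOR_hvm_op(op, arg) hypercall wrapper, DOMID_SELF from
xen.h, XENMEM_access_x from memory.h, and two hypothetical gfns chosen by
the caller (ve_info_gfn for the #VE info area, protect_gfn for a page to
restrict).

/* Illustrative sketch only -- see the assumptions above. */
static int altp2m_demo(uint64_t ve_info_gfn, uint64_t protect_gfn)
{
    struct xen_hvm_altp2m_domain_state ds = {
        .domid = DOMID_SELF,
        .state = 1,                         /* turn altp2m on */
    };
    struct xen_hvm_altp2m_vcpu_enable_notify en = {
        .gfn = ve_info_gfn,                 /* #VE info page for this vCPU */
    };
    struct xen_hvm_altp2m_view view = {
        .domid = DOMID_SELF,                /* .view is written back by Xen */
    };
    struct xen_hvm_altp2m_set_mem_access ma;
    int rc;

    if ( (rc = HYPERVISOR_hvm_op(HVMOP_altp2m_set_domain_state, &ds)) )
        return rc;

    if ( (rc = HYPERVISOR_hvm_op(HVMOP_altp2m_vcpu_enable_notify, &en)) )
        return rc;

    /* Create a view; on success Xen fills in the allocated view id. */
    if ( (rc = HYPERVISOR_hvm_op(HVMOP_altp2m_create_p2m, &view)) )
        return rc;

    /* Make protect_gfn execute-only in the new view. */
    ma.domid = DOMID_SELF;
    ma.view = view.view;
    ma.hvmmem_access = XENMEM_access_x;     /* xenmem_access_t value */
    ma.gfn = protect_gfn;
    if ( (rc = HYPERVISOR_hvm_op(HVMOP_altp2m_set_mem_access, &ma)) )
        return rc;

    /* Run the whole domain on the new view. */
    return HYPERVISOR_hvm_op(HVMOP_altp2m_switch_p2m, &view);
}

Reverting is HVMOP_altp2m_set_domain_state with .state = 0, which (per the
handler above) destroys the per-vCPU altp2m state and flushes the alternate
p2m tables.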