|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 4/7] vm-event/x86: use vm_event_vcpu_enter properly
After introducing vm_event_vcpu_enter, it makes sense to move the following
code there:
- handling of monitor_write_data from hvm_do_resume
- enabling/disabling CPU_BASED_CR3_LOAD_EXITING from vmx_update_guest_cr(v, 0)
Signed-off-by: Corneliu ZUZU <czuzu@xxxxxxxxxxxxxxx>
---
xen/arch/x86/hvm/hvm.c | 62 +++++--------------------
xen/arch/x86/hvm/vmx/vmx.c | 12 ++---
xen/arch/x86/monitor.c | 9 ----
xen/arch/x86/vm_event.c | 102 +++++++++++++++++++++++++++++++++++++++++
xen/include/asm-x86/vm_event.h | 5 +-
5 files changed, 119 insertions(+), 71 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 770bb50..2f48846 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -462,52 +462,6 @@ void hvm_do_resume(struct vcpu *v)
if ( !handle_hvm_io_completion(v) )
return;
- if ( unlikely(v->arch.vm_event) )
- {
- struct monitor_write_data *w = &v->arch.vm_event->write_data;
-
- if ( v->arch.vm_event->emulate_flags )
- {
- enum emul_kind kind = EMUL_KIND_NORMAL;
-
- if ( v->arch.vm_event->emulate_flags &
- VM_EVENT_FLAG_SET_EMUL_READ_DATA )
- kind = EMUL_KIND_SET_CONTEXT;
- else if ( v->arch.vm_event->emulate_flags &
- VM_EVENT_FLAG_EMULATE_NOWRITE )
- kind = EMUL_KIND_NOWRITE;
-
- hvm_mem_access_emulate_one(kind, TRAP_invalid_op,
- HVM_DELIVER_NO_ERROR_CODE);
-
- v->arch.vm_event->emulate_flags = 0;
- }
-
- if ( w->do_write.msr )
- {
- hvm_msr_write_intercept(w->msr, w->value, 0);
- w->do_write.msr = 0;
- }
-
- if ( w->do_write.cr0 )
- {
- hvm_set_cr0(w->cr0, 0);
- w->do_write.cr0 = 0;
- }
-
- if ( w->do_write.cr4 )
- {
- hvm_set_cr4(w->cr4, 0);
- w->do_write.cr4 = 0;
- }
-
- if ( w->do_write.cr3 )
- {
- hvm_set_cr3(w->cr3, 0);
- w->do_write.cr3 = 0;
- }
- }
-
vm_event_vcpu_enter(v);
/* Inject pending hw/sw trap */
@@ -2199,7 +2153,9 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
if ( hvm_event_crX(CR0, value, old_value) )
{
- /* The actual write will occur in hvm_do_resume(), if permitted. */
+ /* The actual write will occur in vcpu_enter_write_data(), if
+ * permitted.
+ */
v->arch.vm_event->write_data.do_write.cr0 = 1;
v->arch.vm_event->write_data.cr0 = value;
@@ -2301,7 +2257,9 @@ int hvm_set_cr3(unsigned long value, bool_t may_defer)
if ( hvm_event_crX(CR3, value, old) )
{
- /* The actual write will occur in hvm_do_resume(), if permitted. */
+ /* The actual write will occur in vcpu_enter_write_data(), if
+ * permitted.
+ */
v->arch.vm_event->write_data.do_write.cr3 = 1;
v->arch.vm_event->write_data.cr3 = value;
@@ -2381,7 +2339,9 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
if ( hvm_event_crX(CR4, value, old_cr) )
{
- /* The actual write will occur in hvm_do_resume(), if permitted. */
+ /* The actual write will occur in vcpu_enter_write_data(), if
+ * permitted.
+ */
v->arch.vm_event->write_data.do_write.cr4 = 1;
v->arch.vm_event->write_data.cr4 = value;
@@ -3761,7 +3721,9 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
{
ASSERT(v->arch.vm_event);
- /* The actual write will occur in hvm_do_resume() (if permitted). */
+ /* The actual write will occur in vcpu_enter_write_data(), if
+ * permitted.
+ */
v->arch.vm_event->write_data.do_write.msr = 1;
v->arch.vm_event->write_data.msr = msr;
v->arch.vm_event->write_data.value = msr_content;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index b43b94a..8b76ef9 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -35,7 +35,6 @@
#include <asm/guest_access.h>
#include <asm/debugreg.h>
#include <asm/msr.h>
-#include <asm/paging.h>
#include <asm/p2m.h>
#include <asm/mem_sharing.h>
#include <asm/hvm/emulate.h>
@@ -58,7 +57,6 @@
#include <asm/hvm/nestedhvm.h>
#include <asm/altp2m.h>
#include <asm/event.h>
-#include <asm/monitor.h>
#include <public/arch-x86/cpuid.h>
static bool_t __initdata opt_force_ept;
@@ -1432,18 +1430,16 @@ static void vmx_update_guest_cr(struct vcpu *v,
unsigned int cr)
if ( paging_mode_hap(v->domain) )
{
/* Manage GUEST_CR3 when CR0.PE=0. */
+ uint32_t old_ctls = v->arch.hvm_vmx.exec_control;
uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING);
+
v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
if ( !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
v->arch.hvm_vmx.exec_control |= cr3_ctls;
- /* Trap CR3 updates if CR3 memory events are enabled. */
- if ( v->domain->arch.monitor.write_ctrlreg_enabled &
- monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3) )
- v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
-
- vmx_update_cpu_exec_control(v);
+ if ( old_ctls != v->arch.hvm_vmx.exec_control )
+ vmx_update_cpu_exec_control(v);
}
if ( !nestedhvm_vcpu_in_guestmode(v) )
diff --git a/xen/arch/x86/monitor.c b/xen/arch/x86/monitor.c
index 1fec412..1e5445f 100644
--- a/xen/arch/x86/monitor.c
+++ b/xen/arch/x86/monitor.c
@@ -20,7 +20,6 @@
*/
#include <asm/monitor.h>
-#include <public/vm_event.h>
int arch_monitor_domctl_event(struct domain *d,
struct xen_domctl_monitor_op *mop)
@@ -62,14 +61,6 @@ int arch_monitor_domctl_event(struct domain *d,
else
ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;
- if ( VM_EVENT_X86_CR3 == mop->u.mov_to_cr.index )
- {
- struct vcpu *v;
- /* Latches new CR3 mask through CR0 code. */
- for_each_vcpu ( d, v )
- hvm_update_guest_cr(v, 0);
- }
-
domain_unpause(d);
break;
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index f7eb24a..94b50fc 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -19,6 +19,9 @@
*/
#include <xen/vm_event.h>
+#include <asm/monitor.h>
+#include <asm/paging.h>
+#include <asm/hvm/vmx/vmx.h>
/* Implicitly serialized by the domctl lock. */
int vm_event_init_domain(struct domain *d)
@@ -179,6 +182,105 @@ void vm_event_fill_regs(vm_event_request_t *req)
req->data.regs.x86.cs_arbytes = seg.attr.bytes;
}
+static inline void vcpu_enter_write_data(struct vcpu *v)
+{
+ struct monitor_write_data *w;
+
+ if ( likely(!v->arch.vm_event) )
+ return;
+
+ w = &v->arch.vm_event->write_data;
+
+ if ( unlikely(v->arch.vm_event->emulate_flags) )
+ {
+ enum emul_kind kind = EMUL_KIND_NORMAL;
+
+ if ( v->arch.vm_event->emulate_flags &
+ VM_EVENT_FLAG_SET_EMUL_READ_DATA )
+ kind = EMUL_KIND_SET_CONTEXT;
+ else if ( v->arch.vm_event->emulate_flags &
+ VM_EVENT_FLAG_EMULATE_NOWRITE )
+ kind = EMUL_KIND_NOWRITE;
+
+ hvm_mem_access_emulate_one(kind, TRAP_invalid_op,
+ HVM_DELIVER_NO_ERROR_CODE);
+
+ v->arch.vm_event->emulate_flags = 0;
+ }
+
+ if ( w->do_write.msr )
+ {
+ hvm_msr_write_intercept(w->msr, w->value, 0);
+ w->do_write.msr = 0;
+ }
+
+ if ( w->do_write.cr0 )
+ {
+ hvm_set_cr0(w->cr0, 0);
+ w->do_write.cr0 = 0;
+ }
+
+ if ( w->do_write.cr4 )
+ {
+ hvm_set_cr4(w->cr4, 0);
+ w->do_write.cr4 = 0;
+ }
+
+ if ( w->do_write.cr3 )
+ {
+ hvm_set_cr3(w->cr3, 0);
+ w->do_write.cr3 = 0;
+ }
+}
+
+static inline void vcpu_enter_adjust_traps(struct vcpu *v)
+{
+ struct domain *d = v->domain;
+ struct arch_vmx_struct *avmx = &v->arch.hvm_vmx;
+ bool_t cr3_ldexit, cr3_vmevent;
+ unsigned int cr3_bitmask;
+
+ /* Adjust CR3 load-exiting (for monitor vm-events). */
+
+ cr3_bitmask = monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3);
+ cr3_vmevent = !!(d->arch.monitor.write_ctrlreg_enabled & cr3_bitmask);
+ cr3_ldexit = !!(avmx->exec_control & CPU_BASED_CR3_LOAD_EXITING);
+
+ if ( likely(cr3_vmevent == cr3_ldexit) )
+ return;
+
+ if ( !paging_mode_hap(d) )
+ {
+ /* non-hap domains trap CR3 writes unconditionally */
+ ASSERT(cr3_ldexit);
+ return;
+ }
+
+ /*
+ * If CR0.PE=0, CR3 load exiting must remain enabled.
+ * See vmx_update_guest_cr code motion for cr = 0.
+ */
+ if ( cr3_ldexit && !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
+ return;
+
+ if ( cr3_vmevent )
+ avmx->exec_control |= CPU_BASED_CR3_LOAD_EXITING;
+ else
+ avmx->exec_control &= ~CPU_BASED_CR3_LOAD_EXITING;
+
+ vmx_vmcs_enter(v);
+ vmx_update_cpu_exec_control(v);
+ vmx_vmcs_exit(v);
+}
+
+void arch_vm_event_vcpu_enter(struct vcpu *v)
+{
+ /* vmx only */
+ ASSERT( cpu_has_vmx );
+ vcpu_enter_write_data(v);
+ vcpu_enter_adjust_traps(v);
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h
index 6fb3b58..c4b5def 100644
--- a/xen/include/asm-x86/vm_event.h
+++ b/xen/include/asm-x86/vm_event.h
@@ -43,10 +43,7 @@ void vm_event_set_registers(struct vcpu *v,
vm_event_response_t *rsp);
void vm_event_fill_regs(vm_event_request_t *req);
-static inline void arch_vm_event_vcpu_enter(struct vcpu *v)
-{
- /* Nothing to do. */
-}
+void arch_vm_event_vcpu_enter(struct vcpu *v);
/*
* Monitor vm-events.
--
2.5.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |