[Xen-devel] [PATCH V6 3/3] xen/vm_event: Deny register writes if refused by vm_event reply
Deny register writes if a vm_event client subscribed to mov_to_msr or
control register write events forbids them. Currently supported for
MSR, CR0, CR3 and CR4 events.
Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Tamas K Lengyel <tlengyel@xxxxxxxxxxx>
---
Changes since V5:
- Now using vzalloc() / vfree() for d->arch.event_write_data,
and setting it to NULL after releasing it in arch_domain_destroy()
for safety.
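For illustration only (not part of the patch itself), a vm_event consumer that
wants to veto a control register write would reply roughly as sketched below,
filling in a vm_event_response_t with the new VM_EVENT_FLAG_DENY flag. The ring
setup and event loop (e.g. via libxc's monitor interface) are omitted, and the
VM_EVENT_FLAG_VCPU_PAUSED handling assumes the event was delivered synchronously
with the vCPU paused:

/* Illustrative sketch only -- not part of this patch. */
#include <string.h>
#include <xen/vm_event.h>

static void deny_ctrlreg_write(const vm_event_request_t *req,
                               vm_event_response_t *rsp)
{
    memset(rsp, 0, sizeof(*rsp));

    rsp->version = VM_EVENT_INTERFACE_VERSION;
    rsp->vcpu_id = req->vcpu_id;
    rsp->reason  = req->reason;          /* VM_EVENT_REASON_WRITE_CTRLREG */
    rsp->u.write_ctrlreg.index = req->u.write_ctrlreg.index;

    /*
     * VM_EVENT_FLAG_DENY makes hvm_do_resume() drop the deferred write
     * instead of committing it; VM_EVENT_FLAG_VCPU_PAUSED unpauses the
     * vCPU that was paused for the synchronous event.
     */
    rsp->flags = VM_EVENT_FLAG_DENY | VM_EVENT_FLAG_VCPU_PAUSED;
}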
---
xen/arch/x86/domain.c | 3 +
xen/arch/x86/hvm/emulate.c | 8 +--
xen/arch/x86/hvm/event.c | 5 +-
xen/arch/x86/hvm/hvm.c | 118 ++++++++++++++++++++++++++++++++-----
xen/arch/x86/hvm/svm/nestedsvm.c | 14 ++---
xen/arch/x86/hvm/svm/svm.c | 2 +-
xen/arch/x86/hvm/vmx/vmx.c | 15 +++--
xen/arch/x86/hvm/vmx/vvmx.c | 18 +++---
xen/arch/x86/vm_event.c | 43 ++++++++++++++
xen/common/vm_event.c | 4 ++
xen/include/asm-arm/vm_event.h | 7 +++
xen/include/asm-x86/domain.h | 18 +++++-
xen/include/asm-x86/hvm/event.h | 9 ++-
xen/include/asm-x86/hvm/support.h | 9 +--
xen/include/asm-x86/vm_event.h | 3 +
xen/include/public/vm_event.h | 5 ++
16 files changed, 235 insertions(+), 46 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 1ef9fad..045f6ff 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -668,6 +668,9 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
void arch_domain_destroy(struct domain *d)
{
+ vfree(d->arch.event_write_data);
+ d->arch.event_write_data = NULL;
+
if ( has_hvm_container_domain(d) )
hvm_domain_destroy(d);
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 2766919..bc7514a 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1428,14 +1428,14 @@ static int hvmemul_write_cr(
switch ( reg )
{
case 0:
- return hvm_set_cr0(val);
+ return hvm_set_cr0(val, 1);
case 2:
current->arch.hvm_vcpu.guest_cr[2] = val;
return X86EMUL_OKAY;
case 3:
- return hvm_set_cr3(val);
+ return hvm_set_cr3(val, 1);
case 4:
- return hvm_set_cr4(val);
+ return hvm_set_cr4(val, 1);
default:
break;
}
@@ -1456,7 +1456,7 @@ static int hvmemul_write_msr(
uint64_t val,
struct x86_emulate_ctxt *ctxt)
{
- return hvm_msr_write_intercept(reg, val);
+ return hvm_msr_write_intercept(reg, val, 1);
}
static int hvmemul_wbinvd(
diff --git a/xen/arch/x86/hvm/event.c b/xen/arch/x86/hvm/event.c
index 17638ea..042e583 100644
--- a/xen/arch/x86/hvm/event.c
+++ b/xen/arch/x86/hvm/event.c
@@ -90,7 +90,7 @@ static int hvm_event_traps(uint8_t sync, vm_event_request_t *req)
return 1;
}
-void hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
+bool_t hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
{
struct arch_domain *currad = &current->domain->arch;
unsigned int ctrlreg_bitmask = monitor_ctrlreg_bitmask(index);
@@ -109,7 +109,10 @@ void hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
hvm_event_traps(currad->monitor.write_ctrlreg_sync & ctrlreg_bitmask,
&req);
+ return 1;
}
+
+ return 0;
}
void hvm_event_msr(unsigned int msr, uint64_t value)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 22dbab1..c07e3ef 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -52,6 +52,7 @@
#include <asm/traps.h>
#include <asm/mc146818rtc.h>
#include <asm/mce.h>
+#include <asm/monitor.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/vpt.h>
#include <asm/hvm/support.h>
@@ -519,6 +520,35 @@ void hvm_do_resume(struct vcpu *v)
break;
}
+ if ( unlikely(d->arch.event_write_data) )
+ {
+ struct monitor_write_data *w = &d->arch.event_write_data[v->vcpu_id];
+
+ if ( w->do_write.msr )
+ {
+ hvm_msr_write_intercept(w->msr, w->value, 0);
+ w->do_write.msr = 0;
+ }
+
+ if ( w->do_write.cr0 )
+ {
+ hvm_set_cr0(w->cr0, 0);
+ w->do_write.cr0 = 0;
+ }
+
+ if ( w->do_write.cr4 )
+ {
+ hvm_set_cr4(w->cr4, 0);
+ w->do_write.cr4 = 0;
+ }
+
+ if ( w->do_write.cr3 )
+ {
+ hvm_set_cr3(w->cr3, 0);
+ w->do_write.cr3 = 0;
+ }
+ }
+
/* Inject pending hw/sw trap */
if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
{
@@ -3123,13 +3153,13 @@ int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
switch ( cr )
{
case 0:
- return hvm_set_cr0(val);
+ return hvm_set_cr0(val, 1);
case 3:
- return hvm_set_cr3(val);
+ return hvm_set_cr3(val, 1);
case 4:
- return hvm_set_cr4(val);
+ return hvm_set_cr4(val, 1);
case 8:
vlapic_set_reg(vcpu_vlapic(curr), APIC_TASKPRI, ((val & 0x0f) << 4));
@@ -3226,12 +3256,13 @@ static void hvm_update_cr(struct vcpu *v, unsigned int cr, unsigned long value)
hvm_update_guest_cr(v, cr);
}
-int hvm_set_cr0(unsigned long value)
+int hvm_set_cr0(unsigned long value, bool_t may_defer)
{
struct vcpu *v = current;
struct domain *d = v->domain;
unsigned long gfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
struct page_info *page;
+ struct arch_domain *currad = &v->domain->arch;
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
@@ -3261,6 +3292,22 @@ int hvm_set_cr0(unsigned long value)
goto gpf;
}
+ if ( may_defer && unlikely(currad->monitor.write_ctrlreg_enabled &
+ monitor_ctrlreg_bitmask(VM_EVENT_X86_CR0)) &&
+ value != old_value )
+ {
+ ASSERT(currad->event_write_data != NULL);
+
+ if ( hvm_event_crX(CR0, value, old_value) )
+ {
+ /* The actual write will occur in hvm_do_resume(), if permitted. */
+ currad->event_write_data[v->vcpu_id].do_write.cr0 = 1;
+ currad->event_write_data[v->vcpu_id].cr0 = value;
+
+ return X86EMUL_OKAY;
+ }
+ }
+
if ( (value & X86_CR0_PG) && !(old_value & X86_CR0_PG) )
{
if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
@@ -3327,7 +3374,6 @@ int hvm_set_cr0(unsigned long value)
hvm_funcs.handle_cd(v, value);
hvm_update_cr(v, 0, value);
- hvm_event_crX(CR0, value, old_value);
if ( (value ^ old_value) & X86_CR0_PG ) {
if ( !nestedhvm_vmswitch_in_progress(v) &&
nestedhvm_vcpu_in_guestmode(v) )
@@ -3343,11 +3389,28 @@ int hvm_set_cr0(unsigned long value)
return X86EMUL_EXCEPTION;
}
-int hvm_set_cr3(unsigned long value)
+int hvm_set_cr3(unsigned long value, bool_t may_defer)
{
struct vcpu *v = current;
struct page_info *page;
- unsigned long old;
+ unsigned long old = v->arch.hvm_vcpu.guest_cr[3];
+ struct arch_domain *currad = &v->domain->arch;
+
+ if ( may_defer && unlikely(currad->monitor.write_ctrlreg_enabled &
+ monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3)) &&
+ value != old )
+ {
+ ASSERT(currad->event_write_data != NULL);
+
+ if ( hvm_event_crX(CR3, value, old) )
+ {
+ /* The actual write will occur in hvm_do_resume(), if permitted. */
+ currad->event_write_data[v->vcpu_id].do_write.cr3 = 1;
+ currad->event_write_data[v->vcpu_id].cr3 = value;
+
+ return X86EMUL_OKAY;
+ }
+ }
if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) &&
(value != v->arch.hvm_vcpu.guest_cr[3]) )
@@ -3365,10 +3428,8 @@ int hvm_set_cr3(unsigned long value)
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
}
- old=v->arch.hvm_vcpu.guest_cr[3];
v->arch.hvm_vcpu.guest_cr[3] = value;
paging_update_cr3(v);
- hvm_event_crX(CR3, value, old);
return X86EMUL_OKAY;
bad_cr3:
@@ -3377,10 +3438,11 @@ int hvm_set_cr3(unsigned long value)
return X86EMUL_UNHANDLEABLE;
}
-int hvm_set_cr4(unsigned long value)
+int hvm_set_cr4(unsigned long value, bool_t may_defer)
{
struct vcpu *v = current;
unsigned long old_cr;
+ struct arch_domain *currad = &v->domain->arch;
if ( value & hvm_cr4_guest_reserved_bits(v, 0) )
{
@@ -3408,8 +3470,23 @@ int hvm_set_cr4(unsigned long value)
goto gpf;
}
+ if ( may_defer && unlikely(currad->monitor.write_ctrlreg_enabled &
+ monitor_ctrlreg_bitmask(VM_EVENT_X86_CR4)) &&
+ value != old_cr )
+ {
+ ASSERT(currad->event_write_data != NULL);
+
+ if ( hvm_event_crX(CR4, value, old_cr) )
+ {
+ /* The actual write will occur in hvm_do_resume(), if permitted. */
+ currad->event_write_data[v->vcpu_id].do_write.cr4 = 1;
+ currad->event_write_data[v->vcpu_id].cr4 = value;
+
+ return X86EMUL_OKAY;
+ }
+ }
+
hvm_update_cr(v, 4, value);
- hvm_event_crX(CR4, value, old_cr);
/*
* Modifying CR4.{PSE,PAE,PGE,SMEP}, or clearing CR4.PCIDE
@@ -3873,7 +3950,7 @@ void hvm_task_switch(
goto out;
- if ( hvm_set_cr3(tss.cr3) )
+ if ( hvm_set_cr3(tss.cr3, 1) )
goto out;
regs->eip = tss.eip;
@@ -4575,12 +4652,14 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
goto out;
}
-int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
+int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
+ bool_t may_defer)
{
struct vcpu *v = current;
bool_t mtrr;
unsigned int edx, index;
int ret = X86EMUL_OKAY;
+ struct arch_domain *currad = &current->domain->arch;
HVMTRACE_3D(MSR_WRITE, msr,
(uint32_t)msr_content, (uint32_t)(msr_content >> 32));
@@ -4588,7 +4667,18 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
hvm_cpuid(1, NULL, NULL, NULL, &edx);
mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
- hvm_event_msr(msr, msr_content);
+ if ( may_defer && unlikely(currad->monitor.mov_to_msr_enabled) )
+ {
+ ASSERT(currad->event_write_data != NULL);
+
+ /* The actual write will occur in hvm_do_resume() (if permitted). */
+ currad->event_write_data[v->vcpu_id].do_write.msr = 1;
+ currad->event_write_data[v->vcpu_id].msr = msr;
+ currad->event_write_data[v->vcpu_id].value = msr_content;
+
+ hvm_event_msr(msr, msr_content);
+ return X86EMUL_OKAY;
+ }
switch ( msr )
{
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 2653bc1..f22b7d1 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -274,7 +274,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
/* CR4 */
v->arch.hvm_vcpu.guest_cr[4] = n1vmcb->_cr4;
- rc = hvm_set_cr4(n1vmcb->_cr4);
+ rc = hvm_set_cr4(n1vmcb->_cr4, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
@@ -283,7 +283,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
svm->ns_cr0, v->arch.hvm_vcpu.guest_cr[0]);
v->arch.hvm_vcpu.guest_cr[0] = n1vmcb->_cr0 | X86_CR0_PE;
n1vmcb->rflags &= ~X86_EFLAGS_VM;
- rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE);
+ rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
@@ -309,7 +309,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
v->arch.guest_table = pagetable_null();
/* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
}
- rc = hvm_set_cr3(n1vmcb->_cr3);
+ rc = hvm_set_cr3(n1vmcb->_cr3, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
@@ -534,7 +534,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
/* CR4 */
v->arch.hvm_vcpu.guest_cr[4] = ns_vmcb->_cr4;
- rc = hvm_set_cr4(ns_vmcb->_cr4);
+ rc = hvm_set_cr4(ns_vmcb->_cr4, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
@@ -542,7 +542,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
cr0 = nestedsvm_fpu_vmentry(svm->ns_cr0, ns_vmcb, n1vmcb, n2vmcb);
v->arch.hvm_vcpu.guest_cr[0] = ns_vmcb->_cr0;
- rc = hvm_set_cr0(cr0);
+ rc = hvm_set_cr0(cr0, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
@@ -558,7 +558,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
/* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
- rc = hvm_set_cr3(ns_vmcb->_cr3);
+ rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
} else if (paging_mode_hap(v->domain)) {
@@ -570,7 +570,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
* we assume it intercepts page faults.
*/
/* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
- rc = hvm_set_cr3(ns_vmcb->_cr3);
+ rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
} else {
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 70de49e..b8bba71 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1949,7 +1949,7 @@ static void svm_do_msr_access(struct cpu_user_regs *regs)
if ( (inst_len = __get_instruction_length(v, INSTR_WRMSR)) == 0 )
return;
msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
- rc = hvm_msr_write_intercept(regs->ecx, msr_content);
+ rc = hvm_msr_write_intercept(regs->ecx, msr_content, 1);
}
if ( rc == X86EMUL_OKAY )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index bc3212f..d3183a8 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2016,9 +2016,16 @@ static int vmx_cr_access(unsigned long exit_qualification)
}
case VMX_CONTROL_REG_ACCESS_TYPE_CLTS: {
unsigned long old = curr->arch.hvm_vcpu.guest_cr[0];
- curr->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
+ unsigned long value = old & ~X86_CR0_TS;
+
+ /*
+ * Special case unlikely to be interesting to a
+ * VM_EVENT_FLAG_DENY-capable application, so the hvm_event_crX()
+ * return value is ignored for now.
+ */
+ hvm_event_crX(CR0, value, old);
+ curr->arch.hvm_vcpu.guest_cr[0] = value;
vmx_update_guest_cr(curr, 0);
- hvm_event_crX(CR0, curr->arch.hvm_vcpu.guest_cr[0], old);
HVMTRACE_0D(CLTS);
break;
}
@@ -2030,7 +2037,7 @@ static int vmx_cr_access(unsigned long exit_qualification)
(VMX_CONTROL_REG_ACCESS_DATA(exit_qualification) &
(X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS));
HVMTRACE_LONG_1D(LMSW, value);
- return hvm_set_cr0(value);
+ return hvm_set_cr0(value, 1);
}
default:
BUG();
@@ -3053,7 +3060,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
{
uint64_t msr_content;
msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
- if ( hvm_msr_write_intercept(regs->ecx, msr_content) == X86EMUL_OKAY )
+ if ( hvm_msr_write_intercept(regs->ecx, msr_content, 1) == X86EMUL_OKAY )
update_guest_eip(); /* Safe: WRMSR */
break;
}
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 72dd9c8..555fdfa 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1034,15 +1034,16 @@ static void load_shadow_guest_state(struct vcpu *v)
nvcpu->guest_cr[0] = __get_vvmcs(vvmcs, CR0_READ_SHADOW);
nvcpu->guest_cr[4] = __get_vvmcs(vvmcs, CR4_READ_SHADOW);
- hvm_set_cr0(__get_vvmcs(vvmcs, GUEST_CR0));
- hvm_set_cr4(__get_vvmcs(vvmcs, GUEST_CR4));
- hvm_set_cr3(__get_vvmcs(vvmcs, GUEST_CR3));
+ hvm_set_cr0(__get_vvmcs(vvmcs, GUEST_CR0), 1);
+ hvm_set_cr4(__get_vvmcs(vvmcs, GUEST_CR4), 1);
+ hvm_set_cr3(__get_vvmcs(vvmcs, GUEST_CR3), 1);
control = __get_vvmcs(vvmcs, VM_ENTRY_CONTROLS);
if ( control & VM_ENTRY_LOAD_GUEST_PAT )
hvm_set_guest_pat(v, __get_vvmcs(vvmcs, GUEST_PAT));
if ( control & VM_ENTRY_LOAD_PERF_GLOBAL_CTRL )
- hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL, __get_vvmcs(vvmcs, GUEST_PERF_GLOBAL_CTRL));
+ hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+ __get_vvmcs(vvmcs, GUEST_PERF_GLOBAL_CTRL), 0);
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
@@ -1235,15 +1236,16 @@ static void load_vvmcs_host_state(struct vcpu *v)
__vmwrite(vmcs_h2g_field[i].guest_field, r);
}
- hvm_set_cr0(__get_vvmcs(vvmcs, HOST_CR0));
- hvm_set_cr4(__get_vvmcs(vvmcs, HOST_CR4));
- hvm_set_cr3(__get_vvmcs(vvmcs, HOST_CR3));
+ hvm_set_cr0(__get_vvmcs(vvmcs, HOST_CR0), 1);
+ hvm_set_cr4(__get_vvmcs(vvmcs, HOST_CR4), 1);
+ hvm_set_cr3(__get_vvmcs(vvmcs, HOST_CR3), 1);
control = __get_vvmcs(vvmcs, VM_EXIT_CONTROLS);
if ( control & VM_EXIT_LOAD_HOST_PAT )
hvm_set_guest_pat(v, __get_vvmcs(vvmcs, HOST_PAT));
if ( control & VM_EXIT_LOAD_PERF_GLOBAL_CTRL )
- hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL, __get_vvmcs(vvmcs, HOST_PERF_GLOBAL_CTRL));
+ hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+ __get_vvmcs(vvmcs, HOST_PERF_GLOBAL_CTRL), 1);
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index ec856fb..5fba6b7 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -22,12 +22,20 @@
#include <xen/sched.h>
#include <asm/hvm/hvm.h>
+#include <asm/vm_event.h>
/* Implicitly serialized by the domctl lock. */
int vm_event_init_domain(struct domain *d)
{
struct vcpu *v;
+ if ( !d->arch.event_write_data )
+ d->arch.event_write_data =
+ vzalloc(sizeof(struct monitor_write_data) * d->max_vcpus);
+
+ if ( !d->arch.event_write_data )
+ return -ENOMEM;
+
for_each_vcpu ( d, v )
{
if ( v->arch.vm_event.emul_read_data )
@@ -51,6 +59,9 @@ void vm_event_cleanup_domain(struct domain *d)
{
struct vcpu *v;
+ vfree(d->arch.event_write_data);
+ d->arch.event_write_data = NULL;
+
for_each_vcpu ( d, v )
{
xfree(v->arch.vm_event.emul_read_data);
@@ -66,6 +77,38 @@ void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
hvm_toggle_singlestep(v);
}
+void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
+{
+ if ( rsp->flags & VM_EVENT_FLAG_DENY )
+ {
+ struct monitor_write_data *w =
+ &v->domain->arch.event_write_data[v->vcpu_id];
+
+ ASSERT(v->domain->arch.event_write_data != NULL);
+
+ switch ( rsp->reason )
+ {
+ case VM_EVENT_REASON_MOV_TO_MSR:
+ w->do_write.msr = 0;
+ break;
+ case VM_EVENT_REASON_WRITE_CTRLREG:
+ switch ( rsp->u.write_ctrlreg.index )
+ {
+ case VM_EVENT_X86_CR0:
+ w->do_write.cr0 = 0;
+ break;
+ case VM_EVENT_X86_CR3:
+ w->do_write.cr3 = 0;
+ break;
+ case VM_EVENT_X86_CR4:
+ w->do_write.cr4 = 0;
+ break;
+ }
+ break;
+ }
+ }
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 0007d70..4c6bf98 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -393,6 +393,10 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
*/
switch ( rsp.reason )
{
+ case VM_EVENT_REASON_MOV_TO_MSR:
+ case VM_EVENT_REASON_WRITE_CTRLREG:
+ vm_event_register_write_resume(v, &rsp);
+ break;
#ifdef HAS_MEM_ACCESS
case VM_EVENT_REASON_MEM_ACCESS:
diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
index 20469a8..0833a65 100644
--- a/xen/include/asm-arm/vm_event.h
+++ b/xen/include/asm-arm/vm_event.h
@@ -21,6 +21,7 @@
#define __ASM_ARM_VM_EVENT_H__
#include <xen/sched.h>
+#include <xen/vm_event.h>
static inline
int vm_event_init_domain(struct domain *d)
@@ -41,4 +42,10 @@ void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
/* Not supported on ARM. */
}
+static inline
+void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
+{
+ /* Not supported on ARM. */
+}
+
#endif /* __ASM_ARM_VM_EVENT_H__ */
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 9fbbdd9..7a9e96f 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -247,6 +247,21 @@ struct pv_domain
struct mapcache_domain mapcache;
};
+struct monitor_write_data {
+ struct {
+ unsigned int msr : 1;
+ unsigned int cr0 : 1;
+ unsigned int cr3 : 1;
+ unsigned int cr4 : 1;
+ } do_write;
+
+ uint32_t msr;
+ uint64_t value;
+ uint64_t cr0;
+ uint64_t cr3;
+ uint64_t cr4;
+};
+
struct arch_domain
{
struct page_info *perdomain_l3_pg;
@@ -360,6 +375,8 @@ struct arch_domain
/* Mem_access emulation control */
bool_t mem_access_emulate_enabled;
+
+ struct monitor_write_data *event_write_data;
} __cacheline_aligned;
#define has_arch_pdevs(d) (!list_empty(&(d)->arch.pdev_list))
@@ -516,7 +533,6 @@ struct arch_vcpu
unsigned long eip;
struct vm_event_emul_read_data *emul_read_data;
} vm_event;
-
};
smap_check_policy_t smap_policy_change(struct vcpu *v,
diff --git a/xen/include/asm-x86/hvm/event.h b/xen/include/asm-x86/hvm/event.h
index ab5abd0..c082c20 100644
--- a/xen/include/asm-x86/hvm/event.h
+++ b/xen/include/asm-x86/hvm/event.h
@@ -18,8 +18,13 @@
#ifndef __ASM_X86_HVM_EVENT_H__
#define __ASM_X86_HVM_EVENT_H__
-/* Called for current VCPU on crX/MSR changes by guest */
-void hvm_event_cr(unsigned int index, unsigned long value, unsigned long old);
+/*
+ * Called for current VCPU on crX/MSR changes by guest.
+ * The event might not fire if the client has subscribed to it in onchangeonly
+ * mode, hence the bool_t return type for control register write events.
+ */
+bool_t hvm_event_cr(unsigned int index, unsigned long value,
+ unsigned long old);
#define hvm_event_crX(what, new, old) \
hvm_event_cr(VM_EVENT_X86_##what, new, old)
void hvm_event_msr(unsigned int msr, uint64_t value);
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 05ef5c5..95d3bb2 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -124,11 +124,12 @@ void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value);
/* These functions all return X86EMUL return codes. */
int hvm_set_efer(uint64_t value);
-int hvm_set_cr0(unsigned long value);
-int hvm_set_cr3(unsigned long value);
-int hvm_set_cr4(unsigned long value);
+int hvm_set_cr0(unsigned long value, bool_t may_defer);
+int hvm_set_cr3(unsigned long value, bool_t may_defer);
+int hvm_set_cr4(unsigned long value, bool_t may_defer);
int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
-int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content);
+int hvm_msr_write_intercept(
+ unsigned int msr, uint64_t msr_content, bool_t may_defer);
int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h
index 3881783..2bcfe26 100644
--- a/xen/include/asm-x86/vm_event.h
+++ b/xen/include/asm-x86/vm_event.h
@@ -21,6 +21,7 @@
#define __ASM_X86_VM_EVENT_H__
#include <xen/sched.h>
+#include <xen/vm_event.h>
int vm_event_init_domain(struct domain *d);
@@ -28,4 +29,6 @@ void vm_event_cleanup_domain(struct domain *d);
void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v);
+void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp);
+
#endif /* __ASM_X86_VM_EVENT_H__ */
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index f889139..fbc76b2 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -74,6 +74,11 @@
* VM_EVENT_FLAG_SET_EMUL_READ_DATA are set, only the latter will be honored).
*/
#define VM_EVENT_FLAG_SET_EMUL_READ_DATA (1 << 5)
+ /*
+ * Deny completion of the operation that triggered the event.
+ * Currently only useful for MSR, CR0, CR3 and CR4 write events.
+ */
+#define VM_EVENT_FLAG_DENY (1 << 6)
/*
* Reasons for the vm event request
--
1.7.9.5
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel