|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH] x86emul: allow ->write_msr() to distinguish origins of writes
Only explicit writes are subject to e.g. the checking of the MSR intercept
bitmap, which extends to VM-event's hvm_monitor_msr(). Indicate to the hook
functions, by way of a new boolean parameter, which variant it is.
Fixes: 6eb43fcf8a0b ("x86emul: support SWAPGS")
Reported-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Later, in particular for nested, ->read_msr() may need to gain a similar
parameter.
Prior to nested making use of it, the new parameter is effectively dead
code with VM_EVENT=n. If we accepted MISRA rule 2.2, something would
likely need doing about this.
I've suitably re-based "x86emul: misc additions" on top of this, but I
don't think I'll re-post it just for that.
--- a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
+++ b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
@@ -569,7 +569,8 @@ static int fuzz_read_msr(
static int fuzz_write_msr(
unsigned int reg,
uint64_t val,
- struct x86_emulate_ctxt *ctxt)
+ struct x86_emulate_ctxt *ctxt,
+ bool explicit)
{
struct fuzz_state *s = ctxt->data;
struct fuzz_corpus *c = s->corpus;
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1705,7 +1705,8 @@ static int cf_check hvmemul_write_io_dis
static int cf_check hvmemul_write_msr_discard(
unsigned int reg,
uint64_t val,
- struct x86_emulate_ctxt *ctxt)
+ struct x86_emulate_ctxt *ctxt,
+ bool explicit)
{
return X86EMUL_OKAY;
}
@@ -2427,9 +2428,10 @@ static int cf_check hvmemul_read_msr(
static int cf_check hvmemul_write_msr(
unsigned int reg,
uint64_t val,
- struct x86_emulate_ctxt *ctxt)
+ struct x86_emulate_ctxt *ctxt,
+ bool explicit)
{
- int rc = hvm_msr_write_intercept(reg, val, true);
+ int rc = hvm_msr_write_intercept(reg, val, explicit);
if ( rc == X86EMUL_EXCEPTION )
x86_emul_hw_exception(X86_EXC_GP, 0, ctxt);
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -1038,7 +1038,8 @@ static int cf_check read_msr(
}
static int cf_check write_msr(
- unsigned int reg, uint64_t val, struct x86_emulate_ctxt *ctxt)
+ unsigned int reg, uint64_t val, struct x86_emulate_ctxt *ctxt,
+ bool explicit)
{
struct vcpu *curr = current;
const struct domain *currd = curr->domain;
--- a/xen/arch/x86/x86_emulate/0f01.c
+++ b/xen/arch/x86/x86_emulate/0f01.c
@@ -40,7 +40,7 @@ int x86emul_0f01(struct x86_emulate_stat
fail_if(!ops->write_msr);
rc = ops->write_msr(regs->ecx,
((uint64_t)regs->r(dx) << 32) | regs->eax,
- ctxt);
+ ctxt, true);
goto done;
}
generate_exception(X86_EXC_UD);
@@ -194,7 +194,7 @@ int x86emul_0f01(struct x86_emulate_stat
(rc = ops->read_msr(MSR_SHADOW_GS_BASE, &msr_val,
ctxt)) != X86EMUL_OKAY ||
(rc = ops->write_msr(MSR_SHADOW_GS_BASE, sreg.base,
- ctxt)) != X86EMUL_OKAY )
+ ctxt, false)) != X86EMUL_OKAY )
goto done;
sreg.base = msr_val;
if ( (rc = ops->write_segment(x86_seg_gs, &sreg,
@@ -202,7 +202,7 @@ int x86emul_0f01(struct x86_emulate_stat
{
/* Best effort unwind (i.e. no real error checking). */
if ( ops->write_msr(MSR_SHADOW_GS_BASE, msr_val,
- ctxt) == X86EMUL_EXCEPTION )
+ ctxt, false) == X86EMUL_EXCEPTION )
x86_emul_reset_event(ctxt);
goto done;
}
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -3814,7 +3814,7 @@ x86_emulate(
fail_if(ops->write_msr == NULL);
if ( (rc = ops->write_msr(_regs.ecx,
((uint64_t)_regs.r(dx) << 32) | _regs.eax,
- ctxt)) != 0 )
+ ctxt, true)) != 0 )
goto done;
break;
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -470,13 +470,15 @@ struct x86_emulate_ops
struct x86_emulate_ctxt *ctxt);
/*
- * write_dr: Write to model-specific register.
- * @reg: [IN ] Register to write.
+ * write_msr: Write to model-specific register.
+ * @reg: [IN ] Register to write.
+ * @explicit: [IN ] Whether this is an explicit WRMSR or alike.
*/
int (*write_msr)(
unsigned int reg,
uint64_t val,
- struct x86_emulate_ctxt *ctxt);
+ struct x86_emulate_ctxt *ctxt,
+ bool explicit);
/*
* cache_op: Write-back and/or invalidate cache contents.
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |