|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v4 2/4] x86/hvm: Disable cross-vendor handling in #UD handler
Remove cross-vendor support now that VMs can no longer have a different
vendor than the host.
While at it, refactor the function to exit early and skip initialising
the emulation context when FEP is not enabled.
No functional change intended.
Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>
---
v4:
* Reverted refactor of the `walk` variable assignment
* Added ASSERT_UNREACHABLE() to the !hvm_fep path.
* Moved the `reinject` label to the UNIMPLEMENTED case in the emulator
result handler.
---
xen/arch/x86/hvm/hvm.c | 73 +++++++++++++++-----------------------
xen/arch/x86/hvm/svm/svm.c | 3 +-
xen/arch/x86/hvm/vmx/vmx.c | 3 +-
3 files changed, 30 insertions(+), 49 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 4d37a93c57a..4280acfc074 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3832,67 +3832,50 @@ int hvm_descriptor_access_intercept(uint64_t exit_info,
return X86EMUL_OKAY;
}
-static bool cf_check is_cross_vendor(
- const struct x86_emulate_state *state, const struct x86_emulate_ctxt *ctxt)
-{
- switch ( ctxt->opcode )
- {
- case X86EMUL_OPC(0x0f, 0x05): /* syscall */
- case X86EMUL_OPC(0x0f, 0x34): /* sysenter */
- case X86EMUL_OPC(0x0f, 0x35): /* sysexit */
- return true;
- }
-
- return false;
-}
-
void hvm_ud_intercept(struct cpu_user_regs *regs)
{
struct vcpu *cur = current;
- bool should_emulate =
- cur->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor;
struct hvm_emulate_ctxt ctxt;
+ const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
+ uint32_t walk;
+ unsigned long addr;
+ char sig[5]; /* ud2; .ascii "xen" */
- hvm_emulate_init_once(&ctxt, opt_hvm_fep ? NULL : is_cross_vendor, regs);
-
- if ( opt_hvm_fep )
+ if ( !opt_hvm_fep )
{
- const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
- uint32_t walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
- ? PFEC_user_mode : 0) | PFEC_insn_fetch;
- unsigned long addr;
- char sig[5]; /* ud2; .ascii "xen" */
-
- if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
- sizeof(sig), hvm_access_insn_fetch,
- cs, &addr) &&
- (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
- walk, NULL) == HVMTRANS_okay) &&
- (memcmp(sig, "\xf\xb" "xen", sizeof(sig)) == 0) )
- {
- regs->rip += sizeof(sig);
- regs->eflags &= ~X86_EFLAGS_RF;
-
- /* Zero the upper 32 bits of %rip if not in 64bit mode. */
- if ( !(hvm_long_mode_active(cur) && cs->l) )
- regs->rip = (uint32_t)regs->rip;
+ ASSERT_UNREACHABLE();
+ goto reinject;
+ }
- add_taint(TAINT_HVM_FEP);
+ hvm_emulate_init_once(&ctxt, NULL, regs);
- should_emulate = true;
- }
- }
+ walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
+ ? PFEC_user_mode : 0) | PFEC_insn_fetch;
- if ( !should_emulate )
+ if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
+ sizeof(sig), hvm_access_insn_fetch,
+ cs, &addr) &&
+ (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
+ walk, NULL) == HVMTRANS_okay) &&
+ (memcmp(sig, "\xf\xb" "xen", sizeof(sig)) == 0) )
{
- hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
- return;
+ regs->rip += sizeof(sig);
+ regs->eflags &= ~X86_EFLAGS_RF;
+
+ /* Zero the upper 32 bits of %rip if not in 64bit mode. */
+ if ( !(hvm_long_mode_active(cur) && cs->l) )
+ regs->rip = (uint32_t)regs->rip;
+
+ add_taint(TAINT_HVM_FEP);
}
+ else
+ goto reinject;
switch ( hvm_emulate_one(&ctxt, VIO_no_completion) )
{
case X86EMUL_UNHANDLEABLE:
case X86EMUL_UNIMPLEMENTED:
+ reinject:
hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
break;
case X86EMUL_EXCEPTION:
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 243c41fb13a..20591c4a44f 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -589,8 +589,7 @@ static void cf_check svm_cpuid_policy_changed(struct vcpu *v)
const struct cpu_policy *cp = v->domain->arch.cpu_policy;
u32 bitmap = vmcb_get_exception_intercepts(vmcb);
- if ( opt_hvm_fep ||
- (v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
+ if ( opt_hvm_fep )
bitmap |= (1U << X86_EXC_UD);
else
bitmap &= ~(1U << X86_EXC_UD);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 82c55f49aea..eda99e268d1 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -803,8 +803,7 @@ static void cf_check vmx_cpuid_policy_changed(struct vcpu *v)
const struct cpu_policy *cp = v->domain->arch.cpu_policy;
int rc = 0;
- if ( opt_hvm_fep ||
- (v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
+ if ( opt_hvm_fep )
v->arch.hvm.vmx.exception_bitmap |= (1U << X86_EXC_UD);
else
v->arch.hvm.vmx.exception_bitmap &= ~(1U << X86_EXC_UD);
--
2.43.0
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |