|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH] x86/hvm: Unilaterally inject #UD for unknown VMExits
While we do this for unknown user mode exits, crashing for supervisor mode
exits is unhelpful. Intel in particular expect the unknown case to be #UD
because they do introduce new instructions with new VMEXIT_* codes without
other enablement controls. e.g. MSRLIST, USER_MSR, MSR_IMM, but AMD have
RDPRU and SKINIT as examples too.
A guest which gets its CPUID checks wrong can trigger the VMExit, and the
VMExit handler would need to emulate the CPUID check and #UD anyway. This
would be a guest bug, and giving it an exception is the right course of
action.
Drop the "#UD or crash" semantics and make it exclusively #UD. Rename the
labels to match the changed semantics. Fold the cases which were plain #UD's
too.
One case that was wrong beforehand was EPT_MISCONFIG. Failing to resolve is
always a Xen bug, and not even necessarily due to the instruction under %rip.
Convert it to be an unconditional domain_crash() with applicable diagnostic
information.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
xen/arch/x86/hvm/svm/svm.c | 29 +++++++++--------------------
xen/arch/x86/hvm/vmx/vmx.c | 26 +++++++++++---------------
2 files changed, 20 insertions(+), 35 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 2d7c598ffe99..637b47084c25 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -66,15 +66,6 @@ static bool amd_erratum383_found __read_mostly;
static uint64_t osvw_length, osvw_status;
static DEFINE_SPINLOCK(osvw_lock);
-/* Only crash the guest if the problem originates in kernel mode. */
-static void svm_crash_or_fault(struct vcpu *v)
-{
- if ( vmcb_get_cpl(v->arch.hvm.svm.vmcb) )
- hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
- else
- domain_crash(v->domain);
-}
-
void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len)
{
struct vcpu *curr = current;
@@ -85,7 +76,7 @@ void __update_guest_eip(struct cpu_user_regs *regs, unsigned
int inst_len)
if ( unlikely(inst_len > MAX_INST_LEN) )
{
gdprintk(XENLOG_ERR, "Bad instruction length %u\n", inst_len);
- svm_crash_or_fault(curr);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
return;
}
@@ -2872,7 +2863,7 @@ void asmlinkage svm_vmexit_handler(void)
* task switch. Decode under %rip to find its length.
*/
if ( insn_len < 0 && (insn_len = svm_get_task_switch_insn_len()) == 0 )
- goto crash_or_fault;
+ goto inject_ud;
hvm_task_switch(vmcb->ei.task_switch.sel,
vmcb->ei.task_switch.iret ? TSW_iret :
@@ -2984,13 +2975,6 @@ void asmlinkage svm_vmexit_handler(void)
svm_vmexit_do_rdtsc(regs, exit_reason == VMEXIT_RDTSCP);
break;
- case VMEXIT_MONITOR:
- case VMEXIT_MWAIT:
- case VMEXIT_SKINIT:
- case VMEXIT_RDPRU:
- hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
- break;
-
case VMEXIT_VMRUN:
svm_vmexit_do_vmrun(regs, v, regs->rax);
break;
@@ -3083,8 +3067,13 @@ void asmlinkage svm_vmexit_handler(void)
gprintk(XENLOG_ERR, "Unexpected vmexit: reason %#"PRIx64", "
"exitinfo1 %#"PRIx64", exitinfo2 %#"PRIx64"\n",
exit_reason, vmcb->exitinfo1, vmcb->exitinfo2);
- crash_or_fault:
- svm_crash_or_fault(v);
+ fallthrough;
+ case VMEXIT_MONITOR:
+ case VMEXIT_MWAIT:
+ case VMEXIT_SKINIT:
+ case VMEXIT_RDPRU:
+ inject_ud:
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
break;
}
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 6b407226c44c..1af5861ef921 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -4337,7 +4337,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs
*regs)
__vmread(EXIT_QUALIFICATION, &exit_qualification);
rc = hvm_monitor_vmexit(exit_reason, exit_qualification);
if ( rc < 0 )
- goto exit_and_crash;
+ goto unexpected_vmexit;
if ( rc )
return;
}
@@ -4490,7 +4490,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs
*regs)
trap_type, insn_len, 0);
if ( rc < 0 )
- goto exit_and_crash;
+ goto unexpected_vmexit;
if ( !rc )
vmx_propagate_intr(intr_info);
}
@@ -4511,7 +4511,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs
*regs)
insn_len, 0);
if ( rc < 0 )
- goto exit_and_crash;
+ goto unexpected_vmexit;
if ( !rc )
vmx_propagate_intr(intr_info);
}
@@ -4557,7 +4557,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs
*regs)
case X86_EXC_NMI:
if ( MASK_EXTR(intr_info, INTR_INFO_INTR_TYPE_MASK) !=
X86_ET_NMI )
- goto exit_and_crash;
+ goto unexpected_vmexit;
TRACE(TRC_HVM_NMI);
/* Already handled above. */
break;
@@ -4571,7 +4571,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs
*regs)
break;
default:
TRACE(TRC_HVM_TRAP, vector);
- goto exit_and_crash;
+ goto unexpected_vmexit;
}
break;
}
@@ -4627,7 +4627,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs
*regs)
* rc > 0 paused waiting for response, work here is done
*/
if ( rc < 0 )
- goto exit_and_crash;
+ goto unexpected_vmexit;
if ( !rc )
update_guest_eip(); /* Safe: CPUID */
break;
@@ -4773,7 +4773,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs
*regs)
rc = hvm_monitor_io(io_qual.port, bytes, io_qual.in, io_qual.str);
if ( rc < 0 )
- goto exit_and_crash;
+ goto unexpected_vmexit;
if ( rc )
break;
@@ -4820,7 +4820,8 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs
*regs)
__vmread(GUEST_PHYSICAL_ADDRESS, &gpa);
if ( !ept_handle_misconfig(gpa) )
- goto exit_and_crash;
+ domain_crash(v->domain,
+ "Unhandled EPT_MISCONFIG, gpa %#"PRIpaddr"\n", gpa);
break;
}
@@ -4902,14 +4903,9 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs
*regs)
case EXIT_REASON_INVPCID:
/* fall through */
default:
- exit_and_crash:
+ unexpected_vmexit:
gprintk(XENLOG_ERR, "Unexpected vmexit: reason %lu\n", exit_reason);
-
- if ( vmx_get_cpl() )
- hvm_inject_hw_exception(X86_EXC_UD,
- X86_EVENT_NO_EC);
- else
- domain_crash(v->domain);
+ hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
break;
}
base-commit: 117a46287427db2a6f5fe219eb2031d7ca39b603
--
2.39.5
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |