|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [PATCH v3 2/4] x86/hvm: Disable cross-vendor handling in #UD handler
On 13.02.2026 12:42, Alejandro Vallejo wrote:
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3832,69 +3832,47 @@ int hvm_descriptor_access_intercept(uint64_t
> exit_info,
> return X86EMUL_OKAY;
> }
>
> -static bool cf_check is_cross_vendor(
> - const struct x86_emulate_state *state, const struct x86_emulate_ctxt
> *ctxt)
> -{
> - switch ( ctxt->opcode )
> - {
> - case X86EMUL_OPC(0x0f, 0x05): /* syscall */
> - case X86EMUL_OPC(0x0f, 0x34): /* sysenter */
> - case X86EMUL_OPC(0x0f, 0x35): /* sysexit */
> - return true;
> - }
> -
> - return false;
> -}
> -
> void hvm_ud_intercept(struct cpu_user_regs *regs)
> {
> struct vcpu *cur = current;
> - bool should_emulate =
> - cur->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor;
> struct hvm_emulate_ctxt ctxt;
> + const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
> + uint32_t walk = PFEC_insn_fetch;
> + unsigned long addr;
> + char sig[5]; /* ud2; .ascii "xen" */
>
> - hvm_emulate_init_once(&ctxt, opt_hvm_fep ? NULL : is_cross_vendor, regs);
> + if ( !opt_hvm_fep )
> + goto reinject;
Is this possible at all? That is, if the check is being kept anyway, shouldn't
there also be an ASSERT_UNREACHABLE() alongside it?
> - if ( opt_hvm_fep )
> - {
> - const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
> - uint32_t walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
> - ? PFEC_user_mode : 0) | PFEC_insn_fetch;
Why is this initializer not retained?
> - unsigned long addr;
> - char sig[5]; /* ud2; .ascii "xen" */
> -
> - if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
> - sizeof(sig), hvm_access_insn_fetch,
> - cs, &addr) &&
> - (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
> - walk, NULL) == HVMTRANS_okay) &&
> - (memcmp(sig, "\xf\xb" "xen", sizeof(sig)) == 0) )
> - {
> - regs->rip += sizeof(sig);
> - regs->eflags &= ~X86_EFLAGS_RF;
> + hvm_emulate_init_once(&ctxt, NULL, regs);
>
> - /* Zero the upper 32 bits of %rip if not in 64bit mode. */
> - if ( !(hvm_long_mode_active(cur) && cs->l) )
> - regs->rip = (uint32_t)regs->rip;
> + if ( ctxt.seg_reg[x86_seg_ss].dpl == 3 )
> + walk |= PFEC_user_mode;
>
> - add_taint(TAINT_HVM_FEP);
> + if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
> + sizeof(sig), hvm_access_insn_fetch,
> + cs, &addr) &&
> + (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
> + walk, NULL) == HVMTRANS_okay) &&
> + (memcmp(sig, "\xf\xb" "xen", sizeof(sig)) == 0) )
> + {
> + regs->rip += sizeof(sig);
> + regs->eflags &= ~X86_EFLAGS_RF;
>
> - should_emulate = true;
> - }
> - }
> + /* Zero the upper 32 bits of %rip if not in 64bit mode. */
> + if ( !(hvm_long_mode_active(cur) && cs->l) )
> + regs->rip = (uint32_t)regs->rip;
>
> - if ( !should_emulate )
> - {
> - hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
> - return;
> + add_taint(TAINT_HVM_FEP);
> }
> + else
> + goto reinject;
>
> switch ( hvm_emulate_one(&ctxt, VIO_no_completion) )
> {
> case X86EMUL_UNHANDLEABLE:
> case X86EMUL_UNIMPLEMENTED:
> - hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
> - break;
> + goto reinject;
How about placing the reinject label here, along with the two case labels?
Jan
> case X86EMUL_EXCEPTION:
> hvm_inject_event(&ctxt.ctxt.event);
> /* fall through */
> @@ -3902,6 +3880,11 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
> hvm_emulate_writeback(&ctxt);
> break;
> }
> +
> + return;
> +
> + reinject:
> + hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
> }
>
> enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack
> intack)
|
Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.