[Xen-devel] [PATCH for-4.9 2/2] x86/pv: Replace do_guest_trap() with pv_inject_hw_exception()
do_guest_trap() is now functionally equivalent to pv_inject_hw_exception(),
but with a less useful API, as it requires the error code parameter to be
passed implicitly via cpu_user_regs.
Extend pv_inject_event() with a further assertion checking that hardware
exception vectors are below 32, which is an x86 architectural expectation.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>
While not strictly a bugfix for 4.9, it would be nice to have it included (in
light of the previous patch) to avoid the function duplication.
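For reference, a minimal sketch of the shape the pv_inject_hw_exception()
wrapper (introduced by the previous patch) is assumed to take: it packs the
vector and error code into a struct x86_event and hands it to
pv_inject_event(), which is why the regs-based do_guest_trap() wrapper
becomes redundant:

    /* Sketch only - assumed shape of the helper from the previous patch. */
    static inline void pv_inject_hw_exception(unsigned int vector, int errcode)
    {
        const struct x86_event event = {
            .vector = vector,
            .type = X86_EVENTTYPE_HW_EXCEPTION,
            .error_code = errcode,
        };

        pv_inject_event(&event);
    }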
---
xen/arch/x86/traps.c | 63 +++++++++++++++++++++++-----------------------------
1 file changed, 28 insertions(+), 35 deletions(-)
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index b2421c9..105c7c1 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -640,6 +640,8 @@ void pv_inject_event(const struct x86_event *event)
ASSERT(event->type == X86_EVENTTYPE_HW_EXCEPTION ||
event->type == X86_EVENTTYPE_SW_INTERRUPT);
ASSERT(vector == event->vector); /* Confirm no truncation. */
+ if ( event->type == X86_EVENTTYPE_HW_EXCEPTION )
+ ASSERT(vector < 32);
if ( use_error_code )
ASSERT(error_code != X86_EVENT_NO_EC);
else
@@ -688,19 +690,6 @@ void pv_inject_event(const struct x86_event *event)
}
}
-static inline void do_guest_trap(unsigned int trapnr,
- const struct cpu_user_regs *regs)
-{
- const struct x86_event event = {
- .vector = trapnr,
- .type = X86_EVENTTYPE_HW_EXCEPTION,
- .error_code = (((trapnr < 32) && (TRAP_HAVE_EC & (1u << trapnr)))
- ? regs->error_code : X86_EVENT_NO_EC),
- };
-
- pv_inject_event(&event);
-}
-
static void instruction_done(struct cpu_user_regs *regs, unsigned long rip)
{
regs->rip = rip;
@@ -708,7 +697,7 @@ static void instruction_done(struct cpu_user_regs *regs, unsigned long rip)
if ( regs->eflags & X86_EFLAGS_TF )
{
current->arch.debugreg[6] |= DR_STEP | DR_STATUS_RESERVED_ONE;
- do_guest_trap(TRAP_debug, regs);
+ pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
}
}
@@ -756,7 +745,7 @@ int set_guest_machinecheck_trapbounce(void)
struct vcpu *v = current;
struct trap_bounce *tb = &v->arch.pv_vcpu.trap_bounce;
- do_guest_trap(TRAP_machine_check, guest_cpu_user_regs());
+ pv_inject_hw_exception(TRAP_machine_check, X86_EVENT_NO_EC);
tb->flags &= ~TBF_EXCEPTION; /* not needed for MCE delivery path */
return !null_trap_bounce(v, tb);
}
@@ -769,7 +758,7 @@ int set_guest_nmi_trapbounce(void)
{
struct vcpu *v = current;
struct trap_bounce *tb = &v->arch.pv_vcpu.trap_bounce;
- do_guest_trap(TRAP_nmi, guest_cpu_user_regs());
+ pv_inject_hw_exception(TRAP_nmi, X86_EVENT_NO_EC);
tb->flags &= ~TBF_EXCEPTION; /* not needed for NMI delivery path */
return !null_trap_bounce(v, tb);
}
@@ -797,9 +786,13 @@ void do_trap(struct cpu_user_regs *regs)
if ( debugger_trap_entry(trapnr, regs) )
return;
+ ASSERT(trapnr < 32);
+
if ( guest_mode(regs) )
{
- do_guest_trap(trapnr, regs);
+ pv_inject_hw_exception(trapnr,
+ (TRAP_HAVE_EC & (1u << trapnr))
+ ? regs->error_code : X86_EVENT_NO_EC);
return;
}
@@ -1065,7 +1058,7 @@ static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
if ( current->arch.cpuid_faulting && !guest_kernel_mode(current, regs) )
{
regs->rip = eip;
- do_guest_trap(TRAP_gp_fault, regs);
+ pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
return EXCRET_fault_fixed;
}
@@ -1101,7 +1094,7 @@ void do_invalid_op(struct cpu_user_regs *regs)
{
if ( !emulate_invalid_rdtscp(regs) &&
!emulate_forced_invalid_op(regs) )
- do_guest_trap(TRAP_invalid_op, regs);
+ pv_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
return;
}
@@ -1229,7 +1222,7 @@ void do_int3(struct cpu_user_regs *regs)
return;
}
- do_guest_trap(TRAP_int3, regs);
+ pv_inject_hw_exception(TRAP_int3, X86_EVENT_NO_EC);
}
static void reserved_bit_page_fault(
@@ -3043,7 +3036,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
{
curr->arch.debugreg[6] |= ctxt.bpmatch | DR_STATUS_RESERVED_ONE;
if ( !(curr->arch.pv_vcpu.trap_bounce.flags & TBF_EXCEPTION) )
- do_guest_trap(TRAP_debug, regs);
+ pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
}
/* fall through */
case X86EMUL_RETRY:
@@ -3158,12 +3151,12 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
(((ar >> 13) & 3) < (regs->cs & 3)) ||
((ar & _SEGMENT_TYPE) != 0xc00) )
{
- do_guest_trap(TRAP_gp_fault, regs);
+ pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
return;
}
if ( !(ar & _SEGMENT_P) )
{
- do_guest_trap(TRAP_no_segment, regs);
+ pv_inject_hw_exception(TRAP_no_segment, regs->error_code);
return;
}
dpl = (ar >> 13) & 3;
@@ -3179,7 +3172,7 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
!(ar & _SEGMENT_P) ||
!(ar & _SEGMENT_CODE) )
{
- do_guest_trap(TRAP_gp_fault, regs);
+ pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
return;
}
@@ -3192,7 +3185,7 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
if ( PTR_ERR(state) == -X86EMUL_EXCEPTION )
pv_inject_event(&ctxt.ctxt.event);
else
- do_guest_trap(TRAP_gp_fault, regs);
+ pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
return;
}
@@ -3242,7 +3235,7 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
(opnd_sel & ~3) != regs->error_code ||
dpl < (opnd_sel & 3) )
{
- do_guest_trap(TRAP_gp_fault, regs);
+ pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
return;
}
@@ -3290,7 +3283,7 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
/* Inner stack known only for kernel ring. */
if ( (sel & 3) != GUEST_KERNEL_RPL(v->domain) )
{
- do_guest_trap(TRAP_gp_fault, regs);
+ pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
return;
}
esp = v->arch.pv_vcpu.kernel_sp;
@@ -3314,7 +3307,7 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
stkp = (unsigned int *)(unsigned long)((unsigned int)base + esp);
if ( !compat_access_ok(stkp - 4 - nparm, (4 + nparm) * 4) )
{
- do_guest_trap(TRAP_gp_fault, regs);
+ pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
return;
}
push(regs->ss);
@@ -3329,12 +3322,12 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
(ar & _SEGMENT_CODE) ||
!(ar & _SEGMENT_WR) ||
!check_stack_limit(ar, limit, esp + nparm * 4, nparm * 4) )
- return do_guest_trap(TRAP_gp_fault, regs);
+ return pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
ustkp = (unsigned int *)(unsigned long)
((unsigned int)base + regs->esp + nparm * 4);
if ( !compat_access_ok(ustkp - nparm, nparm * 4) )
{
- do_guest_trap(TRAP_gp_fault, regs);
+ pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
return;
}
do
@@ -3360,7 +3353,7 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
if ( !read_descriptor(ss, v, &base, &limit, &ar, 0) ||
((ar >> 13) & 3) != (sel & 3) )
{
- do_guest_trap(TRAP_gp_fault, regs);
+ pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
return;
}
if ( !check_stack_limit(ar, limit, esp, 2 * 4) )
@@ -3371,7 +3364,7 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
stkp = (unsigned int *)(unsigned long)((unsigned int)base + esp);
if ( !compat_access_ok(stkp - 2, 2 * 4) )
{
- do_guest_trap(TRAP_gp_fault, regs);
+ pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
return;
}
}
@@ -3451,7 +3444,7 @@ void do_general_protection(struct cpu_user_regs *regs)
}
/* Pass on GPF as is. */
- do_guest_trap(TRAP_gp_fault, regs);
+ pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
return;
gp_in_kernel:
@@ -3671,7 +3664,7 @@ void do_device_not_available(struct cpu_user_regs *regs)
if ( curr->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS )
{
- do_guest_trap(TRAP_no_device, regs);
+ pv_inject_hw_exception(TRAP_no_device, X86_EVENT_NO_EC);
curr->arch.pv_vcpu.ctrlreg[0] &= ~X86_CR0_TS;
}
else
@@ -3744,7 +3737,7 @@ void do_debug(struct cpu_user_regs *regs)
v->arch.debugreg[6] = read_debugreg(6);
ler_enable();
- do_guest_trap(TRAP_debug, regs);
+ pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
return;
out:
--
2.1.4