|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v2 3/5] x86/pv: Introduce pv_create_exception_frame()
This is a C implementation of {compat_,}create_bounce_frame(), based loosely
on the existing failsafe implementation in load_segments(). It picks up all
injection information from the trap_bounce structure.
One minor improvement is that at no point is regs->cs left with an rpl of 0 on
the root stack frame.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
v2:
* Use domain_crash() rather than domain_crash_sync(). All callers
immediately continue to {compat_}test_all_events
* Count the number of frame[] entries correctly
* Consistently use 64bit operations when adjusting the root frame
* Introduce a compat_addr_ok() check for the 32bit side. The ASM version
didn't have protection attempting to write into the compat p2m, other than
hitting a #PF while trying.
---
xen/arch/x86/pv/traps.c | 143 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 143 insertions(+)
diff --git a/xen/arch/x86/pv/traps.c b/xen/arch/x86/pv/traps.c
index 98549bc..b7d7d2b 100644
--- a/xen/arch/x86/pv/traps.c
+++ b/xen/arch/x86/pv/traps.c
@@ -245,6 +245,149 @@ int pv_raise_interrupt(struct vcpu *v, uint8_t vector)
}
/*
+ * This function emulates the behaviour of hardware when Xen needs to inject
+ * an event into a guest.
+ *
+ * It may switch from user mode to kernel mode, will write an appropriate
+ * hardware exception frame (including Xen-specific extras), and alter the
+ * root stack frame to invoke the guest kernel's correct entry point on exit
+ * from the hypervisor.
+ */
+void pv_create_exception_frame(void)
+{
+ struct vcpu *curr = current;
+ struct trap_bounce *tb = &curr->arch.pv_vcpu.trap_bounce;
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
+ const bool user_mode_frame = !guest_kernel_mode(curr, regs);
+ uint8_t *evt_mask = &vcpu_info(curr, evtchn_upcall_mask);
+ unsigned int flags, bytes, missing;
+
+ ASSERT_NOT_IN_ATOMIC();
+
+ if ( unlikely(null_trap_bounce(curr, tb)) )
+ {
+ gprintk(XENLOG_ERR, "Fatal: Attempting to inject null trap bounce\n");
+ domain_crash(curr->domain);
+ return;
+ }
+
+ /* Fold the upcall mask and architectural IOPL into the guest's rflags. */
+ flags = regs->rflags & ~(X86_EFLAGS_IF | X86_EFLAGS_IOPL);
+ flags |= ((*evt_mask ? 0 : X86_EFLAGS_IF) |
+ (VM_ASSIST(curr->domain, architectural_iopl)
+ ? curr->arch.pv_vcpu.iopl : 0));
+
+ if ( is_pv_32bit_vcpu(curr) )
+ {
+ /* { [ERRCODE,] EIP, CS/MASK , EFLAGS, [ESP, SS] } */
+ unsigned int frame[6], *ptr = frame, ksp =
+ (user_mode_frame ? curr->arch.pv_vcpu.kernel_sp : regs->esp);
+
+ if ( tb->flags & TBF_EXCEPTION_ERRCODE )
+ *ptr++ = tb->error_code;
+
+ *ptr++ = regs->eip;
+ *ptr++ = regs->cs | ((unsigned int)*evt_mask << 16);
+ *ptr++ = flags;
+
+ if ( user_mode_frame )
+ {
+ *ptr++ = regs->esp;
+ *ptr++ = regs->ss;
+ }
+
+ /* Copy the constructed frame to the guest kernel stack. */
+ bytes = _p(ptr) - _p(frame);
+ ksp -= bytes;
+
+ if ( unlikely(!__compat_access_ok(curr->domain, ksp, bytes)) )
+ {
+ gprintk(XENLOG_ERR, "Fatal: Bad guest kernel stack %p\n", _p(ksp));
+ domain_crash(curr->domain);
+ return;
+ }
+
+ if ( unlikely((missing = __copy_to_user(_p(ksp), frame, bytes)) != 0) )
+ {
+ gprintk(XENLOG_ERR, "Fatal: Fault while writing exception frame\n");
+ show_page_walk(ksp + missing);
+ domain_crash(curr->domain);
+ return;
+ }
+
+ /* Rewrite our stack frame. */
+ regs->rip = (uint32_t)tb->eip;
+ regs->cs = tb->cs;
+ regs->rflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_RF |
+ X86_EFLAGS_NT | X86_EFLAGS_TF);
+ regs->rsp = ksp;
+ if ( user_mode_frame )
+ regs->ss = curr->arch.pv_vcpu.kernel_ss;
+ }
+ else
+ {
+ /* { RCX, R11, [ERRCODE,] RIP, CS/MASK, RFLAGS, RSP, SS } */
+ unsigned long frame[8], *ptr = frame, ksp =
+ (user_mode_frame ? curr->arch.pv_vcpu.kernel_sp : regs->rsp) & ~0xf;
+
+ if ( user_mode_frame )
+ toggle_guest_mode(curr);
+
+ *ptr++ = regs->rcx;
+ *ptr++ = regs->r11;
+
+ if ( tb->flags & TBF_EXCEPTION_ERRCODE )
+ *ptr++ = tb->error_code;
+
+ *ptr++ = regs->rip;
+ *ptr++ = ((user_mode_frame ? regs->cs : regs->cs & ~3) |
+ ((unsigned long)*evt_mask << 32));
+ *ptr++ = flags;
+ *ptr++ = regs->rsp;
+ *ptr++ = regs->ss;
+
+ /* Copy the constructed frame to the guest kernel stack. */
+ bytes = _p(ptr) - _p(frame);
+ ksp -= bytes;
+
+ if ( unlikely(!__addr_ok(ksp)) )
+ {
+ gprintk(XENLOG_ERR, "Fatal: Bad guest kernel stack %p\n", _p(ksp));
+ domain_crash(curr->domain);
+ return;
+ }
+
+ if ( unlikely((missing = __copy_to_user(_p(ksp), frame, bytes)) != 0) )
+ {
+ gprintk(XENLOG_ERR, "Fatal: Fault while writing exception frame\n");
+ show_page_walk(ksp + missing);
+ domain_crash(curr->domain);
+ return;
+ }
+
+ /* Rewrite our stack frame. */
+ regs->entry_vector |= TRAP_syscall;
+ regs->rip = tb->eip;
+ regs->cs = FLAT_KERNEL_CS;
+ regs->rflags &= ~(X86_EFLAGS_AC | X86_EFLAGS_VM | X86_EFLAGS_RF |
+ X86_EFLAGS_NT | X86_EFLAGS_TF);
+ regs->rsp = ksp;
+ regs->ss = FLAT_KERNEL_SS;
+ }
+
+ /* Mask events if requested. */
+ if ( tb->flags & TBF_INTERRUPT )
+ *evt_mask = 1;
+
+ /*
+ * Clobber the injection information now it has been completed. Buggy
+ * attempts to inject the same event twice will hit the null_trap_bounce()
+ * check above.
+ */
+ *tb = (struct trap_bounce){};
+}
+
+/*
* Local variables:
* mode: C
* c-file-style: "BSD"
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |