[Xen-devel] [PATCH RFC v2 11/12] x86: modify interrupt handlers to support stack switching
Modify the interrupt handlers to switch stacks on interrupt entry in
case they are running on a per-vcpu stack. The same applies to
returning to the guest: in case the context to be loaded is located on
a per-vcpu stack, switch to that stack before returning to the guest.
Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
xen/arch/x86/x86_64/asm-offsets.c | 4 ++++
xen/arch/x86/x86_64/compat/entry.S | 5 ++++-
xen/arch/x86/x86_64/entry.S | 15 +++++++++++++--
xen/common/wait.c | 8 ++++----
xen/include/asm-x86/asm_defns.h | 19 +++++++++++++++++++
xen/include/asm-x86/current.h | 10 +++++++++-
6 files changed, 53 insertions(+), 8 deletions(-)
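
As a reviewer aid, here is a minimal C sketch (not part of the patch) of
the decision the new SWITCH_FROM_VCPU_STACK assembler macro encodes. The
cpu_info fields and the ON_VCPUSTACK flag are the ones introduced earlier
in this series; the mock struct and flag value below only model the
pieces used here:

    #define ON_VCPUSTACK 1 /* placeholder; the real value comes from this series */

    /* Sketch only: mock of the cpu_info fields this patch relies on. */
    struct cpu_info_sketch {
        unsigned long stack_bottom_cpu; /* saved per-physical-CPU %rsp */
        unsigned int flags;             /* ON_VCPUSTACK, VCPUSTACK_ACTIVE */
    };

    /* SWITCH_FROM_VCPU_STACK in C: if the handler was entered on a
     * per-vcpu stack, continue on the per-physical-CPU stack recorded
     * beforehand; otherwise keep the current stack pointer. */
    static unsigned long stack_after_entry(const struct cpu_info_sketch *ci,
                                           unsigned long rsp)
    {
        return (ci->flags & ON_VCPUSTACK) ? ci->stack_bottom_cpu : rsp;
    }

SWITCH_TO_VCPU_STACK is the trivial inverse: the context to be restored
is passed in %rdi, so loading %rdi into %rsp moves execution onto
whichever stack holds that context.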
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index e136af6b99..0da756e7af 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -137,6 +137,10 @@ void __dummy__(void)
OFFSET(CPUINFO_processor_id, struct cpu_info, processor_id);
OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
OFFSET(CPUINFO_cr4, struct cpu_info, cr4);
+ OFFSET(CPUINFO_stack_bottom_cpu, struct cpu_info, stack_bottom_cpu);
+ OFFSET(CPUINFO_flags, struct cpu_info, flags);
+ DEFINE(ASM_ON_VCPUSTACK, ON_VCPUSTACK);
+ DEFINE(ASM_VCPUSTACK_ACTIVE, VCPUSTACK_ACTIVE);
DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
BLANK();
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index abf3fcae48..b8d74e83db 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -19,6 +19,7 @@ ENTRY(entry_int82)
movl $HYPERCALL_VECTOR, 4(%rsp)
SAVE_ALL compat=1 /* DPL1 gate, restricted to 32bit PV guests only. */
mov %rsp, %rdi
+ SWITCH_FROM_VCPU_STACK
CR4_PV32_RESTORE
GET_CURRENT(bx)
@@ -109,6 +110,7 @@ compat_process_trap:
/* %rbx: struct vcpu, interrupts disabled */
ENTRY(compat_restore_all_guest)
ASSERT_INTERRUPTS_DISABLED
+ SWITCH_TO_VCPU_STACK
mov $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),%r11d
and UREGS_eflags(%rsp),%r11d
.Lcr4_orig:
@@ -195,7 +197,6 @@ ENTRY(compat_post_handle_exception)
/* See lstar_enter for entry register state. */
ENTRY(cstar_enter)
- sti
CR4_PV32_RESTORE
movq 8(%rsp),%rax /* Restore %rax. */
movq $FLAT_KERNEL_SS,8(%rsp)
@@ -206,6 +207,8 @@ ENTRY(cstar_enter)
movl $TRAP_syscall, 4(%rsp)
SAVE_ALL
movq %rsp, %rdi
+ SWITCH_FROM_VCPU_STACK
+ sti
GET_CURRENT(bx)
movq VCPU_domain(%rbx),%rcx
cmpb $0,DOMAIN_is_32bit_pv(%rcx)
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index f7412b87c2..991a8799a9 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -37,6 +37,7 @@ ENTRY(switch_to_kernel)
/* %rbx: struct vcpu, interrupts disabled */
restore_all_guest:
ASSERT_INTERRUPTS_DISABLED
+ SWITCH_TO_VCPU_STACK
RESTORE_ALL
testw $TRAP_syscall,4(%rsp)
jz iret_exit_to_guest
@@ -71,6 +72,7 @@ iret_exit_to_guest:
ALIGN
/* No special register assumptions. */
restore_all_xen:
+ SWITCH_TO_VCPU_STACK
RESTORE_ALL adj=8
iretq
@@ -91,7 +93,6 @@ restore_all_xen:
* %ss must be saved into the space left by the trampoline.
*/
ENTRY(lstar_enter)
- sti
movq 8(%rsp),%rax /* Restore %rax. */
movq $FLAT_KERNEL_SS,8(%rsp)
pushq %r11
@@ -101,6 +102,8 @@ ENTRY(lstar_enter)
movl $TRAP_syscall, 4(%rsp)
SAVE_ALL
mov %rsp, %rdi
+ SWITCH_FROM_VCPU_STACK
+ sti
GET_CURRENT(bx)
testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
jz switch_to_kernel
@@ -189,7 +192,6 @@ process_trap:
jmp test_all_events
ENTRY(sysenter_entry)
- sti
pushq $FLAT_USER_SS
pushq $0
pushfq
@@ -201,6 +203,8 @@ GLOBAL(sysenter_eflags_saved)
movl $TRAP_syscall, 4(%rsp)
SAVE_ALL
movq %rsp, %rdi
+ SWITCH_FROM_VCPU_STACK
+ sti
GET_CURRENT(bx)
cmpb $0,VCPU_sysenter_disables_events(%rbx)
movq VCPU_sysenter_addr(%rbx),%rax
@@ -237,6 +241,7 @@ ENTRY(int80_direct_trap)
movl $0x80, 4(%rsp)
SAVE_ALL
mov %rsp, %rdi
+ SWITCH_FROM_VCPU_STACK
cmpb $0,untrusted_msi(%rip)
UNLIKELY_START(ne, msi_check)
@@ -408,6 +413,7 @@ ENTRY(dom_crash_sync_extable)
ENTRY(common_interrupt)
SAVE_ALL CLAC
movq %rsp,%rdi
+ SWITCH_FROM_VCPU_STACK
CR4_PV32_RESTORE
pushq %rdi
callq do_IRQ
@@ -430,6 +436,7 @@ ENTRY(page_fault)
GLOBAL(handle_exception)
SAVE_ALL CLAC
movq %rsp, %rdi
+ SWITCH_FROM_VCPU_STACK
handle_exception_saved:
GET_CURRENT(bx)
testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rdi)
@@ -607,6 +614,7 @@ ENTRY(double_fault)
/* Set AC to reduce chance of further SMAP faults */
SAVE_ALL STAC
movq %rsp,%rdi
+ SWITCH_FROM_VCPU_STACK_IST
call do_double_fault
BUG /* do_double_fault() shouldn't return. */
@@ -615,7 +623,9 @@ ENTRY(early_page_fault)
movl $TRAP_page_fault,4(%rsp)
SAVE_ALL
movq %rsp,%rdi
+ SWITCH_FROM_VCPU_STACK
call do_early_page_fault
+ movq %rsp, %rdi
jmp restore_all_xen
.popsection
@@ -625,6 +635,7 @@ ENTRY(nmi)
handle_ist_exception:
SAVE_ALL CLAC
movq %rsp, %rdi
+ SWITCH_FROM_VCPU_STACK_IST
CR4_PV32_RESTORE
movq %rdi,%rdx
movq %rdi,%rbx
diff --git a/xen/common/wait.c b/xen/common/wait.c
index a57bc10d61..fbb5d996e5 100644
--- a/xen/common/wait.c
+++ b/xen/common/wait.c
@@ -122,10 +122,10 @@ void wake_up_all(struct waitqueue_head *wq)
static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
{
- struct cpu_info *cpu_info = get_cpu_info();
+ struct cpu_user_regs *user_regs = guest_cpu_user_regs();
struct vcpu *curr = current;
unsigned long dummy;
- u32 entry_vector = cpu_info->guest_cpu_user_regs.entry_vector;
+ u32 entry_vector = user_regs->entry_vector;
ASSERT(wqv->esp == 0);
@@ -160,7 +160,7 @@ static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
"pop %%r11; pop %%r10; pop %%r9; pop %%r8;"
"pop %%rbp; pop %%rdx; pop %%rbx; pop %%rax"
: "=&S" (wqv->esp), "=&c" (dummy), "=&D" (dummy)
- : "i" (PAGE_SIZE), "0" (0), "1" (cpu_info), "2" (wqv->stack)
+ : "i" (PAGE_SIZE), "0" (0), "1" (user_regs), "2" (wqv->stack)
: "memory" );
if ( unlikely(wqv->esp == 0) )
@@ -169,7 +169,7 @@ static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
domain_crash_synchronous();
}
- cpu_info->guest_cpu_user_regs.entry_vector = entry_vector;
+ user_regs->entry_vector = entry_vector;
}
static void __finish_wait(struct waitqueue_vcpu *wqv)
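
The wait.c hunks follow from the guest_cpu_user_regs() change below:
once the saved registers may live atop a per-vcpu stack, going through a
cached struct cpu_info pointer would read and write the wrong copy of
the frame. The pattern the patch moves to, as a sketch:

    /* Sketch: always resolve the entry frame through the accessor, never
     * through a cached cpu_info pointer, so the per-vcpu stack case is
     * handled (cf. __prepare_to_wait() above). */
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    u32 entry_vector = regs->entry_vector;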
diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
index ae9fef7450..e759726a4b 100644
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -116,6 +116,25 @@ void ret_from_intr(void);
GET_STACK_END(reg); \
__GET_CURRENT(reg)
+#define SWITCH_FROM_VCPU_STACK \
+ GET_STACK_END(ax); \
+ testb $ASM_ON_VCPUSTACK, STACK_CPUINFO_FIELD(flags)(%rax); \
+ jz 1f; \
+ movq STACK_CPUINFO_FIELD(stack_bottom_cpu)(%rax), %rsp; \
+1:
+
+#define SWITCH_FROM_VCPU_STACK_IST \
+ GET_STACK_END(ax); \
+ testb $ASM_ON_VCPUSTACK, STACK_CPUINFO_FIELD(flags)(%rax); \
+ jz 1f; \
+ subq $(CPUINFO_sizeof - 1), %rax; \
+ addq CPUINFO_stack_bottom_cpu(%rax), %rsp; \
+ subq %rax, %rsp; \
+1:
+
+#define SWITCH_TO_VCPU_STACK \
+ movq %rdi, %rsp
+
#ifndef NDEBUG
#define ASSERT_NOT_IN_ATOMIC \
sti; /* sometimes called with interrupts disabled: safe to enable */ \
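
A note on SWITCH_FROM_VCPU_STACK_IST, since the arithmetic is not
obvious from the three instructions: instead of loading a fresh stack
pointer it relocates %rsp by the displacement between the per-vcpu stack
and the per-physical-CPU stack, so the frame SAVE_ALL just built keeps
its relative position. In C (sketch only, same mock fields as above;
stack_end is the value GET_STACK_END leaves in %rax, i.e. the last byte
of the current stack):

    /* subq $(CPUINFO_sizeof - 1), %rax
     *   -> %rax points at the struct cpu_info atop the per-vcpu stack.
     * addq CPUINFO_stack_bottom_cpu(%rax), %rsp; subq %rax, %rsp
     *   -> shift %rsp by (stack_bottom_cpu - cpu_info address). */
    static unsigned long ist_switch(unsigned long stack_end, unsigned long rsp)
    {
        unsigned long ci = stack_end - (sizeof(struct cpu_info_sketch) - 1);

        return rsp + ((struct cpu_info_sketch *)ci)->stack_bottom_cpu - ci;
    }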
diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h
index 6ae0931a59..4b7e9104be 100644
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -9,6 +9,7 @@
#include <xen/percpu.h>
#include <public/xen.h>
+#include <asm/config.h>
#include <asm/page.h>
/*
@@ -94,9 +95,16 @@ static inline struct cpu_info *get_cpu_info(void)
#define set_processor_id(id) do { \
struct cpu_info *ci__ = get_cpu_info(); \
ci__->per_cpu_offset = __per_cpu_offset[ci__->processor_id = (id)]; \
+ ci__->flags = 0; \
} while (0)
-#define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs)
+#define guest_cpu_user_regs() ({ \
+ struct cpu_info *info = get_cpu_info(); \
+ if ( info->flags & VCPUSTACK_ACTIVE ) \
+ info = (struct cpu_info *)(XPTI_START(info->current_vcpu) + \
+ STACK_SIZE) - 1; \
+ &info->guest_cpu_user_regs; \
+})
/*
* Get the bottom-of-stack, as stored in the per-CPU TSS. This actually points
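
The new guest_cpu_user_regs() reads more easily as a function; a C
sketch (XPTI_START() is the per-vcpu mapping helper introduced earlier
in this series, so its use here is an assumption about that layout):

    /* Sketch: with an active per-vcpu stack, the struct cpu_info - and
     * with it guest_cpu_user_regs - lives at the top of the vcpu's stack
     * mapping, not in the per-physical-CPU stack. */
    static struct cpu_user_regs *live_regs(void)
    {
        struct cpu_info *info = get_cpu_info();

        if ( info->flags & VCPUSTACK_ACTIVE )
            info = (struct cpu_info *)(XPTI_START(info->current_vcpu) +
                                       STACK_SIZE) - 1;
        return &info->guest_cpu_user_regs;
    }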
--
2.13.6