[Xen-devel] [PATCH RFC v2 05/12] x86: don't access saved user regs via rsp in trap handlers

In order to support switching stacks when entering the hypervisor for page
table isolation, don't use %rsp for accessing the saved user registers, but
do that via %rdi instead.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/arch/x86/x86_64/compat/entry.S |  82 +++++++++++++----------
 xen/arch/x86/x86_64/entry.S        | 129 +++++++++++++++++++++++--------------
 xen/include/asm-x86/current.h      |  10 ++-
 3 files changed, 134 insertions(+), 87 deletions(-)

diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 3fea54ee9d..abf3fcae48 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -18,14 +18,14 @@ ENTRY(entry_int82)
         pushq $0
         movl $HYPERCALL_VECTOR, 4(%rsp)
         SAVE_ALL compat=1 /* DPL1 gate, restricted to 32bit PV guests only. */
+        mov %rsp, %rdi
         CR4_PV32_RESTORE
         GET_CURRENT(bx)
-        mov %rsp, %rdi
         call do_entry_int82
-/* %rbx: struct vcpu */
+/* %rbx: struct vcpu, %rdi: user_regs */
 ENTRY(compat_test_all_events)
         ASSERT_NOT_IN_ATOMIC
         cli # tests must not race interrupts
@@ -58,20 +58,24 @@ compat_test_guest_events:
         jmp compat_test_all_events
         ALIGN
-/* %rbx: struct vcpu */
+/* %rbx: struct vcpu, %rdi: user_regs */
 compat_process_softirqs:
         sti
+        pushq %rdi
         call do_softirq
+        popq %rdi
         jmp compat_test_all_events
         ALIGN
-/* %rbx: struct vcpu */
+/* %rbx: struct vcpu, %rdi: user_regs */
 compat_process_mce:
         testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
         jnz .Lcompat_test_guest_nmi
         sti
         movb $0,VCPU_mce_pending(%rbx)
+        pushq %rdi
         call set_guest_machinecheck_trapbounce
+        popq %rdi
         testl %eax,%eax
         jz compat_test_all_events
         movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
@@ -81,13 +85,15 @@ compat_process_mce:
         jmp compat_process_trap
         ALIGN
-/* %rbx: struct vcpu */
+/* %rbx: struct vcpu, %rdi: user_regs */
 compat_process_nmi:
         testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
         jnz compat_test_guest_events
         sti
         movb $0,VCPU_nmi_pending(%rbx)
+        pushq %rdi
         call set_guest_nmi_trapbounce
+        popq %rdi
         testl %eax,%eax
         jz compat_test_all_events
         movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
@@ -178,7 +184,7 @@ ENTRY(cr4_pv32_restore)
         xor %eax, %eax
         ret
-/* %rdx: trap_bounce, %rbx: struct vcpu */
+/* %rdx: trap_bounce, %rbx: struct vcpu, %rdi: user_regs */
 ENTRY(compat_post_handle_exception)
         testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
         jz compat_test_all_events
@@ -199,6 +205,7 @@ ENTRY(cstar_enter)
         pushq $0
         movl $TRAP_syscall, 4(%rsp)
         SAVE_ALL
+        movq %rsp, %rdi
         GET_CURRENT(bx)
         movq VCPU_domain(%rbx),%rcx
         cmpb $0,DOMAIN_is_32bit_pv(%rcx)
@@ -211,13 +218,15 @@ ENTRY(cstar_enter)
         testl $~3,%esi
         leal (,%rcx,TBF_INTERRUPT),%ecx
 UNLIKELY_START(z, compat_syscall_gpf)
-        movq VCPU_trap_ctxt(%rbx),%rdi
-        movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
-        subl $2,UREGS_rip(%rsp)
+        pushq %rcx
+        movq VCPU_trap_ctxt(%rbx),%rcx
+        movl $TRAP_gp_fault,UREGS_entry_vector(%rdi)
+        subl $2,UREGS_rip(%rdi)
         movl $0,TRAPBOUNCE_error_code(%rdx)
-        movl TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_eip(%rdi),%eax
-        movzwl TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_cs(%rdi),%esi
-        testb $4,TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_flags(%rdi)
+        movl TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_eip(%rcx),%eax
+        movzwl TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_cs(%rcx),%esi
+        testb $4,TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_flags(%rcx)
+        popq %rcx
         setnz %cl
         leal TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE(,%rcx,TBF_INTERRUPT),%ecx
 UNLIKELY_END(compat_syscall_gpf)
@@ -229,12 +238,12 @@ UNLIKELY_END(compat_syscall_gpf)
 ENTRY(compat_sysenter)
         CR4_PV32_RESTORE
         movq VCPU_trap_ctxt(%rbx),%rcx
-        cmpb $TRAP_gp_fault,UREGS_entry_vector(%rsp)
+        cmpb $TRAP_gp_fault,UREGS_entry_vector(%rdi)
         movzwl VCPU_sysenter_sel(%rbx),%eax
         movzwl TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_cs(%rcx),%ecx
         cmovel %ecx,%eax
         testl $~3,%eax
-        movl $FLAT_COMPAT_USER_SS,UREGS_ss(%rsp)
+        movl $FLAT_COMPAT_USER_SS,UREGS_ss(%rdi)
         cmovzl %ecx,%eax
         movw %ax,TRAPBOUNCE_cs(%rdx)
         call compat_create_bounce_frame
@@ -247,26 +256,27 @@ ENTRY(compat_int80_direct_trap)
 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
 /* {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]} */
-/* %rdx: trap_bounce, %rbx: struct vcpu */
-/* On return only %rbx and %rdx are guaranteed non-clobbered. */
+/* %rdx: trap_bounce, %rbx: struct vcpu, %rdi: user_regs */
+/* On return only %rbx, %rdi and %rdx are guaranteed non-clobbered. */
 compat_create_bounce_frame:
         ASSERT_INTERRUPTS_ENABLED
-        mov %fs,%edi
+        mov %fs,%ecx
+        pushq %rcx
         ASM_STAC
-        testb $2,UREGS_cs+8(%rsp)
+        testb $2,UREGS_cs(%rdi)
         jz 1f
         /* Push new frame at registered guest-OS stack base. */
         movl VCPU_kernel_sp(%rbx),%esi
 .Lft1:  mov VCPU_kernel_ss(%rbx),%fs
         subl $2*4,%esi
-        movl UREGS_rsp+8(%rsp),%eax
+        movl UREGS_rsp(%rdi),%eax
 .Lft2:  movl %eax,%fs:(%rsi)
-        movl UREGS_ss+8(%rsp),%eax
+        movl UREGS_ss(%rdi),%eax
 .Lft3:  movl %eax,%fs:4(%rsi)
         jmp 2f
 1:      /* In kernel context already: push new frame at existing %rsp. */
-        movl UREGS_rsp+8(%rsp),%esi
-.Lft4:  mov UREGS_ss+8(%rsp),%fs
 2:      movq VCPU_domain(%rbx),%r8
         subl $3*4,%esi
@@ -277,12 +287,12 @@ compat_create_bounce_frame:
         orb %ch,COMPAT_VCPUINFO_upcall_mask(%rax)
         popq %rax
         shll $16,%eax # Bits 16-23: saved_upcall_mask
-        movw UREGS_cs+8(%rsp),%ax # Bits 0-15: CS
+        movw UREGS_cs(%rdi),%ax # Bits 0-15: CS
 .Lft5:  movl %eax,%fs:4(%rsi) # CS / saved_upcall_mask
         shrl $16,%eax
         testb %al,%al # Bits 0-7: saved_upcall_mask
         setz %ch # %ch == !saved_upcall_mask
-        movl UREGS_eflags+8(%rsp),%eax
+        movl UREGS_eflags(%rdi),%eax
         andl $~(X86_EFLAGS_IF|X86_EFLAGS_IOPL),%eax
         addb %ch,%ch # Bit 9 (EFLAGS.IF)
         orb %ch,%ah # Fold EFLAGS.IF into %eax
@@ -291,7 +301,7 @@ compat_create_bounce_frame:
         cmovnzl VCPU_iopl(%rbx),%ecx # Bits 13:12 (EFLAGS.IOPL)
         orl %ecx,%eax # Fold EFLAGS.IOPL into %eax
 .Lft6:  movl %eax,%fs:2*4(%rsi) # EFLAGS
-        movl UREGS_rip+8(%rsp),%eax
+        movl UREGS_rip(%rdi),%eax
 .Lft7:  movl %eax,%fs:(%rsi) # EIP
         testb $TBF_EXCEPTION_ERRCODE,TRAPBOUNCE_flags(%rdx)
         jz 1f
@@ -303,10 +313,11 @@ compat_create_bounce_frame:
         /* Rewrite our stack frame and return to guest-OS mode. */
         /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
         andl $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
-                X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
-        mov %fs,UREGS_ss+8(%rsp)
-        movl %esi,UREGS_rsp+8(%rsp)
-.Lft13: mov %edi,%fs
+                X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags(%rdi)
+        mov %fs,UREGS_ss(%rdi)
+        movl %esi,UREGS_rsp(%rdi)
+.Lft13: popq %rax
+        mov %eax,%fs
         movzwl TRAPBOUNCE_cs(%rdx),%eax
         /* Null selectors (0-3) are not allowed. */
         testl $~3,%eax
@@ -314,13 +325,14 @@ UNLIKELY_START(z, compat_bounce_null_selector)
         lea UNLIKELY_DISPATCH_LABEL(compat_bounce_null_selector)(%rip), %rdi
         jmp asm_domain_crash_synchronous /* Does not return */
 __UNLIKELY_END(compat_bounce_null_selector)
-        movl %eax,UREGS_cs+8(%rsp)
+        movl %eax,UREGS_cs(%rdi)
         movl TRAPBOUNCE_eip(%rdx),%eax
-        movl %eax,UREGS_rip+8(%rsp)
+        movl %eax,UREGS_rip(%rdi)
         ret
 .section .fixup,"ax"
 .Lfx13:
-        xorl %edi,%edi
+        popq %rax
+        pushq $0
         jmp .Lft13
 .previous
 _ASM_EXTABLE(.Lft1, dom_crash_sync_extable)
@@ -338,14 +350,16 @@ compat_crash_page_fault_8:
 compat_crash_page_fault_4:
         addl $4,%esi
 compat_crash_page_fault:
-.Lft14: mov %edi,%fs
+.Lft14: popq %rax
+        mov %eax,%fs
         ASM_CLAC
         movl %esi,%edi
         call show_page_walk
         jmp dom_crash_sync_extable
 .section .fixup,"ax"
 .Lfx14:
-        xorl %edi,%edi
+        popq %rax
+        pushq $0
         jmp .Lft14
 .previous
 _ASM_EXTABLE(.Lft14, .Lfx14)

diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index cbd73f6c22..f7412b87c2 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -14,13 +14,13 @@
 #include <public/xen.h>
 #include <irq_vectors.h>
-/* %rbx: struct vcpu */
+/* %rbx: struct vcpu, %rdi: user_regs */
 ENTRY(switch_to_kernel)
         leaq VCPU_trap_bounce(%rbx),%rdx
         /* TB_eip = (32-bit syscall && syscall32_addr) ?
          *          syscall32_addr : syscall_addr */
         xor %eax,%eax
-        cmpw $FLAT_USER_CS32,UREGS_cs(%rsp)
+        cmpw $FLAT_USER_CS32,UREGS_cs(%rdi)
         cmoveq VCPU_syscall32_addr(%rbx),%rax
         testq %rax,%rax
         cmovzq VCPU_syscall_addr(%rbx),%rax
@@ -31,7 +31,7 @@ ENTRY(switch_to_kernel)
         leal (,%rcx,TBF_INTERRUPT),%ecx
         movb %cl,TRAPBOUNCE_flags(%rdx)
         call create_bounce_frame
-        andl $~X86_EFLAGS_DF,UREGS_eflags(%rsp)
+        andl $~X86_EFLAGS_DF,UREGS_eflags(%rdi)
         jmp test_all_events
 /* %rbx: struct vcpu, interrupts disabled */
@@ -100,14 +100,16 @@ ENTRY(lstar_enter)
         pushq $0
         movl $TRAP_syscall, 4(%rsp)
         SAVE_ALL
+        mov %rsp, %rdi
         GET_CURRENT(bx)
         testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
         jz switch_to_kernel
-        mov %rsp, %rdi
+        push %rdi
         call pv_hypercall
+        pop %rdi
-/* %rbx: struct vcpu */
+/* %rbx: struct vcpu, %rdi: user_regs */
 test_all_events:
         ASSERT_NOT_IN_ATOMIC
         cli # tests must not race interrupts
@@ -138,20 +140,24 @@ test_guest_events:
         jmp test_all_events
         ALIGN
-/* %rbx: struct vcpu */
+/* %rbx: struct vcpu, %rdi: user_regs */
 process_softirqs:
         sti
+        pushq %rdi
         call do_softirq
+        popq %rdi
         jmp test_all_events
         ALIGN
-/* %rbx: struct vcpu */
+/* %rbx: struct vcpu, %rdi: user_regs */
 process_mce:
         testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
         jnz .Ltest_guest_nmi
         sti
         movb $0,VCPU_mce_pending(%rbx)
+        push %rdi
         call set_guest_machinecheck_trapbounce
+        pop %rdi
         test %eax,%eax
         jz test_all_events
         movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
@@ -167,7 +173,9 @@ process_nmi:
         jnz test_guest_events
         sti
         movb $0,VCPU_nmi_pending(%rbx)
+        push %rdi
         call set_guest_nmi_trapbounce
+        pop %rdi
         test %eax,%eax
         jz test_all_events
         movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
@@ -192,11 +200,12 @@ GLOBAL(sysenter_eflags_saved)
         pushq $0
         movl $TRAP_syscall, 4(%rsp)
         SAVE_ALL
+        movq %rsp, %rdi
         GET_CURRENT(bx)
         cmpb $0,VCPU_sysenter_disables_events(%rbx)
         movq VCPU_sysenter_addr(%rbx),%rax
         setne %cl
-        testl $X86_EFLAGS_NT,UREGS_eflags(%rsp)
         leaq VCPU_trap_bounce(%rbx),%rdx
 UNLIKELY_START(nz, sysenter_nt_set)
         pushfq
@@ -208,17 +217,17 @@ UNLIKELY_END(sysenter_nt_set)
         leal (,%rcx,TBF_INTERRUPT),%ecx
 UNLIKELY_START(z, sysenter_gpf)
         movq VCPU_trap_ctxt(%rbx),%rsi
-        movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
+        movl $TRAP_gp_fault,UREGS_entry_vector(%rdi)
         movl %eax,TRAPBOUNCE_error_code(%rdx)
         movq TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_eip(%rsi),%rax
         testb $4,TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_flags(%rsi)
         setnz %cl
         leal TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE(,%rcx,TBF_INTERRUPT),%ecx
 UNLIKELY_END(sysenter_gpf)
-        movq VCPU_domain(%rbx),%rdi
+        movq VCPU_domain(%rbx),%rsi
         movq %rax,TRAPBOUNCE_eip(%rdx)
         movb %cl,TRAPBOUNCE_flags(%rdx)
-        testb $1,DOMAIN_is_32bit_pv(%rdi)
+        testb $1,DOMAIN_is_32bit_pv(%rsi)
         jnz compat_sysenter
         jmp .Lbounce_exception
@@ -227,11 +236,14 @@ ENTRY(int80_direct_trap)
         pushq $0
         movl $0x80, 4(%rsp)
         SAVE_ALL
+        mov %rsp, %rdi
         cmpb $0,untrusted_msi(%rip)
 UNLIKELY_START(ne, msi_check)
+        pushq %rdi
         movl $0x80,%edi
         call check_for_unexpected_msi
+        popq %rdi
 UNLIKELY_END(msi_check)
         GET_CURRENT(bx)
@@ -253,30 +265,32 @@ int80_slow_path:
          * Setup entry vector and error code as if this was a GPF caused by an
          * IDT entry with DPL==0.
          */
-        movl $((0x80 << 3) | X86_XEC_IDT),UREGS_error_code(%rsp)
-        movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
+        movl $((0x80 << 3) | X86_XEC_IDT),UREGS_error_code(%rdi)
+        movl $TRAP_gp_fault,UREGS_entry_vector(%rdi)
         /* A GPF wouldn't have incremented the instruction pointer. */
-        subq $2,UREGS_rip(%rsp)
+        subq $2,UREGS_rip(%rdi)
         jmp handle_exception_saved
 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK: */
 /* { RCX, R11, [ERRCODE,] RIP, CS, RFLAGS, RSP, SS } */
-/* %rdx: trap_bounce, %rbx: struct vcpu */
-/* On return only %rbx and %rdx are guaranteed non-clobbered. */
+/* %rdx: trap_bounce, %rbx: struct vcpu, %rdi: user_regs */
+/* On return only %rdi, %rbx and %rdx are guaranteed non-clobbered. */
 create_bounce_frame:
         ASSERT_INTERRUPTS_ENABLED
         testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
         jnz 1f
         /* Push new frame at registered guest-OS stack base. */
         pushq %rdx
+        pushq %rdi
         movq %rbx,%rdi
         call toggle_guest_mode
+        popq %rdi
         popq %rdx
         movq VCPU_kernel_sp(%rbx),%rsi
         jmp 2f
 1:      /* In kernel context already: push new frame at existing %rsp. */
-        movq UREGS_rsp+8(%rsp),%rsi
-        andb $0xfc,UREGS_cs+8(%rsp) # Indicate kernel context to guest.
 2:      andq $~0xf,%rsi # Stack frames are 16-byte aligned.
         movq $HYPERVISOR_VIRT_START+1,%rax
         cmpq %rax,%rsi
@@ -294,11 +308,10 @@ __UNLIKELY_END(create_bounce_frame_bad_sp)
         _ASM_EXTABLE(0b, domain_crash_page_fault_ ## n ## x8)
         subq $7*8,%rsi
-        movq UREGS_ss+8(%rsp),%rax
+        movq UREGS_ss(%rdi),%rax
         ASM_STAC
-        movq VCPU_domain(%rbx),%rdi
         STORE_GUEST_STACK(rax,6) # SS
-        movq UREGS_rsp+8(%rsp),%rax
+        movq UREGS_rsp(%rdi),%rax
         STORE_GUEST_STACK(rax,5) # RSP
         movq VCPU_vcpu_info(%rbx),%rax
         pushq VCPUINFO_upcall_mask(%rax)
@@ -307,21 +320,24 @@ __UNLIKELY_END(create_bounce_frame_bad_sp)
         orb %ch,VCPUINFO_upcall_mask(%rax)
         popq %rax
         shlq $32,%rax # Bits 32-39: saved_upcall_mask
-        movw UREGS_cs+8(%rsp),%ax # Bits 0-15: CS
+        movw UREGS_cs(%rdi),%ax # Bits 0-15: CS
         STORE_GUEST_STACK(rax,3) # CS / saved_upcall_mask
         shrq $32,%rax
         testb $0xFF,%al # Bits 0-7: saved_upcall_mask
         setz %ch # %ch == !saved_upcall_mask
-        movl UREGS_eflags+8(%rsp),%eax
+        movl UREGS_eflags(%rdi),%eax
+        pushq %rdi
+        movq VCPU_domain(%rbx),%rdi
         andl $~(X86_EFLAGS_IF|X86_EFLAGS_IOPL),%eax
         addb %ch,%ch # Bit 9 (EFLAGS.IF)
         orb %ch,%ah # Fold EFLAGS.IF into %eax
         xorl %ecx,%ecx # if ( VM_ASSIST(v->domain, architectural_iopl) )
         testb $1 << VMASST_TYPE_architectural_iopl,DOMAIN_vm_assist(%rdi)
+        popq %rdi
         cmovnzl VCPU_iopl(%rbx),%ecx # Bits 13:12 (EFLAGS.IOPL)
         orl %ecx,%eax # Fold EFLAGS.IOPL into %eax
         STORE_GUEST_STACK(rax,4) # RFLAGS
-        movq UREGS_rip+8(%rsp),%rax
+        movq UREGS_rip(%rdi),%rax
         STORE_GUEST_STACK(rax,2) # RIP
         testb $TBF_EXCEPTION_ERRCODE,TRAPBOUNCE_flags(%rdx)
         jz 1f
@@ -329,9 +345,9 @@ __UNLIKELY_END(create_bounce_frame_bad_sp)
         movl TRAPBOUNCE_error_code(%rdx),%eax
         STORE_GUEST_STACK(rax,2) # ERROR CODE
 1:
-        movq UREGS_r11+8(%rsp),%rax
+        movq UREGS_r11(%rdi),%rax
         STORE_GUEST_STACK(rax,1) # R11
-        movq UREGS_rcx+8(%rsp),%rax
+        movq UREGS_rcx(%rdi),%rax
         STORE_GUEST_STACK(rax,0) # RCX
         ASM_CLAC
@@ -340,19 +356,19 @@ __UNLIKELY_END(create_bounce_frame_bad_sp)
         /* Rewrite our stack frame and return to guest-OS mode. */
         /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
         /* Also clear AC: alignment checks shouldn't trigger in kernel mode. */
-        orl $TRAP_syscall,UREGS_entry_vector+8(%rsp)
+        orl $TRAP_syscall,UREGS_entry_vector(%rdi)
         andl $~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|\
-                X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
-        movq $FLAT_KERNEL_SS,UREGS_ss+8(%rsp)
-        movq %rsi,UREGS_rsp+8(%rsp)
-        movq $FLAT_KERNEL_CS,UREGS_cs+8(%rsp)
+                X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags(%rdi)
+        movq $FLAT_KERNEL_SS,UREGS_ss(%rdi)
+        movq %rsi,UREGS_rsp(%rdi)
+        movq $FLAT_KERNEL_CS,UREGS_cs(%rdi)
         movq TRAPBOUNCE_eip(%rdx),%rax
         testq %rax,%rax
 UNLIKELY_START(z, create_bounce_frame_bad_bounce_ip)
         lea UNLIKELY_DISPATCH_LABEL(create_bounce_frame_bad_bounce_ip)(%rip), %rdi
         jmp asm_domain_crash_synchronous /* Does not return */
 __UNLIKELY_END(create_bounce_frame_bad_bounce_ip)
-        movq %rax,UREGS_rip+8(%rsp)
+        movq %rax,UREGS_rip(%rdi)
         ret
 .pushsection .fixup, "ax", @progbits
@@ -391,15 +407,17 @@ ENTRY(dom_crash_sync_extable)
 ENTRY(common_interrupt)
         SAVE_ALL CLAC
-        CR4_PV32_RESTORE
         movq %rsp,%rdi
+        CR4_PV32_RESTORE
+        pushq %rdi
         callq do_IRQ
+        popq %rdi
         jmp ret_from_intr
 /* No special register assumptions. */
 ENTRY(ret_from_intr)
         GET_CURRENT(bx)
-        testb $3,UREGS_cs(%rsp)
+        testb $3,UREGS_cs(%rdi)
         jz restore_all_xen
         movq VCPU_domain(%rbx),%rax
         testb $1,DOMAIN_is_32bit_pv(%rax)
@@ -411,9 +429,10 @@ ENTRY(page_fault)
 /* No special register assumptions. */
 GLOBAL(handle_exception)
         SAVE_ALL CLAC
+        movq %rsp, %rdi
 handle_exception_saved:
         GET_CURRENT(bx)
-        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
+        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rdi)
         jz exception_with_ints_disabled
 .Lcr4_pv32_orig:
@@ -434,7 +453,7 @@ handle_exception_saved:
         (.Lcr4_pv32_alt_end - .Lcr4_pv32_alt)
         .popsection
-        testb $3,UREGS_cs(%rsp)
+        testb $3,UREGS_cs(%rdi)
         jz .Lcr4_pv32_done
         cmpb $0,DOMAIN_is_32bit_pv(%rax)
         je .Lcr4_pv32_done
@@ -463,20 +482,21 @@ handle_exception_saved:
          * goto compat_test_all_events;
          */
         mov $PFEC_page_present,%al
-        cmpb $TRAP_page_fault,UREGS_entry_vector(%rsp)
+        cmpb $TRAP_page_fault,UREGS_entry_vector(%rdi)
         jne .Lcr4_pv32_done
-        xor UREGS_error_code(%rsp),%eax
+        xor UREGS_error_code(%rdi),%eax
         test $~(PFEC_write_access|PFEC_insn_fetch),%eax
         jz compat_test_all_events
 .Lcr4_pv32_done:
         sti
-1:      movq %rsp,%rdi
-        movzbl UREGS_entry_vector(%rsp),%eax
+1:      movzbl UREGS_entry_vector(%rdi),%eax
         leaq exception_table(%rip),%rdx
         PERFC_INCR(exceptions, %rax, %rbx)
+        pushq %rdi
         mov (%rdx, %rax, 8), %rdx
         INDIRECT_CALL %rdx
-        testb $3,UREGS_cs(%rsp)
+        popq %rdi
+        testb $3,UREGS_cs(%rdi)
         jz restore_all_xen
         leaq VCPU_trap_bounce(%rbx),%rdx
         movq VCPU_domain(%rbx),%rax
@@ -491,10 +511,11 @@ handle_exception_saved:
 /* No special register assumptions. */
 exception_with_ints_disabled:
-        testb $3,UREGS_cs(%rsp) # interrupts disabled outside Xen?
+        testb $3,UREGS_cs(%rdi) # interrupts disabled outside Xen?
         jnz FATAL_exception_with_ints_disabled
-        movq %rsp,%rdi
+        /* %rsp == %rdi here! */
         call search_pre_exception_table
+        movq %rsp,%rdi
         testq %rax,%rax # no fixup code for faulting EIP?
         jz 1b
         movq %rax,UREGS_rip(%rsp)
@@ -513,7 +534,6 @@ exception_with_ints_disabled:
 /* No special register assumptions. */
 FATAL_exception_with_ints_disabled:
         xorl %esi,%esi
-        movq %rsp,%rdi
         call fatal_trap
         BUG /* fatal_trap() shouldn't return. */
@@ -604,25 +624,32 @@ ENTRY(nmi)
         movl $TRAP_nmi,4(%rsp)
 handle_ist_exception:
         SAVE_ALL CLAC
+        movq %rsp, %rdi
         CR4_PV32_RESTORE
-        testb $3,UREGS_cs(%rsp)
+        movq %rdi,%rdx
+        movq %rdi,%rbx
+        subq %rsp,%rbx
+        testb $3,UREGS_cs(%rdi)
         jz 1f
         /* Interrupted guest context. Copy the context to stack bottom. */
         GET_CPUINFO_FIELD(guest_cpu_user_regs,di)
-        movq %rsp,%rsi
+        addq %rbx,%rdi
+        movq %rdx,%rsi
         movl $UREGS_kernel_sizeof/8,%ecx
         movq %rdi,%rsp
         rep movsq
-1:      movq %rsp,%rdi
-        movzbl UREGS_entry_vector(%rsp),%eax
+        movq %rdx,%rdi
+1:      movzbl UREGS_entry_vector(%rdi),%eax
         leaq exception_table(%rip),%rdx
+        pushq %rdi
         mov (%rdx, %rax, 8), %rdx
         INDIRECT_CALL %rdx
-        cmpb $TRAP_nmi,UREGS_entry_vector(%rsp)
+        popq %rdi
+        cmpb $TRAP_nmi,UREGS_entry_vector(%rdi)
         jne ret_from_intr
         /* We want to get straight to the IRET on the NMI exit path. */
-        testb $3,UREGS_cs(%rsp)
+        testb $3,UREGS_cs(%rdi)
         jz restore_all_xen
         GET_CURRENT(bx)
         /* Send an IPI to ourselves to cover for the lack of event checking. */
@@ -631,8 +658,10 @@ handle_ist_exception:
         leaq irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
         cmpl $0,(%rcx,%rax,1)
         je 1f
+        pushq %rdi
         movl $EVENT_CHECK_VECTOR,%edi
         call send_IPI_self
+        popq %rdi
 1:      movq VCPU_domain(%rbx),%rax
         cmpb $0,DOMAIN_is_32bit_pv(%rax)
         je restore_all_guest

diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h
index 89849929eb..c7acbb97da 100644
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -95,9 +95,13 @@ unsigned long get_stack_dump_bottom (unsigned long sp);
     ({                                                        \
         __asm__ __volatile__ (                                \
             "mov %0,%%"__OP"sp;"                              \
-            CHECK_FOR_LIVEPATCH_WORK                          \
-            "jmp %c1"                                         \
-            : : "r" (guest_cpu_user_regs()), "i" (__fn) : "memory" ); \
+            "mov %1,%%"__OP"di;"                              \
+            "pushq %%"__OP"di;"                               \
+            CHECK_FOR_LIVEPATCH_WORK                          \
+            "popq %%"__OP"di;"                                \
+            "jmp %c2"                                         \
+            : : "r" (get_cpu_info()), "r" (guest_cpu_user_regs()), \
+                "i" (__fn) : "memory" );                      \
         unreachable();                                        \
     })
-- 
2.13.6

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
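[Editor's sketch, not part of the patch or of Xen's sources.] The commit message's point can be restated in C terms: an address of the saved register block that is recomputed from the current stack pointer only stays valid while the handler keeps running on that same stack, whereas a pointer handed over explicitly (the role %rdi takes on in the patched entry paths) survives a later stack switch. The struct and function names below are hypothetical stand-ins chosen only for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the saved user register block ("user_regs"). */
    struct user_regs {
        uint64_t rip, cs, rflags, rsp, ss;
    };

    /*
     * Old scheme: locate the saved registers relative to the stack currently
     * in use.  This only works while the handler still runs on the stack the
     * registers were saved on.
     */
    static uint64_t rip_via_stack(const void *stack, size_t regs_offset)
    {
        const struct user_regs *regs =
            (const struct user_regs *)((const char *)stack + regs_offset);
        return regs->rip;
    }

    /*
     * New scheme: the caller passes the block explicitly, so the access stays
     * valid even after the handler moves to another stack.
     */
    static uint64_t rip_via_pointer(const struct user_regs *regs)
    {
        return regs->rip;
    }

    int main(void)
    {
        struct user_regs frame = { .rip = 0x1000 };

        /* Same block, reached two ways: base-plus-offset (the old %rsp-relative
         * style) and an explicit pointer (the new %rdi style). */
        printf("%#llx\n", (unsigned long long)rip_via_stack(&frame, 0));
        printf("%#llx\n", (unsigned long long)rip_via_pointer(&frame));
        return 0;
    }

The push/pop of %rdi around C calls in the patch is the assembly-level counterpart of keeping such a pointer alive across calls: in the SysV AMD64 ABI %rdi is a caller-saved argument register, so any called function may clobber it.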