
[Xen-devel] [PATCH 3/4] x86/pv: Drop {compat_,}create_bounce_frame() and use the C version instead



The clobbering of TRAPBOUNCE_flags in .L{compat_,}bounce_exception is subsumed
by the logic at the end of pv_create_exception_frame().
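
(For context, a minimal sketch of the tail of pv_create_exception_frame(),
presumably introduced earlier in this series -- the field names and exact
signature below are illustrative, not quoted from that patch:)

    void pv_create_exception_frame(void)
    {
        struct vcpu *curr = current;
        /* Illustrative field path; the real layout may differ. */
        struct trap_bounce *tb = &curr->arch.pv_vcpu.trap_bounce;

        /* ... write {[ERRCODE,] EIP/RIP, CS, EFLAGS, ESP/RSP, SS} onto the
         * guest kernel stack, crashing the domain on any failure ... */

        /*
         * Unconditionally clear the bounce flags on the way out.  This is
         * what subsumes the explicit 'movb $0,TRAPBOUNCE_flags(%rdx)'
         * removed from .L{compat_,}bounce_exception below.
         */
        tb->flags = 0;
    }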

This cleanup removes all callers of asm_domain_crash_synchronous(), which is
therefore dropped as well.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
---
 xen/arch/x86/traps.c               |  23 ------
 xen/arch/x86/x86_64/compat/entry.S | 116 ++----------------------------
 xen/arch/x86/x86_64/entry.S        | 141 ++-----------------------------------
 xen/include/xen/sched.h            |   7 --
 4 files changed, 10 insertions(+), 277 deletions(-)

diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index ece2c13..73a9c7c 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -4195,29 +4195,6 @@ unsigned long do_get_debugreg(int reg)
     return -EINVAL;
 }
 
-void asm_domain_crash_synchronous(unsigned long addr)
-{
-    /*
-     * We need clear AC bit here because in entry.S AC is set
-     * by ASM_STAC to temporarily allow accesses to user pages
-     * which is prevented by SMAP by default.
-     *
-     * For some code paths, where this function is called, clac()
-     * is not needed, but adding clac() here instead of each place
-     * asm_domain_crash_synchronous() is called can reduce the code
-     * redundancy, and it is harmless as well.
-     */
-    clac();
-
-    if ( addr == 0 )
-        addr = this_cpu(last_extable_addr);
-
-    printk("domain_crash_sync called from entry.S: fault at %p %pS\n",
-           _p(addr), _p(addr));
-
-    __domain_crash_synchronous();
-}
-
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 90bda09..1cd4672 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -51,7 +51,7 @@ compat_test_guest_events:
         movl  VCPU_event_sel(%rbx),%eax
         movw  %ax,TRAPBOUNCE_cs(%rdx)
         movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
-        call  compat_create_bounce_frame
+        call  pv_create_exception_frame
         jmp   compat_test_all_events
 
         ALIGN
@@ -95,7 +95,7 @@ compat_process_nmi:
         /* FALLTHROUGH */
 compat_process_trap:
         leaq  VCPU_trap_bounce(%rbx),%rdx
-        call  compat_create_bounce_frame
+        call  pv_create_exception_frame
         jmp   compat_test_all_events
 
 /* %rbx: struct vcpu, interrupts disabled */
@@ -181,8 +181,7 @@ ENTRY(compat_post_handle_exception)
         testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
         jz    compat_test_all_events
 .Lcompat_bounce_exception:
-        call  compat_create_bounce_frame
-        movb  $0,TRAPBOUNCE_flags(%rdx)
+        call  pv_create_exception_frame
         jmp   compat_test_all_events
 
 /* See lstar_enter for entry register state. */
@@ -234,115 +233,10 @@ ENTRY(compat_sysenter)
         movl  $FLAT_COMPAT_USER_SS,UREGS_ss(%rsp)
         cmovzl %ecx,%eax
         movw  %ax,TRAPBOUNCE_cs(%rdx)
-        call  compat_create_bounce_frame
+        call  pv_create_exception_frame
         jmp   compat_test_all_events
 
 ENTRY(compat_int80_direct_trap)
         CR4_PV32_RESTORE
-        call  compat_create_bounce_frame
+        call  pv_create_exception_frame
         jmp   compat_test_all_events
-
-/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
-/*   {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]}                             */
-/* %rdx: trap_bounce, %rbx: struct vcpu                                  */
-/* On return only %rbx and %rdx are guaranteed non-clobbered.            */
-compat_create_bounce_frame:
-        ASSERT_INTERRUPTS_ENABLED
-        mov   %fs,%edi
-        ASM_STAC
-        testb $2,UREGS_cs+8(%rsp)
-        jz    1f
-        /* Push new frame at registered guest-OS stack base. */
-        movl  VCPU_kernel_sp(%rbx),%esi
-.Lft1:  mov   VCPU_kernel_ss(%rbx),%fs
-        subl  $2*4,%esi
-        movl  UREGS_rsp+8(%rsp),%eax
-.Lft2:  movl  %eax,%fs:(%rsi)
-        movl  UREGS_ss+8(%rsp),%eax
-.Lft3:  movl  %eax,%fs:4(%rsi)
-        jmp   2f
-1:      /* In kernel context already: push new frame at existing %rsp. */
-        movl  UREGS_rsp+8(%rsp),%esi
-.Lft4:  mov   UREGS_ss+8(%rsp),%fs
-2:
-        movq  VCPU_domain(%rbx),%r8
-        subl  $3*4,%esi
-        movq  VCPU_vcpu_info(%rbx),%rax
-        pushq COMPAT_VCPUINFO_upcall_mask(%rax)
-        testb $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
-        setnz %ch                       # TBF_INTERRUPT -> set upcall mask
-        orb   %ch,COMPAT_VCPUINFO_upcall_mask(%rax)
-        popq  %rax
-        shll  $16,%eax                  # Bits 16-23: saved_upcall_mask
-        movw  UREGS_cs+8(%rsp),%ax      # Bits  0-15: CS
-.Lft5:  movl  %eax,%fs:4(%rsi)          # CS / saved_upcall_mask
-        shrl  $16,%eax
-        testb %al,%al                   # Bits 0-7: saved_upcall_mask
-        setz  %ch                       # %ch == !saved_upcall_mask
-        movl  UREGS_eflags+8(%rsp),%eax
-        andl  $~(X86_EFLAGS_IF|X86_EFLAGS_IOPL),%eax
-        addb  %ch,%ch                   # Bit 9 (EFLAGS.IF)
-        orb   %ch,%ah                   # Fold EFLAGS.IF into %eax
-        xorl  %ecx,%ecx                 # if ( VM_ASSIST(v->domain, architectural_iopl) )
-        testb $1 << VMASST_TYPE_architectural_iopl,DOMAIN_vm_assist(%r8)
-        cmovnzl VCPU_iopl(%rbx),%ecx    # Bits 13:12 (EFLAGS.IOPL)
-        orl   %ecx,%eax                 # Fold EFLAGS.IOPL into %eax
-.Lft6:  movl  %eax,%fs:2*4(%rsi)        # EFLAGS
-        movl  UREGS_rip+8(%rsp),%eax
-.Lft7:  movl  %eax,%fs:(%rsi)           # EIP
-        testb $TBF_EXCEPTION_ERRCODE,TRAPBOUNCE_flags(%rdx)
-        jz    1f
-        subl  $4,%esi
-        movl  TRAPBOUNCE_error_code(%rdx),%eax
-.Lft8:  movl  %eax,%fs:(%rsi)           # ERROR CODE
-1:
-        ASM_CLAC
-        /* Rewrite our stack frame and return to guest-OS mode. */
-        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
-        andl  $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
-                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
-        mov   %fs,UREGS_ss+8(%rsp)
-        movl  %esi,UREGS_rsp+8(%rsp)
-.Lft13: mov   %edi,%fs
-        movzwl TRAPBOUNCE_cs(%rdx),%eax
-        /* Null selectors (0-3) are not allowed. */
-        testl $~3,%eax
-UNLIKELY_START(z, compat_bounce_null_selector)
-        lea   UNLIKELY_DISPATCH_LABEL(compat_bounce_null_selector)(%rip), %rdi
-        jmp   asm_domain_crash_synchronous  /* Does not return */
-__UNLIKELY_END(compat_bounce_null_selector)
-        movl  %eax,UREGS_cs+8(%rsp)
-        movl  TRAPBOUNCE_eip(%rdx),%eax
-        movl  %eax,UREGS_rip+8(%rsp)
-        ret
-.section .fixup,"ax"
-.Lfx13:
-        xorl  %edi,%edi
-        jmp   .Lft13
-.previous
-        _ASM_EXTABLE(.Lft1,  dom_crash_sync_extable)
-        _ASM_EXTABLE(.Lft2,  compat_crash_page_fault)
-        _ASM_EXTABLE(.Lft3,  compat_crash_page_fault_4)
-        _ASM_EXTABLE(.Lft4,  dom_crash_sync_extable)
-        _ASM_EXTABLE(.Lft5,  compat_crash_page_fault_4)
-        _ASM_EXTABLE(.Lft6,  compat_crash_page_fault_8)
-        _ASM_EXTABLE(.Lft7,  compat_crash_page_fault)
-        _ASM_EXTABLE(.Lft8,  compat_crash_page_fault)
-        _ASM_EXTABLE(.Lft13, .Lfx13)
-
-compat_crash_page_fault_8:
-        addl  $4,%esi
-compat_crash_page_fault_4:
-        addl  $4,%esi
-compat_crash_page_fault:
-.Lft14: mov   %edi,%fs
-        ASM_CLAC
-        movl  %esi,%edi
-        call  show_page_walk
-        jmp   dom_crash_sync_extable
-.section .fixup,"ax"
-.Lfx14:
-        xorl  %edi,%edi
-        jmp   .Lft14
-.previous
-        _ASM_EXTABLE(.Lft14, .Lfx14)
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 57952d0..7d59051 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -28,7 +28,7 @@ ENTRY(switch_to_kernel)
         setc  %cl
         leal  (,%rcx,TBF_INTERRUPT),%ecx
         movb  %cl,TRAPBOUNCE_flags(%rdx)
-        call  create_bounce_frame
+        call  pv_create_exception_frame
         andl  $~X86_EFLAGS_DF,UREGS_eflags(%rsp)
         jmp   test_all_events
 
@@ -131,7 +131,7 @@ test_guest_events:
         movq  VCPU_event_addr(%rbx),%rax
         movq  %rax,TRAPBOUNCE_eip(%rdx)
         movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
-        call  create_bounce_frame
+        call  pv_create_exception_frame
         jmp   test_all_events
 
         ALIGN
@@ -175,7 +175,7 @@ process_nmi:
         /* FALLTHROUGH */
 process_trap:
         leaq VCPU_trap_bounce(%rbx),%rdx
-        call create_bounce_frame
+        call pv_create_exception_frame
         jmp  test_all_events
 
 ENTRY(sysenter_entry)
@@ -266,7 +266,7 @@ UNLIKELY_END(msi_check)
         testb $1,DOMAIN_is_32bit_pv(%rax)
         jnz   compat_int80_direct_trap
 
-        call  create_bounce_frame
+        call  pv_create_exception_frame
         jmp   test_all_events
 
 int80_slow_path:
@@ -281,136 +281,6 @@ int80_slow_path:
         subq  $2,UREGS_rip(%rsp)
         jmp   handle_exception_saved
 
-/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK:                     */
-/*   { RCX, R11, [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }                   */
-/* %rdx: trap_bounce, %rbx: struct vcpu                                  */
-/* On return only %rbx and %rdx are guaranteed non-clobbered.            */
-create_bounce_frame:
-        ASSERT_INTERRUPTS_ENABLED
-        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
-        jnz   1f
-        /* Push new frame at registered guest-OS stack base. */
-        pushq %rdx
-        movq  %rbx,%rdi
-        call  toggle_guest_mode
-        popq  %rdx
-        movq  VCPU_kernel_sp(%rbx),%rsi
-        jmp   2f
-1:      /* In kernel context already: push new frame at existing %rsp. */
-        movq  UREGS_rsp+8(%rsp),%rsi
-        andb  $0xfc,UREGS_cs+8(%rsp)    # Indicate kernel context to guest.
-2:      andq  $~0xf,%rsi                # Stack frames are 16-byte aligned.
-        movq  $HYPERVISOR_VIRT_START+1,%rax
-        cmpq  %rax,%rsi
-        movq  $HYPERVISOR_VIRT_END+8*8,%rax
-        sbb   %ecx,%ecx                 # In +ve address space? Then okay.
-        cmpq  %rax,%rsi
-        adc   %ecx,%ecx                 # Above Xen private area? Then okay.
-UNLIKELY_START(g, create_bounce_frame_bad_sp)
-        lea   UNLIKELY_DISPATCH_LABEL(create_bounce_frame_bad_sp)(%rip), %rdi
-        jmp   asm_domain_crash_synchronous  /* Does not return */
-__UNLIKELY_END(create_bounce_frame_bad_sp)
-
-#define STORE_GUEST_STACK(reg, n) \
-0:      movq  %reg,(n)*8(%rsi); \
-        _ASM_EXTABLE(0b, domain_crash_page_fault_ ## n ## x8)
-
-        subq  $7*8,%rsi
-        movq  UREGS_ss+8(%rsp),%rax
-        ASM_STAC
-        movq  VCPU_domain(%rbx),%rdi
-        STORE_GUEST_STACK(rax,6)        # SS
-        movq  UREGS_rsp+8(%rsp),%rax
-        STORE_GUEST_STACK(rax,5)        # RSP
-        movq  VCPU_vcpu_info(%rbx),%rax
-        pushq VCPUINFO_upcall_mask(%rax)
-        testb $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
-        setnz %ch                       # TBF_INTERRUPT -> set upcall mask
-        orb   %ch,VCPUINFO_upcall_mask(%rax)
-        popq  %rax
-        shlq  $32,%rax                  # Bits 32-39: saved_upcall_mask
-        movw  UREGS_cs+8(%rsp),%ax      # Bits  0-15: CS
-        STORE_GUEST_STACK(rax,3)        # CS / saved_upcall_mask
-        shrq  $32,%rax
-        testb $0xFF,%al                 # Bits 0-7: saved_upcall_mask
-        setz  %ch                       # %ch == !saved_upcall_mask
-        movl  UREGS_eflags+8(%rsp),%eax
-        andl  $~(X86_EFLAGS_IF|X86_EFLAGS_IOPL),%eax
-        addb  %ch,%ch                   # Bit 9 (EFLAGS.IF)
-        orb   %ch,%ah                   # Fold EFLAGS.IF into %eax
-        xorl  %ecx,%ecx                 # if ( VM_ASSIST(v->domain, architectural_iopl) )
-        testb $1 << VMASST_TYPE_architectural_iopl,DOMAIN_vm_assist(%rdi)
-        cmovnzl VCPU_iopl(%rbx),%ecx    # Bits 13:12 (EFLAGS.IOPL)
-        orl   %ecx,%eax                 # Fold EFLAGS.IOPL into %eax
-        STORE_GUEST_STACK(rax,4)        # RFLAGS
-        movq  UREGS_rip+8(%rsp),%rax
-        STORE_GUEST_STACK(rax,2)        # RIP
-        testb $TBF_EXCEPTION_ERRCODE,TRAPBOUNCE_flags(%rdx)
-        jz    1f
-        subq  $8,%rsi
-        movl  TRAPBOUNCE_error_code(%rdx),%eax
-        STORE_GUEST_STACK(rax,2)        # ERROR CODE
-1:
-        movq  UREGS_r11+8(%rsp),%rax
-        STORE_GUEST_STACK(rax,1)        # R11
-        movq  UREGS_rcx+8(%rsp),%rax
-        STORE_GUEST_STACK(rax,0)        # RCX
-        ASM_CLAC
-
-#undef STORE_GUEST_STACK
-
-        /* Rewrite our stack frame and return to guest-OS mode. */
-        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
-        /* Also clear AC: alignment checks shouldn't trigger in kernel mode. */
-        orl   $TRAP_syscall,UREGS_entry_vector+8(%rsp)
-        andl  $~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|\
-                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
-        movq  $FLAT_KERNEL_SS,UREGS_ss+8(%rsp)
-        movq  %rsi,UREGS_rsp+8(%rsp)
-        movq  $FLAT_KERNEL_CS,UREGS_cs+8(%rsp)
-        movq  TRAPBOUNCE_eip(%rdx),%rax
-        testq %rax,%rax
-UNLIKELY_START(z, create_bounce_frame_bad_bounce_ip)
-        lea   UNLIKELY_DISPATCH_LABEL(create_bounce_frame_bad_bounce_ip)(%rip), %rdi
-        jmp   asm_domain_crash_synchronous  /* Does not return */
-__UNLIKELY_END(create_bounce_frame_bad_bounce_ip)
-        movq  %rax,UREGS_rip+8(%rsp)
-        ret
-
-        .pushsection .fixup, "ax", @progbits
-        # Numeric tags below represent the intended overall %rsi adjustment.
-domain_crash_page_fault_6x8:
-        addq  $8,%rsi
-domain_crash_page_fault_5x8:
-        addq  $8,%rsi
-domain_crash_page_fault_4x8:
-        addq  $8,%rsi
-domain_crash_page_fault_3x8:
-        addq  $8,%rsi
-domain_crash_page_fault_2x8:
-        addq  $8,%rsi
-domain_crash_page_fault_1x8:
-        addq  $8,%rsi
-domain_crash_page_fault_0x8:
-        ASM_CLAC
-        movq  %rsi,%rdi
-        call  show_page_walk
-ENTRY(dom_crash_sync_extable)
-        ASM_CLAC
-        # Get out of the guest-save area of the stack.
-        GET_STACK_END(ax)
-        leaq  STACK_CPUINFO_FIELD(guest_cpu_user_regs)(%rax),%rsp
-        # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
-        __GET_CURRENT(ax)
-        movq  VCPU_domain(%rax),%rax
-        testb $1,DOMAIN_is_32bit_pv(%rax)
-        setz  %al
-        leal  (%rax,%rax,2),%eax
-        orb   %al,UREGS_cs(%rsp)
-        xorl  %edi,%edi
-        jmp   asm_domain_crash_synchronous /* Does not return */
-        .popsection
-
 ENTRY(common_interrupt)
         SAVE_ALL CLAC
         CR4_PV32_RESTORE
@@ -506,8 +376,7 @@ handle_exception_saved:
         testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
         jz    test_all_events
 .Lbounce_exception:
-        call  create_bounce_frame
-        movb  $0,TRAPBOUNCE_flags(%rdx)
+        call  pv_create_exception_frame
         jmp   test_all_events
 
 /* No special register assumptions. */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 1127ca9..a0ef63a 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -632,13 +632,6 @@ void noreturn __domain_crash_synchronous(void);
     __domain_crash_synchronous();                                         \
 } while (0)
 
-/*
- * Called from assembly code, with an optional address to help indicate why
- * the crash occured.  If addr is 0, look up address from last extable
- * redirection.
- */
-void noreturn asm_domain_crash_synchronous(unsigned long addr);
-
 #define set_current_state(_s) do { current->state = (_s); } while (0)
 void scheduler_init(void);
 int  sched_init_vcpu(struct vcpu *v, unsigned int processor);
-- 
2.1.4

