
[Xen-devel] [PATCH v2 13/16] x86: rearrange x86_64/entry.S



Split the file into two halves. The first half contains the code used
on behalf of PV guests, while the second half is mostly code the
hypervisor itself uses to handle interrupts and exceptions.

No functional change intended.
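For orientation, a rough sketch of the resulting layout (labels as used
in this patch; unchanged code elided):

    /* xen/arch/x86/x86_64/entry.S -- layout after this change (sketch) */

    /* First half: paths run on behalf of PV guests, e.g.
     * restore_all_guest, iret_exit_to_guest, the SYSCALL and int80
     * entry points, create_bounce_frame and its helpers, and the new
     * self_ipi_restore_all_guest label (code moved out of
     * handle_ist_exception).
     */

    /* --- CODE BELOW THIS LINE (MOSTLY) NOT GUEST RELATED --- */

    /* Second half: hypervisor interrupt/exception handling, e.g.
     * ret_from_intr, restore_all_xen, common_interrupt,
     * handle_ist_exception, machine_check.
     */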

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
v2: new, requested by Andrew
---
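Most hunks move code verbatim; the one genuinely new label is
self_ipi_restore_all_guest, split out of handle_ist_exception so that
the guest exit path stays in the guest-related half. For review
convenience, its C-level intent as a hedged sketch (the field and
helper spellings are approximated from the assembly, not taken from
the tree, and the real code reaches the restore paths via direct jumps
rather than calls):

    /* Sketch only: what self_ipi_restore_all_guest does. */
    void self_ipi_restore_all_guest(void)
    {
        struct vcpu *v = current;

        /*
         * No event checking has happened on this path, so if softirq
         * work is pending for this vCPU's processor, send ourselves
         * the event-check IPI to pick it up after the return.
         */
        if ( irq_stat[v->processor].softirq_pending /* approximated */ )
            send_IPI_self(EVENT_CHECK_VECTOR);

        /* Pick the exit path matching the guest's PV ABI width. */
        if ( v->domain->arch.is_32bit_pv /* what DOMAIN_is_32bit_pv encodes */ )
            compat_restore_all_guest();
        else
            restore_all_guest();
    }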
 xen/arch/x86/x86_64/entry.S | 94 ++++++++++++++++++++------------------
 1 file changed, 51 insertions(+), 43 deletions(-)

diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 48cb96c..319f923 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -121,16 +121,6 @@ process_trap:
         call create_bounce_frame
         jmp  test_all_events
 
-/* No special register assumptions. */
-ENTRY(ret_from_intr)
-        GET_CURRENT(bx)
-        testb $3, UREGS_cs(%rsp)
-        jz    restore_all_xen
-        movq  VCPU_domain(%rbx), %rax
-        cmpb  $0, DOMAIN_is_32bit_pv(%rax)
-        je    test_all_events
-        jmp   compat_test_all_events
-
         .section .text.entry, "ax", @progbits
 
 /* %rbx: struct vcpu, interrupts disabled */
@@ -211,26 +201,6 @@ iret_exit_to_guest:
 .Lft0:  iretq
         _ASM_PRE_EXTABLE(.Lft0, handle_exception)
 
-        ALIGN
-/* No special register assumptions. */
-restore_all_xen:
-        /*
-         * Check whether we need to switch to the per-CPU page tables, in
-         * case we return to late PV exit code (from an NMI or #MC).
-         */
-        GET_STACK_END(bx)
-        cmpb  $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
-UNLIKELY_START(ne, exit_cr3)
-        mov   STACK_CPUINFO_FIELD(pv_cr3)(%rbx), %rax
-        mov   %rax, %cr3
-UNLIKELY_END(exit_cr3)
-
-        /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
-        SPEC_CTRL_EXIT_TO_XEN_IST /* Req: %rbx=end, Clob: acd */
-
-        RESTORE_ALL adj=8
-        iretq
-
 /*
  * When entering SYSCALL from kernel mode:
  *  %rax                            = hypercall vector
@@ -420,6 +390,21 @@ int80_slow_path:
         GET_STACK_END(14)
         jmp   handle_exception_saved
 
+self_ipi_restore_all_guest:
+        GET_CURRENT(bx)
+        /* Send an IPI to ourselves to cover for the lack of event checking. */
+        movl  VCPU_processor(%rbx),%eax
+        shll  $IRQSTAT_shift,%eax
+        leaq  irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
+        cmpl  $0,(%rcx,%rax,1)
+        je    1f
+        movl  $EVENT_CHECK_VECTOR,%edi
+        call  send_IPI_self
+1:      movq  VCPU_domain(%rbx),%rax
+        cmpb  $0,DOMAIN_is_32bit_pv(%rax)
+        je    restore_all_guest
+        jmp   compat_restore_all_guest
+
         /* create_bounce_frame & helpers don't need to be in .text.entry */
         .text
 
@@ -553,8 +538,43 @@ ENTRY(dom_crash_sync_extable)
         jmp   asm_domain_crash_synchronous /* Does not return */
         .popsection
 
+/* --- CODE BELOW THIS LINE (MOSTLY) NOT GUEST RELATED --- */
+
+        .text
+
+        ALIGN
+/* No special register assumptions. */
+ENTRY(ret_from_intr)
+        GET_CURRENT(bx)
+        testb $3, UREGS_cs(%rsp)
+        jz    restore_all_xen
+        movq  VCPU_domain(%rbx), %rax
+        cmpb  $0, DOMAIN_is_32bit_pv(%rax)
+        je    test_all_events
+        jmp   compat_test_all_events
+
         .section .text.entry, "ax", @progbits
 
+        ALIGN
+/* No special register assumptions. */
+restore_all_xen:
+        /*
+         * Check whether we need to switch to the per-CPU page tables, in
+         * case we return to late PV exit code (from an NMI or #MC).
+         */
+        GET_STACK_END(bx)
+        cmpb  $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
+UNLIKELY_START(ne, exit_cr3)
+        mov   STACK_CPUINFO_FIELD(pv_cr3)(%rbx), %rax
+        mov   %rax, %cr3
+UNLIKELY_END(exit_cr3)
+
+        /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
+        SPEC_CTRL_EXIT_TO_XEN_IST /* Req: %rbx=end, Clob: acd */
+
+        RESTORE_ALL adj=8
+        iretq
+
 ENTRY(common_interrupt)
         SAVE_ALL CLAC
 
@@ -845,19 +865,7 @@ handle_ist_exception:
         /* We want to get straight to the IRET on the NMI exit path. */
         testb $3,UREGS_cs(%rsp)
         jz    restore_all_xen
-        GET_CURRENT(bx)
-        /* Send an IPI to ourselves to cover for the lack of event checking. */
-        movl  VCPU_processor(%rbx),%eax
-        shll  $IRQSTAT_shift,%eax
-        leaq  irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
-        cmpl  $0,(%rcx,%rax,1)
-        je    1f
-        movl  $EVENT_CHECK_VECTOR,%edi
-        call  send_IPI_self
-1:      movq  VCPU_domain(%rbx),%rax
-        cmpb  $0,DOMAIN_is_32bit_pv(%rax)
-        je    restore_all_guest
-        jmp   compat_restore_all_guest
+        jmp   self_ipi_restore_all_guest
 
 ENTRY(machine_check)
         pushq $0
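
One more note for reviewers: restore_all_xen likewise moves verbatim
into .text.entry. Its page-table check corresponds to roughly the
following hedged C sketch (get_cpu_info() and the cpu_info field names
mirror the STACK_CPUINFO_FIELD() accesses above; the raw %cr3 write is
kept as inline asm since the usual C helpers do extra bookkeeping):

    /* Sketch only: intent of restore_all_xen's CR3 check. */
    struct cpu_info *ci = get_cpu_info();   /* block at the stack end */

    /*
     * Returning to late PV exit code (from an NMI or #MC) may require
     * switching back to the per-CPU page tables first.
     */
    if ( unlikely(ci->use_pv_cr3) )
        asm volatile ( "mov %0, %%cr3" :: "r" (ci->pv_cr3) : "memory" );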
-- 
git-series 0.9.1
