
[Xen-devel] [PATCH v3 10/15] xen: arm: handle traps from 64-bit guests



While there, observe that we weren't ever restoring the outer stack frame, even
for 32-bit guests when running a 64-bit hypervisor! The outer stack frame
"only" contains most of the SPSR registers for 32-bit...

We can also remove the logic at "return_to_new_vcpu" which detects returns to
hypervisor mode -- it seemingly tried to handle hypervisor threads, which
aren't a thing we have. The idle VCPUs do not take this path. This simplifies
the return_to_new_vcpu code; we also split it into 32- and 64-bit VCPU paths.
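The resulting C-side dispatch then looks roughly like this (a sketch of
the domain.c hunk below, elided to the relevant branch):

    static void continue_new_vcpu(struct vcpu *prev)
    {
        /* ... */
        if ( is_idle_vcpu(current) )
            reset_stack_and_jump(idle_loop);
        else if ( is_pv32_domain(current->domain) )
            reset_stack_and_jump(return_to_new_vcpu32); /* 32-bit guest */
        else
            reset_stack_and_jump(return_to_new_vcpu64); /* 64-bit guest */
    }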

There is no need to export return_to_hypervisor (now renamed return_from_trap
on arm64); stop exporting it on 32-bit as well.

Also tweak the case of some system register names for consistency.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
v3: actually restore the outer stack frame! Due to this change I have dropped
the previous Acks.
---
 xen/arch/arm/arm32/entry.S |    4 +-
 xen/arch/arm/arm64/entry.S |   95 ++++++++++++++++++++++++++++++++-----------
 xen/arch/arm/domain.c      |    6 ++-
 3 files changed, 77 insertions(+), 28 deletions(-)
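
For orientation, the guest trap path after this patch is, in pseudo-C
(a sketch; the real code is the assembly below, and the behaviour of
leave_hypervisor_tail is taken from the comment in the patch):

    void guest_sync_trap(struct cpu_user_regs *regs)
    {
        /* entry hyp=0: the entry_guest macro saves SPSRs, ELR and SP */
        local_irq_enable();          /* msr daifclr, #2 (sync traps only) */
        do_trap_hypervisor(regs);    /* do_trap_irq on the IRQ vector */
        leave_hypervisor_tail();     /* disables interrupts on return */
        /* exit hyp=0: the exit_guest macro restores the outer frame */
        /* return_from_trap then restores ELR/SPSR and the GPRs, and erets */
    }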

diff --git a/xen/arch/arm/arm32/entry.S b/xen/arch/arm/arm32/entry.S
index 76814dd..cc0a99f 100644
--- a/xen/arch/arm/arm32/entry.S
+++ b/xen/arch/arm/arm32/entry.S
@@ -87,7 +87,7 @@ DEFINE_TRAP_ENTRY_NOIRQ(fiq)
 
 return_from_trap:
         mov sp, r11
-ENTRY(return_to_new_vcpu)
+ENTRY(return_to_new_vcpu32)
         ldr r11, [sp, #UREGS_cpsr]
         and r11, #PSR_MODE_MASK
         cmp r11, #PSR_MODE_HYP
@@ -108,7 +108,7 @@ ENTRY(return_to_guest)
         RESTORE_ONE_BANKED(R8_fiq); RESTORE_ONE_BANKED(R9_fiq); RESTORE_ONE_BANKED(R10_fiq)
         RESTORE_ONE_BANKED(R11_fiq); RESTORE_ONE_BANKED(R12_fiq);
         /* Fall thru */
-ENTRY(return_to_hypervisor)
+return_to_hypervisor:
         cpsid i
         ldr lr, [sp, #UREGS_lr]
         ldr r11, [sp, #UREGS_pc]
diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index b5af1e2..9cda8f1 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -26,7 +26,7 @@ lr      .req    x30             // link register
         .macro  entry_guest, compat
 
         add     x21, sp, #UREGS_SPSR_el1
-        mrs     x23, SPSR_EL1
+        mrs     x23, SPSR_el1
         str     x23, [x21]
 
         .if \compat == 0 /* Aarch64 mode */
@@ -40,24 +40,56 @@ lr      .req    x30             // link register
         mrs     x23, ELR_el1
         stp     x22, x23, [x21]
 
-        .else             /* Aarch32 mode */
+        .else            /* Aarch32 mode */
 
         add     x21, sp, #UREGS_SPSR_fiq
-        mrs     x22, spsr_fiq
-        mrs     x23, spsr_irq
+        mrs     x22, SPSR_fiq
+        mrs     x23, SPSR_irq
         stp     w22, w23, [x21]
 
         add     x21, sp, #UREGS_SPSR_und
-        mrs     x22, spsr_und
-        mrs     x23, spsr_abt
+        mrs     x22, SPSR_und
+        mrs     x23, SPSR_abt
         stp     w22, w23, [x21]
 
         .endif
 
         .endm
 
+        .macro  exit_guest, compat
+
+        add     x21, sp, #UREGS_SPSR_el1
+        ldr     x23, [x21]
+        msr     SPSR_el1, x23
+
+        .if \compat == 0 /* Aarch64 mode */
+
+        add     x21, sp, #UREGS_SP_el0
+        ldr     x22, [x21]
+        msr     SP_el0, x22
+
+        add     x21, sp, #UREGS_SP_el1
+        ldp     x22, x23, [x21]
+        msr     SP_el1, x22
+        msr     ELR_el1, x23
+
+        .else            /* Aarch32 mode */
+
+        add     x21, sp, #UREGS_SPSR_fiq
+        ldp     w22, w23, [x21]
+        msr     SPSR_fiq, x22
+        msr     SPSR_irq, x23
+
+        add     x21, sp, #UREGS_SPSR_und
+        ldp     w22, w23, [x21]
+        msr     SPSR_und, x22
+        msr     SPSR_abt, x23
+
+        .endif
+
+        .endm
 /*
- * Save state on entry to hypervisor
+ * Save state on entry to hypervisor, restore on exit
  */
         .macro  entry, hyp, compat
         sub     sp, sp, #(UREGS_SPSR_el1 - UREGS_LR) /* CPSR, PC, SP, LR */
@@ -96,6 +128,20 @@ lr      .req    x30             // link register
 
         .endm
 
+        .macro  exit, hyp, compat
+
+        .if \hyp == 0         /* Guest mode */
+
+        bl      leave_hypervisor_tail /* Disables interrupts on return */
+
+        exit_guest \compat
+
+        .endif
+
+        b       return_from_trap
+
+        .endm
+
 /*
  * Bad Abort numbers
  *-----------------
@@ -133,21 +179,26 @@ hyp_sync:
         msr     daifclr, #2
         mov     x0, sp
         bl      do_trap_hypervisor
-        b       return_to_hypervisor
+        exit    hyp=1
 
 hyp_irq:
         entry   hyp=1
         mov     x0, sp
         bl      do_trap_irq
-        b       return_to_hypervisor
+        exit    hyp=1
 
 guest_sync:
         entry   hyp=0, compat=0
-        invalid BAD_SYNC /* No AArch64 guest support yet */
+        msr     daifclr, #2
+        mov     x0, sp
+        bl      do_trap_hypervisor
+        exit    hyp=0, compat=0
 
 guest_irq:
         entry   hyp=0, compat=0
-        invalid BAD_IRQ /* No AArch64 guest support yet */
+        mov     x0, sp
+        bl      do_trap_irq
+        exit    hyp=0, compat=0
 
 guest_fiq_invalid:
         entry   hyp=0, compat=0
@@ -162,13 +213,13 @@ guest_sync_compat:
         msr     daifclr, #2
         mov     x0, sp
         bl      do_trap_hypervisor
-        b       return_to_guest
+        exit    hyp=0, compat=1
 
 guest_irq_compat:
         entry   hyp=0, compat=1
         mov     x0, sp
         bl      do_trap_irq
-        b       return_to_guest
+        exit    hyp=0, compat=1
 
 guest_fiq_invalid_compat:
         entry   hyp=0, compat=1
@@ -178,18 +229,12 @@ guest_error_invalid_compat:
         entry   hyp=0, compat=1
         invalid BAD_ERROR
 
-ENTRY(return_to_new_vcpu)
-        ldr     x21, [sp, #UREGS_CPSR]
-        and     x21, x21, #PSR_MODE_MASK
-        /* Returning to EL2? */
-        cmp     x21, #PSR_MODE_EL2t
-        ccmp    x21, #PSR_MODE_EL2h, #0x4, ne
-        b.eq    return_to_hypervisor /* Yes */
-        /* Fall thru */
-ENTRY(return_to_guest)
-        bl      leave_hypervisor_tail /* Disables interrupts on return */
-        /* Fall thru */
-ENTRY(return_to_hypervisor)
+ENTRY(return_to_new_vcpu32)
+        exit    hyp=0, compat=1
+ENTRY(return_to_new_vcpu64)
+        exit    hyp=0, compat=0
+
+return_from_trap:
         msr     daifset, #2 /* Mask interrupts */
 
         ldp     x21, x22, [sp, #UREGS_PC]       // load ELR, SPSR
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index a409d11..cf82e21 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -250,9 +250,13 @@ static void continue_new_vcpu(struct vcpu *prev)
 
     if ( is_idle_vcpu(current) )
         reset_stack_and_jump(idle_loop);
+    else if ( is_pv32_domain(current->domain) )
+        /* check_wakeup_from_wait(); */
+        reset_stack_and_jump(return_to_new_vcpu32);
     else
         /* check_wakeup_from_wait(); */
-        reset_stack_and_jump(return_to_new_vcpu);
+        reset_stack_and_jump(return_to_new_vcpu64);
+
 }
 
 void context_switch(struct vcpu *prev, struct vcpu *next)
-- 
1.7.2.5

