[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 05/18] xen/arm: Save ESR_EL2 to avoid using mismatched value in syndrome check



Hi Julien,

On 2017/3/17 1:34, Julien Grall wrote:
> Hi Wei,
>
> On 03/13/2017 10:55 AM, Wei Chen wrote:
>> Xen will do exception syndrome check while some types of exception
>> take place in EL2. The syndrome check code read the ESR_EL2 register
>> directly, but in some situations this register may be overwritten by
>> a nested exception.
>>
>> For example, if we re-enable IRQ before reading ESR_EL2 which means
>> Xen will enter IRQ exception mode and return the processor with
>
> s/will/may/
>

Ok.

>> clobbered ESR_EL2 (See ARM ARM DDI 0487A.j D7.2.25)
>>
>> In this case the guest exception syndrome has been overridden, we will
>> check the syndrome for guest sync exception with a mismatched ESR_EL2
>
> s/mismatched/incorrect/ I think
>
>> value. So we want to save ESR_EL2 to cpu_user_regs as soon as the
>> exception takes place in EL2 to avoid using a mismatched syndrome value.
>
> Ditto.
>

Ok.

>>
>> Signed-off-by: Wei Chen <Wei.Chen@xxxxxxx>
>> ---
>>  xen/arch/arm/arm32/asm-offsets.c      |  1 +
>>  xen/arch/arm/arm32/entry.S            |  3 +++
>>  xen/arch/arm/arm64/asm-offsets.c      |  1 +
>>  xen/arch/arm/arm64/entry.S            | 13 +++++++++----
>>  xen/arch/arm/traps.c                  |  2 +-
>>  xen/include/asm-arm/arm32/processor.h |  2 +-
>>  xen/include/asm-arm/arm64/processor.h | 10 ++++++++--
>>  7 files changed, 24 insertions(+), 8 deletions(-)
>>
>> diff --git a/xen/arch/arm/arm32/asm-offsets.c 
>> b/xen/arch/arm/arm32/asm-offsets.c
>> index f8e6b53..5b543ab 100644
>> --- a/xen/arch/arm/arm32/asm-offsets.c
>> +++ b/xen/arch/arm/arm32/asm-offsets.c
>> @@ -26,6 +26,7 @@ void __dummy__(void)
>>     OFFSET(UREGS_lr, struct cpu_user_regs, lr);
>>     OFFSET(UREGS_pc, struct cpu_user_regs, pc);
>>     OFFSET(UREGS_cpsr, struct cpu_user_regs, cpsr);
>> +   OFFSET(UREGS_hsr, struct cpu_user_regs, hsr);
>>
>>     OFFSET(UREGS_LR_usr, struct cpu_user_regs, lr_usr);
>>     OFFSET(UREGS_SP_usr, struct cpu_user_regs, sp_usr);
>> diff --git a/xen/arch/arm/arm32/entry.S b/xen/arch/arm/arm32/entry.S
>> index 2a6f4f0..2187226 100644
>> --- a/xen/arch/arm/arm32/entry.S
>> +++ b/xen/arch/arm/arm32/entry.S
>> @@ -23,6 +23,9 @@
>>          add r11, sp, #UREGS_kernel_sizeof+4;                            \
>>          str r11, [sp, #UREGS_sp];                                       \
>>                                                                          \
>> +        mrc CP32(r11, HSR);             /* Save exception syndrome */   \
>> +        str r11, [sp, #UREGS_hsr];                                      \
>> +                                                                        \
>>          mrs r11, SPSR_hyp;                                              \
>>          str r11, [sp, #UREGS_cpsr];                                     \
>>          and r11, #PSR_MODE_MASK;                                        \
>> diff --git a/xen/arch/arm/arm64/asm-offsets.c 
>> b/xen/arch/arm/arm64/asm-offsets.c
>> index 69ea92a..ce24e44 100644
>> --- a/xen/arch/arm/arm64/asm-offsets.c
>> +++ b/xen/arch/arm/arm64/asm-offsets.c
>> @@ -27,6 +27,7 @@ void __dummy__(void)
>>     OFFSET(UREGS_SP, struct cpu_user_regs, sp);
>>     OFFSET(UREGS_PC, struct cpu_user_regs, pc);
>>     OFFSET(UREGS_CPSR, struct cpu_user_regs, cpsr);
>> +   OFFSET(UREGS_ESR_el2, struct cpu_user_regs, hsr);
>>
>>     OFFSET(UREGS_SPSR_el1, struct cpu_user_regs, spsr_el1);
>>
>> diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
>> index c181b5e..02802c0 100644
>> --- a/xen/arch/arm/arm64/entry.S
>> +++ b/xen/arch/arm/arm64/entry.S
>> @@ -121,9 +121,13 @@ lr      .req    x30             // link register
>>
>>          stp     lr, x21, [sp, #UREGS_LR]
>>
>> -        mrs     x22, elr_el2
>> -        mrs     x23, spsr_el2
>> -        stp     x22, x23, [sp, #UREGS_PC]
>> +        mrs     x21, elr_el2
>> +        str     x21, [sp, #UREGS_PC]
>
> Please explain in the commit message why you modify the lines above ...
>

I will do it in the next version.

>> +
>> +        add     x21, sp, #UREGS_CPSR
>> +        mrs     x22, spsr_el2
>> +        mrs     x23, esr_el2
>> +        stp     w22, w23, [x21]
>>
>>          .endm
>>
>> @@ -307,7 +311,8 @@ ENTRY(return_to_new_vcpu64)
>>  return_from_trap:
>>          msr     daifset, #2 /* Mask interrupts */
>>
>> -        ldp     x21, x22, [sp, #UREGS_PC]       // load ELR, SPSR
>> +        ldr     x21, [sp, #UREGS_PC]            // load ELR
>> +        ldr     w22, [sp, #UREGS_CPSR]          // load SPSR
>
> as long as those one.
>

Ok.

>>
>>          pop     x0, x1
>>          pop     x2, x3
>
> [...]
>
>> diff --git a/xen/include/asm-arm/arm64/processor.h 
>> b/xen/include/asm-arm/arm64/processor.h
>> index b0726ff..d381428 100644
>> --- a/xen/include/asm-arm/arm64/processor.h
>> +++ b/xen/include/asm-arm/arm64/processor.h
>> @@ -65,9 +65,15 @@ struct cpu_user_regs
>>
>>      /* Return address and mode */
>>      __DECL_REG(pc,           pc32);             /* ELR_EL2 */
>> +    /*
>> +     * Be careful for 32-bit registers, if we use xN to save 32-bit register
>> +     * to stack, its next field on stack will be overridden.
>> +     * For example, if we use xN to save SPSR_EL2 to stack will override the
>> +     * hsr field on stack.
>> +     * So, it's better to use wN to save 32-bit registers to stack.
>> +     */
>
> This comment is pointless. This is true for any 32-bit register, you
> should use wN unless you know that you have padding after.
>

Ok, I will remove it.

>>      uint32_t cpsr;                              /* SPSR_EL2 */
>> -
>> -    uint32_t pad0; /* Align end of kernel frame. */
>> +    uint32_t hsr;                               /* ESR_EL2 */
>>
>>      /* Outer guest frame only from here on... */
>>
>>
>
> Cheers,
>


-- 
Regards,
Wei Chen

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.