Re: [Xen-devel] [PATCH 2/5] xen: arm: remove hard tabs from asm code.
On Tue, 18 Dec 2012, Ian Campbell wrote:
> Run expand(1) over xen/arch/arm/.../*.S
>
> Add emacs local vars block.
>
> Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Acked-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
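
(For context, the conversion described in the commit message can be reproduced with expand(1), whose default tab stop of 8 matches the hard tabs being removed. The loop below is only an illustrative sketch; the exact invocation and file list used for this patch are assumptions, not taken from the message.)

    # Illustrative sketch: detab the ARM assembler sources with expand(1).
    # expand reads each file and writes the tab-expanded copy to stdout,
    # so the result is moved back over the original.
    for f in xen/arch/arm/*.S; do
        expand "$f" > "$f.detab" && mv "$f.detab" "$f"
    done
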
> xen/arch/arm/entry.S | 194 +++++++-------
> xen/arch/arm/head.S | 613 ++++++++++++++++++++++----------------------
> xen/arch/arm/mode_switch.S | 145 ++++++-----
> xen/arch/arm/proc-ca15.S | 17 +-
> 4 files changed, 498 insertions(+), 471 deletions(-)
>
> diff --git a/xen/arch/arm/entry.S b/xen/arch/arm/entry.S
> index 83793c2..cbd1c48 100644
> --- a/xen/arch/arm/entry.S
> +++ b/xen/arch/arm/entry.S
> @@ -2,79 +2,79 @@
> #include <asm/asm_defns.h>
> #include <public/xen.h>
>
> -#define SAVE_ONE_BANKED(reg) mrs r11, reg; str r11, [sp, #UREGS_##reg]
> -#define RESTORE_ONE_BANKED(reg) ldr r11, [sp, #UREGS_##reg]; msr reg, r11
> +#define SAVE_ONE_BANKED(reg) mrs r11, reg; str r11, [sp, #UREGS_##reg]
> +#define RESTORE_ONE_BANKED(reg) ldr r11, [sp, #UREGS_##reg]; msr reg, r11
>
> #define SAVE_BANKED(mode) \
> - SAVE_ONE_BANKED(SP_##mode) ; SAVE_ONE_BANKED(LR_##mode) ; SAVE_ONE_BANKED(SPSR_##mode)
> + SAVE_ONE_BANKED(SP_##mode) ; SAVE_ONE_BANKED(LR_##mode) ; SAVE_ONE_BANKED(SPSR_##mode)
>
> #define RESTORE_BANKED(mode) \
> - RESTORE_ONE_BANKED(SP_##mode) ; RESTORE_ONE_BANKED(LR_##mode) ; RESTORE_ONE_BANKED(SPSR_##mode)
> + RESTORE_ONE_BANKED(SP_##mode) ; RESTORE_ONE_BANKED(LR_##mode) ; RESTORE_ONE_BANKED(SPSR_##mode)
>
> -#define SAVE_ALL \
> - sub sp, #(UREGS_R8_fiq - UREGS_sp); /* SP, LR, SPSR, PC */ \
> - push {r0-r12}; /* Save R0-R12 */ \
> - \
> - mrs r11, ELR_hyp; /* ELR_hyp is return address. */\
> - str r11, [sp, #UREGS_pc]; \
> - \
> - str lr, [sp, #UREGS_lr]; \
> - \
> - add r11, sp, #UREGS_kernel_sizeof+4; \
> - str r11, [sp, #UREGS_sp]; \
> - \
> - mrs r11, SPSR_hyp; \
> - str r11, [sp, #UREGS_cpsr]; \
> - and r11, #PSR_MODE_MASK; \
> - cmp r11, #PSR_MODE_HYP; \
> - blne save_guest_regs
> +#define SAVE_ALL \
> + sub sp, #(UREGS_R8_fiq - UREGS_sp); /* SP, LR, SPSR, PC */ \
> + push {r0-r12}; /* Save R0-R12 */ \
> + \
> + mrs r11, ELR_hyp; /* ELR_hyp is return address. */\
> + str r11, [sp, #UREGS_pc]; \
> + \
> + str lr, [sp, #UREGS_lr]; \
> + \
> + add r11, sp, #UREGS_kernel_sizeof+4; \
> + str r11, [sp, #UREGS_sp]; \
> + \
> + mrs r11, SPSR_hyp; \
> + str r11, [sp, #UREGS_cpsr]; \
> + and r11, #PSR_MODE_MASK; \
> + cmp r11, #PSR_MODE_HYP; \
> + blne save_guest_regs
>
> save_guest_regs:
> - ldr r11, =0xffffffff /* Clobber SP which is only valid for hypervisor frames. */
> - str r11, [sp, #UREGS_sp]
> - SAVE_ONE_BANKED(SP_usr)
> - /* LR_usr is the same physical register as lr and is saved in SAVE_ALL */
> - SAVE_BANKED(svc)
> - SAVE_BANKED(abt)
> - SAVE_BANKED(und)
> - SAVE_BANKED(irq)
> - SAVE_BANKED(fiq)
> - SAVE_ONE_BANKED(R8_fiq); SAVE_ONE_BANKED(R9_fiq); SAVE_ONE_BANKED(R10_fiq)
> - SAVE_ONE_BANKED(R11_fiq); SAVE_ONE_BANKED(R12_fiq);
> - mov pc, lr
> + ldr r11, =0xffffffff /* Clobber SP which is only valid for hypervisor frames. */
> + str r11, [sp, #UREGS_sp]
> + SAVE_ONE_BANKED(SP_usr)
> + /* LR_usr is the same physical register as lr and is saved in SAVE_ALL */
> + SAVE_BANKED(svc)
> + SAVE_BANKED(abt)
> + SAVE_BANKED(und)
> + SAVE_BANKED(irq)
> + SAVE_BANKED(fiq)
> + SAVE_ONE_BANKED(R8_fiq); SAVE_ONE_BANKED(R9_fiq); SAVE_ONE_BANKED(R10_fiq)
> + SAVE_ONE_BANKED(R11_fiq); SAVE_ONE_BANKED(R12_fiq);
> + mov pc, lr
>
> -#define DEFINE_TRAP_ENTRY(trap) \
> - ALIGN; \
> -trap_##trap: \
> - SAVE_ALL; \
> - cpsie i; /* local_irq_enable */ \
> - adr lr, return_from_trap; \
> - mov r0, sp; \
> - mov r11, sp; \
> - bic sp, #7; /* Align the stack pointer (noop on guest trap) */ \
> - b do_trap_##trap
> +#define DEFINE_TRAP_ENTRY(trap) \
> + ALIGN; \
> +trap_##trap: \
> + SAVE_ALL; \
> + cpsie i; /* local_irq_enable */ \
> + adr lr, return_from_trap; \
> + mov r0, sp; \
> + mov r11, sp; \
> + bic sp, #7; /* Align the stack pointer (noop on guest trap) */ \
> + b do_trap_##trap
>
> -#define DEFINE_TRAP_ENTRY_NOIRQ(trap) \
> - ALIGN; \
> -trap_##trap: \
> - SAVE_ALL; \
> - adr lr, return_from_trap; \
> - mov r0, sp; \
> - mov r11, sp; \
> - bic sp, #7; /* Align the stack pointer (noop on guest trap) */ \
> - b do_trap_##trap
> +#define DEFINE_TRAP_ENTRY_NOIRQ(trap) \
> + ALIGN; \
> +trap_##trap: \
> + SAVE_ALL; \
> + adr lr, return_from_trap; \
> + mov r0, sp; \
> + mov r11, sp; \
> + bic sp, #7; /* Align the stack pointer (noop on guest trap) */ \
> + b do_trap_##trap
>
> .globl hyp_traps_vector
> - .align 5
> + .align 5
> hyp_traps_vector:
> - .word 0 /* 0x00 - Reset */
> - b trap_undefined_instruction /* 0x04 - Undefined Instruction */
> - b trap_supervisor_call /* 0x08 - Supervisor Call */
> - b trap_prefetch_abort /* 0x0c - Prefetch Abort */
> - b trap_data_abort /* 0x10 - Data Abort */
> - b trap_hypervisor /* 0x14 - Hypervisor */
> - b trap_irq /* 0x18 - IRQ */
> - b trap_fiq /* 0x1c - FIQ */
> + .word 0 /* 0x00 - Reset */
> + b trap_undefined_instruction /* 0x04 - Undefined Instruction */
> + b trap_supervisor_call /* 0x08 - Supervisor Call */
> + b trap_prefetch_abort /* 0x0c - Prefetch Abort */
> + b trap_data_abort /* 0x10 - Data Abort */
> + b trap_hypervisor /* 0x14 - Hypervisor */
> + b trap_irq /* 0x18 - IRQ */
> + b trap_fiq /* 0x1c - FIQ */
>
> DEFINE_TRAP_ENTRY(undefined_instruction)
> DEFINE_TRAP_ENTRY(supervisor_call)
> @@ -85,38 +85,38 @@ DEFINE_TRAP_ENTRY_NOIRQ(irq)
> DEFINE_TRAP_ENTRY_NOIRQ(fiq)
>
> return_from_trap:
> - mov sp, r11
> + mov sp, r11
> ENTRY(return_to_new_vcpu)
> - ldr r11, [sp, #UREGS_cpsr]
> - and r11, #PSR_MODE_MASK
> - cmp r11, #PSR_MODE_HYP
> - beq return_to_hypervisor
> - /* Fall thru */
> + ldr r11, [sp, #UREGS_cpsr]
> + and r11, #PSR_MODE_MASK
> + cmp r11, #PSR_MODE_HYP
> + beq return_to_hypervisor
> + /* Fall thru */
> ENTRY(return_to_guest)
> - mov r11, sp
> - bic sp, #7 /* Align the stack pointer */
> - bl leave_hypervisor_tail /* Disables interrupts on return */
> - mov sp, r11
> - RESTORE_ONE_BANKED(SP_usr)
> - /* LR_usr is the same physical register as lr and is restored below */
> - RESTORE_BANKED(svc)
> - RESTORE_BANKED(abt)
> - RESTORE_BANKED(und)
> - RESTORE_BANKED(irq)
> - RESTORE_BANKED(fiq)
> - RESTORE_ONE_BANKED(R8_fiq); RESTORE_ONE_BANKED(R9_fiq); RESTORE_ONE_BANKED(R10_fiq)
> - RESTORE_ONE_BANKED(R11_fiq); RESTORE_ONE_BANKED(R12_fiq);
> - /* Fall thru */
> + mov r11, sp
> + bic sp, #7 /* Align the stack pointer */
> + bl leave_hypervisor_tail /* Disables interrupts on return */
> + mov sp, r11
> + RESTORE_ONE_BANKED(SP_usr)
> + /* LR_usr is the same physical register as lr and is restored below */
> + RESTORE_BANKED(svc)
> + RESTORE_BANKED(abt)
> + RESTORE_BANKED(und)
> + RESTORE_BANKED(irq)
> + RESTORE_BANKED(fiq)
> + RESTORE_ONE_BANKED(R8_fiq); RESTORE_ONE_BANKED(R9_fiq); RESTORE_ONE_BANKED(R10_fiq)
> + RESTORE_ONE_BANKED(R11_fiq); RESTORE_ONE_BANKED(R12_fiq);
> + /* Fall thru */
> ENTRY(return_to_hypervisor)
> - cpsid i
> - ldr lr, [sp, #UREGS_lr]
> - ldr r11, [sp, #UREGS_pc]
> - msr ELR_hyp, r11
> - ldr r11, [sp, #UREGS_cpsr]
> - msr SPSR_hyp, r11
> - pop {r0-r12}
> - add sp, #(UREGS_R8_fiq - UREGS_sp); /* SP, LR, SPSR, PC */
> - eret
> + cpsid i
> + ldr lr, [sp, #UREGS_lr]
> + ldr r11, [sp, #UREGS_pc]
> + msr ELR_hyp, r11
> + ldr r11, [sp, #UREGS_cpsr]
> + msr SPSR_hyp, r11
> + pop {r0-r12}
> + add sp, #(UREGS_R8_fiq - UREGS_sp); /* SP, LR, SPSR, PC */
> + eret
>
> /*
> * struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next)
> @@ -127,9 +127,15 @@ ENTRY(return_to_hypervisor)
> * Returns prev in r0
> */
> ENTRY(__context_switch)
> - add ip, r0, #VCPU_arch_saved_context
> - stmia ip!, {r4 - sl, fp, sp, lr} /* Save register state */
> + add ip, r0, #VCPU_arch_saved_context
> + stmia ip!, {r4 - sl, fp, sp, lr} /* Save register state */
>
> - add r4, r1, #VCPU_arch_saved_context
> - ldmia r4, {r4 - sl, fp, sp, pc} /* Load registers and return */
> + add r4, r1, #VCPU_arch_saved_context
> + ldmia r4, {r4 - sl, fp, sp, pc} /* Load registers and return */
>
> +/*
> + * Local variables:
> + * mode: ASM
> + * indent-tabs-mode: nil
> + * End:
> + */
> diff --git a/xen/arch/arm/head.S b/xen/arch/arm/head.S
> index 8e2e284..0d9a799 100644
> --- a/xen/arch/arm/head.S
> +++ b/xen/arch/arm/head.S
> @@ -36,366 +36,366 @@
> * Clobbers r0-r3. */
> #ifdef EARLY_UART_ADDRESS
> #define PRINT(_s) \
> - adr r0, 98f ; \
> - bl puts ; \
> - b 99f ; \
> -98: .asciz _s ; \
> - .align 2 ; \
> + adr r0, 98f ; \
> + bl puts ; \
> + b 99f ; \
> +98: .asciz _s ; \
> + .align 2 ; \
> 99:
> #else
> #define PRINT(s)
> #endif
>
> - .arm
> + .arm
>
> - /* This must be the very first address in the loaded image.
> - * It should be linked at XEN_VIRT_START, and loaded at any
> - * 2MB-aligned address. All of text+data+bss must fit in 2MB,
> - * or the initial pagetable code below will need adjustment. */
> - .global start
> + /* This must be the very first address in the loaded image.
> + * It should be linked at XEN_VIRT_START, and loaded at any
> + * 2MB-aligned address. All of text+data+bss must fit in 2MB,
> + * or the initial pagetable code below will need adjustment. */
> + .global start
> start:
>
> - /* zImage magic header, see:
> -  * http://www.simtec.co.uk/products/SWLINUX/files/booting_article.html#d0e309
> - */
> - .rept 8
> - mov r0, r0
> - .endr
> - b past_zImage
> + /* zImage magic header, see:
> +  * http://www.simtec.co.uk/products/SWLINUX/files/booting_article.html#d0e309
> + */
> + .rept 8
> + mov r0, r0
> + .endr
> + b past_zImage
>
> - .word ZIMAGE_MAGIC_NUMBER /* Magic numbers to help the loader */
> - .word 0x00000000 /* absolute load/run zImage address or
> - * 0 for PiC */
> - .word (_end - start) /* zImage end address */
> + .word ZIMAGE_MAGIC_NUMBER /* Magic numbers to help the loader */
> + .word 0x00000000 /* absolute load/run zImage address or
> + * 0 for PiC */
> + .word (_end - start) /* zImage end address */
>
> past_zImage:
> - cpsid aif /* Disable all interrupts */
> + cpsid aif /* Disable all interrupts */
>
> - /* Save the bootloader arguments in less-clobberable registers */
> - mov r7, r1 /* r7 := ARM-linux machine type */
> - mov r8, r2 /* r8 := ATAG base address */
> + /* Save the bootloader arguments in less-clobberable registers */
> + mov r7, r1 /* r7 := ARM-linux machine type */
> + mov r8, r2 /* r8 := ATAG base address */
>
> - /* Find out where we are */
> - ldr r0, =start
> - adr r9, start /* r9 := paddr (start) */
> - sub r10, r9, r0 /* r10 := phys-offset */
> + /* Find out where we are */
> + ldr r0, =start
> + adr r9, start /* r9 := paddr (start) */
> + sub r10, r9, r0 /* r10 := phys-offset */
>
> - /* Using the DTB in the .dtb section? */
> + /* Using the DTB in the .dtb section? */
> #ifdef CONFIG_DTB_FILE
> - ldr r8, =_sdtb
> - add r8, r10 /* r8 := paddr(DTB) */
> + ldr r8, =_sdtb
> + add r8, r10 /* r8 := paddr(DTB) */
> #endif
>
> - /* Are we the boot CPU? */
> - mov r12, #0 /* r12 := CPU ID */
> - mrc CP32(r0, MPIDR)
> - tst r0, #(1<<31) /* Multiprocessor extension supported? */
> - beq boot_cpu
> - tst r0, #(1<<30) /* Uniprocessor system? */
> - bne boot_cpu
> - bics r12, r0, #(0xff << 24) /* Mask out flags to get CPU ID */
> - beq boot_cpu /* If we're CPU 0, boot now */
> -
> - /* Non-boot CPUs wait here to be woken up one at a time. */
> -1: dsb
> - ldr r0, =smp_up_cpu /* VA of gate */
> - add r0, r0, r10 /* PA of gate */
> - ldr r1, [r0] /* Which CPU is being booted? */
> - teq r1, r12 /* Is it us? */
> - wfene
> - bne 1b
> + /* Are we the boot CPU? */
> + mov r12, #0 /* r12 := CPU ID */
> + mrc CP32(r0, MPIDR)
> + tst r0, #(1<<31) /* Multiprocessor extension supported? */
> + beq boot_cpu
> + tst r0, #(1<<30) /* Uniprocessor system? */
> + bne boot_cpu
> + bics r12, r0, #(0xff << 24) /* Mask out flags to get CPU ID */
> + beq boot_cpu /* If we're CPU 0, boot now */
> +
> + /* Non-boot CPUs wait here to be woken up one at a time. */
> +1: dsb
> + ldr r0, =smp_up_cpu /* VA of gate */
> + add r0, r0, r10 /* PA of gate */
> + ldr r1, [r0] /* Which CPU is being booted? */
> + teq r1, r12 /* Is it us? */
> + wfene
> + bne 1b
>
> boot_cpu:
> #ifdef EARLY_UART_ADDRESS
> - ldr r11, =EARLY_UART_ADDRESS /* r11 := UART base address */
> - teq r12, #0 /* CPU 0 sets up the UART too */
> - bleq init_uart
> - PRINT("- CPU ")
> - mov r0, r12
> - bl putn
> - PRINT(" booting -\r\n")
> + ldr r11, =EARLY_UART_ADDRESS /* r11 := UART base address */
> + teq r12, #0 /* CPU 0 sets up the UART too */
> + bleq init_uart
> + PRINT("- CPU ")
> + mov r0, r12
> + bl putn
> + PRINT(" booting -\r\n")
> #endif
>
> - /* Wake up secondary cpus */
> - teq r12, #0
> - bleq kick_cpus
> -
> - /* Check that this CPU has Hyp mode */
> - mrc CP32(r0, ID_PFR1)
> - and r0, r0, #0xf000 /* Bits 12-15 define virt extensions */
> - teq r0, #0x1000 /* Must == 0x1 or may be incompatible */
> - beq 1f
> - PRINT("- CPU doesn't support the virtualization extensions -\r\n")
> - b fail
> + /* Wake up secondary cpus */
> + teq r12, #0
> + bleq kick_cpus
> +
> + /* Check that this CPU has Hyp mode */
> + mrc CP32(r0, ID_PFR1)
> + and r0, r0, #0xf000 /* Bits 12-15 define virt extensions */
> + teq r0, #0x1000 /* Must == 0x1 or may be incompatible */
> + beq 1f
> + PRINT("- CPU doesn't support the virtualization extensions -\r\n")
> + b fail
> 1:
> - /* Check if we're already in it */
> - mrs r0, cpsr
> - and r0, r0, #0x1f /* Mode is in the low 5 bits of CPSR */
> - teq r0, #0x1a /* Hyp Mode? */
> - bne 1f
> - PRINT("- Started in Hyp mode -\r\n")
> - b hyp
> + /* Check if we're already in it */
> + mrs r0, cpsr
> + and r0, r0, #0x1f /* Mode is in the low 5 bits of CPSR */
> + teq r0, #0x1a /* Hyp Mode? */
> + bne 1f
> + PRINT("- Started in Hyp mode -\r\n")
> + b hyp
> 1:
> - /* Otherwise, it must have been Secure Supervisor mode */
> - mrc CP32(r0, SCR)
> - tst r0, #0x1 /* Not-Secure bit set? */
> - beq 1f
> - PRINT("- CPU is not in Hyp mode or Secure state -\r\n")
> - b fail
> + /* Otherwise, it must have been Secure Supervisor mode */
> + mrc CP32(r0, SCR)
> + tst r0, #0x1 /* Not-Secure bit set? */
> + beq 1f
> + PRINT("- CPU is not in Hyp mode or Secure state -\r\n")
> + b fail
> 1:
> - /* OK, we're in Secure state. */
> - PRINT("- Started in Secure state -\r\n- Entering Hyp mode -\r\n")
> - ldr r0, =enter_hyp_mode /* VA of function */
> - adr lr, hyp /* Set return address for call */
> - add pc, r0, r10 /* Call PA of function */
> + /* OK, we're in Secure state. */
> + PRINT("- Started in Secure state -\r\n- Entering Hyp mode -\r\n")
> + ldr r0, =enter_hyp_mode /* VA of function */
> + adr lr, hyp /* Set return address for call */
> + add pc, r0, r10 /* Call PA of function */
>
> hyp:
>
> - /* Zero BSS On the boot CPU to avoid nasty surprises */
> - teq r12, #0
> - bne skip_bss
> -
> - PRINT("- Zero BSS -\r\n")
> - ldr r0, =__bss_start /* Load start & end of bss */
> - ldr r1, =__bss_end
> - add r0, r0, r10 /* Apply physical offset */
> - add r1, r1, r10
> -
> - mov r2, #0
> -1: str r2, [r0], #4
> - cmp r0, r1
> - blo 1b
> -
> -skip_bss:
> -
> - PRINT("- Setting up control registers -\r\n")
> -
> - /* Read CPU ID */
> - mrc CP32(r0, MIDR)
> - ldr r1, =(MIDR_MASK)
> - and r0, r0, r1
> - /* Is this a Cortex A15? */
> - ldr r1, =(CORTEX_A15_ID)
> - teq r0, r1
> - bleq cortex_a15_init
> -
> - /* Set up memory attribute type tables */
> - ldr r0, =MAIR0VAL
> - ldr r1, =MAIR1VAL
> - mcr CP32(r0, MAIR0)
> - mcr CP32(r1, MAIR1)
> - mcr CP32(r0, HMAIR0)
> - mcr CP32(r1, HMAIR1)
> -
> - /* Set up the HTCR:
> - * PT walks use Outer-Shareable accesses,
> - * PT walks are write-back, no-write-allocate in both cache levels,
> - * Full 32-bit address space goes through this table. */
> - ldr r0, =0x80002500
> - mcr CP32(r0, HTCR)
> -
> - /* Set up the HSCTLR:
> - * Exceptions in LE ARM,
> - * Low-latency IRQs disabled,
> - * Write-implies-XN disabled (for now),
> - * D-cache disabled (for now),
> - * I-cache enabled,
> - * Alignment checking enabled,
> - * MMU translation disabled (for now). */
> - ldr r0, =(HSCTLR_BASE|SCTLR_A)
> - mcr CP32(r0, HSCTLR)
> -
> - /* Write Xen's PT's paddr into the HTTBR */
> - ldr r4, =xen_pgtable
> - add r4, r4, r10 /* r4 := paddr (xen_pagetable) */
> - mov r5, #0 /* r4:r5 is paddr (xen_pagetable) */
> - mcrr CP64(r4, r5, HTTBR)
> -
> - /* Non-boot CPUs don't need to rebuild the pagetable */
> - teq r12, #0
> - bne pt_ready
> -
> - /* console fixmap */
> + /* Zero BSS On the boot CPU to avoid nasty surprises */
> + teq r12, #0
> + bne skip_bss
> +
> + PRINT("- Zero BSS -\r\n")
> + ldr r0, =__bss_start /* Load start & end of bss */
> + ldr r1, =__bss_end
> + add r0, r0, r10 /* Apply physical offset */
> + add r1, r1, r10
> +
> + mov r2, #0
> +1: str r2, [r0], #4
> + cmp r0, r1
> + blo 1b
> +
> +skip_bss:
> +
> + PRINT("- Setting up control registers -\r\n")
> +
> + /* Read CPU ID */
> + mrc CP32(r0, MIDR)
> + ldr r1, =(MIDR_MASK)
> + and r0, r0, r1
> + /* Is this a Cortex A15? */
> + ldr r1, =(CORTEX_A15_ID)
> + teq r0, r1
> + bleq cortex_a15_init
> +
> + /* Set up memory attribute type tables */
> + ldr r0, =MAIR0VAL
> + ldr r1, =MAIR1VAL
> + mcr CP32(r0, MAIR0)
> + mcr CP32(r1, MAIR1)
> + mcr CP32(r0, HMAIR0)
> + mcr CP32(r1, HMAIR1)
> +
> + /* Set up the HTCR:
> + * PT walks use Outer-Shareable accesses,
> + * PT walks are write-back, no-write-allocate in both cache levels,
> + * Full 32-bit address space goes through this table. */
> + ldr r0, =0x80002500
> + mcr CP32(r0, HTCR)
> +
> + /* Set up the HSCTLR:
> + * Exceptions in LE ARM,
> + * Low-latency IRQs disabled,
> + * Write-implies-XN disabled (for now),
> + * D-cache disabled (for now),
> + * I-cache enabled,
> + * Alignment checking enabled,
> + * MMU translation disabled (for now). */
> + ldr r0, =(HSCTLR_BASE|SCTLR_A)
> + mcr CP32(r0, HSCTLR)
> +
> + /* Write Xen's PT's paddr into the HTTBR */
> + ldr r4, =xen_pgtable
> + add r4, r4, r10 /* r4 := paddr (xen_pagetable) */
> + mov r5, #0 /* r4:r5 is paddr (xen_pagetable) */
> + mcrr CP64(r4, r5, HTTBR)
> +
> + /* Non-boot CPUs don't need to rebuild the pagetable */
> + teq r12, #0
> + bne pt_ready
> +
> + /* console fixmap */
> #ifdef EARLY_UART_ADDRESS
> - ldr r1, =xen_fixmap
> - add r1, r1, r10 /* r1 := paddr (xen_fixmap) */
> - mov r3, #0
> - lsr r2, r11, #12
> - lsl r2, r2, #12 /* 4K aligned paddr of UART */
> - orr r2, r2, #PT_UPPER(DEV_L3)
> - orr r2, r2, #PT_LOWER(DEV_L3) /* r2:r3 := 4K dev map including UART */
> - strd r2, r3, [r1, #(FIXMAP_CONSOLE*8)] /* Map it in the first fixmap's slot */
> + ldr r1, =xen_fixmap
> + add r1, r1, r10 /* r1 := paddr (xen_fixmap) */
> + mov r3, #0
> + lsr r2, r11, #12
> + lsl r2, r2, #12 /* 4K aligned paddr of UART */
> + orr r2, r2, #PT_UPPER(DEV_L3)
> + orr r2, r2, #PT_LOWER(DEV_L3) /* r2:r3 := 4K dev map including UART */
> + strd r2, r3, [r1, #(FIXMAP_CONSOLE*8)] /* Map it in the first fixmap's slot */
> #endif
>
> - /* Build the baseline idle pagetable's first-level entries */
> - ldr r1, =xen_second
> - add r1, r1, r10 /* r1 := paddr (xen_second) */
> - mov r3, #0x0
> - orr r2, r1, #PT_UPPER(PT) /* r2:r3 := table map of xen_second */
> - orr r2, r2, #PT_LOWER(PT) /* (+ rights for linear PT) */
> - strd r2, r3, [r4, #0] /* Map it in slot 0 */
> - add r2, r2, #0x1000
> - strd r2, r3, [r4, #8] /* Map 2nd page in slot 1 */
> - add r2, r2, #0x1000
> - strd r2, r3, [r4, #16] /* Map 3rd page in slot 2 */
> - add r2, r2, #0x1000
> - strd r2, r3, [r4, #24] /* Map 4th page in slot 3 */
> -
> - /* Now set up the second-level entries */
> - orr r2, r9, #PT_UPPER(MEM)
> - orr r2, r2, #PT_LOWER(MEM) /* r2:r3 := 2MB normal map of Xen */
> - mov r4, r9, lsr #18 /* Slot for paddr(start) */
> - strd r2, r3, [r1, r4] /* Map Xen there */
> - ldr r4, =start
> - lsr r4, #18 /* Slot for vaddr(start) */
> - strd r2, r3, [r1, r4] /* Map Xen there too */
> -
> - /* xen_fixmap pagetable */
> - ldr r2, =xen_fixmap
> - add r2, r2, r10 /* r2 := paddr (xen_fixmap) */
> - orr r2, r2, #PT_UPPER(PT)
> - orr r2, r2, #PT_LOWER(PT) /* r2:r3 := table map of xen_fixmap */
> - add r4, r4, #8
> - strd r2, r3, [r1, r4] /* Map it in the fixmap's slot */
> -
> - mov r3, #0x0
> - lsr r2, r8, #21
> - lsl r2, r2, #21 /* 2MB-aligned paddr of DTB */
> - orr r2, r2, #PT_UPPER(MEM)
> - orr r2, r2, #PT_LOWER(MEM) /* r2:r3 := 2MB RAM incl. DTB */
> - add r4, r4, #8
> - strd r2, r3, [r1, r4] /* Map it in the early boot slot */
> + /* Build the baseline idle pagetable's first-level entries */
> + ldr r1, =xen_second
> + add r1, r1, r10 /* r1 := paddr (xen_second) */
> + mov r3, #0x0
> + orr r2, r1, #PT_UPPER(PT) /* r2:r3 := table map of xen_second */
> + orr r2, r2, #PT_LOWER(PT) /* (+ rights for linear PT) */
> + strd r2, r3, [r4, #0] /* Map it in slot 0 */
> + add r2, r2, #0x1000
> + strd r2, r3, [r4, #8] /* Map 2nd page in slot 1 */
> + add r2, r2, #0x1000
> + strd r2, r3, [r4, #16] /* Map 3rd page in slot 2 */
> + add r2, r2, #0x1000
> + strd r2, r3, [r4, #24] /* Map 4th page in slot 3 */
> +
> + /* Now set up the second-level entries */
> + orr r2, r9, #PT_UPPER(MEM)
> + orr r2, r2, #PT_LOWER(MEM) /* r2:r3 := 2MB normal map of Xen */
> + mov r4, r9, lsr #18 /* Slot for paddr(start) */
> + strd r2, r3, [r1, r4] /* Map Xen there */
> + ldr r4, =start
> + lsr r4, #18 /* Slot for vaddr(start) */
> + strd r2, r3, [r1, r4] /* Map Xen there too */
> +
> + /* xen_fixmap pagetable */
> + ldr r2, =xen_fixmap
> + add r2, r2, r10 /* r2 := paddr (xen_fixmap) */
> + orr r2, r2, #PT_UPPER(PT)
> + orr r2, r2, #PT_LOWER(PT) /* r2:r3 := table map of xen_fixmap */
> + add r4, r4, #8
> + strd r2, r3, [r1, r4] /* Map it in the fixmap's slot */
> +
> + mov r3, #0x0
> + lsr r2, r8, #21
> + lsl r2, r2, #21 /* 2MB-aligned paddr of DTB */
> + orr r2, r2, #PT_UPPER(MEM)
> + orr r2, r2, #PT_LOWER(MEM) /* r2:r3 := 2MB RAM incl. DTB */
> + add r4, r4, #8
> + strd r2, r3, [r1, r4] /* Map it in the early boot slot */
>
> pt_ready:
> - PRINT("- Turning on paging -\r\n")
> -
> - ldr r1, =paging /* Explicit vaddr, not RIP-relative */
> - mrc CP32(r0, HSCTLR)
> - orr r0, r0, #(SCTLR_M|SCTLR_C) /* Enable MMU and D-cache */
> - dsb /* Flush PTE writes and finish reads */
> - mcr CP32(r0, HSCTLR) /* now paging is enabled */
> - isb /* Now, flush the icache */
> - mov pc, r1 /* Get a proper vaddr into PC */
> + PRINT("- Turning on paging -\r\n")
> +
> + ldr r1, =paging /* Explicit vaddr, not RIP-relative */
> + mrc CP32(r0, HSCTLR)
> + orr r0, r0, #(SCTLR_M|SCTLR_C) /* Enable MMU and D-cache */
> + dsb /* Flush PTE writes and finish reads */
> + mcr CP32(r0, HSCTLR) /* now paging is enabled */
> + isb /* Now, flush the icache */
> + mov pc, r1 /* Get a proper vaddr into PC */
> paging:
>
>
> #ifdef EARLY_UART_ADDRESS
> - /* Use a virtual address to access the UART. */
> - ldr r11, =FIXMAP_ADDR(FIXMAP_CONSOLE)
> + /* Use a virtual address to access the UART. */
> + ldr r11, =FIXMAP_ADDR(FIXMAP_CONSOLE)
> #endif
>
> - PRINT("- Ready -\r\n")
> -
> - /* The boot CPU should go straight into C now */
> - teq r12, #0
> - beq launch
> -
> - /* Non-boot CPUs need to move on to the relocated pagetables */
> - mov r0, #0
> - ldr r4, =boot_httbr /* VA of HTTBR value stashed by CPU 0 */
> - add r4, r4, r10 /* PA of it */
> - ldrd r4, r5, [r4] /* Actual value */
> - dsb
> - mcrr CP64(r4, r5, HTTBR)
> - dsb
> - isb
> - mcr CP32(r0, TLBIALLH) /* Flush hypervisor TLB */
> - mcr CP32(r0, ICIALLU) /* Flush I-cache */
> - mcr CP32(r0, BPIALL) /* Flush branch predictor */
> - dsb /* Ensure completion of TLB+BP flush */
> - isb
> -
> - /* Non-boot CPUs report that they've got this far */
> - ldr r0, =ready_cpus
> -1: ldrex r1, [r0] /* { read # of ready CPUs } */
> - add r1, r1, #1 /* Atomically { ++ } */
> - strex r2, r1, [r0] /* { writeback } */
> - teq r2, #0
> - bne 1b
> - dsb
> - mcr CP32(r0, DCCMVAC) /* flush D-Cache */
> - dsb
> -
> - /* Here, the non-boot CPUs must wait again -- they're now running on
> - * the boot CPU's pagetables so it's safe for the boot CPU to
> - * overwrite the non-relocated copy of Xen. Once it's done that,
> - * and brought up the memory allocator, non-boot CPUs can get their
> - * own stacks and enter C. */
> -1: wfe
> - dsb
> - ldr r0, =smp_up_cpu
> - ldr r1, [r0] /* Which CPU is being booted? */
> - teq r1, r12 /* Is it us? */
> - bne 1b
> -
> -launch:
> - ldr r0, =init_stack /* Find the boot-time stack */
> - ldr sp, [r0]
> - add sp, #STACK_SIZE /* (which grows down from the top). */
> - sub sp, #CPUINFO_sizeof /* Make room for CPU save record */
> - mov r0, r10 /* Marshal args: - phys_offset */
> - mov r1, r7 /* - machine type */
> - mov r2, r8 /* - ATAG address */
> - movs r3, r12 /* - CPU ID */
> - beq start_xen /* and disappear into the land of C */
> - b start_secondary /* (to the appropriate entry point) */
> + PRINT("- Ready -\r\n")
> +
> + /* The boot CPU should go straight into C now */
> + teq r12, #0
> + beq launch
> +
> + /* Non-boot CPUs need to move on to the relocated pagetables */
> + mov r0, #0
> + ldr r4, =boot_httbr /* VA of HTTBR value stashed by CPU 0 */
> + add r4, r4, r10 /* PA of it */
> + ldrd r4, r5, [r4] /* Actual value */
> + dsb
> + mcrr CP64(r4, r5, HTTBR)
> + dsb
> + isb
> + mcr CP32(r0, TLBIALLH) /* Flush hypervisor TLB */
> + mcr CP32(r0, ICIALLU) /* Flush I-cache */
> + mcr CP32(r0, BPIALL) /* Flush branch predictor */
> + dsb /* Ensure completion of TLB+BP flush */
> + isb
> +
> + /* Non-boot CPUs report that they've got this far */
> + ldr r0, =ready_cpus
> +1: ldrex r1, [r0] /* { read # of ready CPUs } */
> + add r1, r1, #1 /* Atomically { ++ } */
> + strex r2, r1, [r0] /* { writeback } */
> + teq r2, #0
> + bne 1b
> + dsb
> + mcr CP32(r0, DCCMVAC) /* flush D-Cache */
> + dsb
> +
> + /* Here, the non-boot CPUs must wait again -- they're now running on
> + * the boot CPU's pagetables so it's safe for the boot CPU to
> + * overwrite the non-relocated copy of Xen. Once it's done that,
> + * and brought up the memory allocator, non-boot CPUs can get their
> + * own stacks and enter C. */
> +1: wfe
> + dsb
> + ldr r0, =smp_up_cpu
> + ldr r1, [r0] /* Which CPU is being booted? */
> + teq r1, r12 /* Is it us? */
> + bne 1b
> +
> +launch:
> + ldr r0, =init_stack /* Find the boot-time stack */
> + ldr sp, [r0]
> + add sp, #STACK_SIZE /* (which grows down from the top). */
> + sub sp, #CPUINFO_sizeof /* Make room for CPU save record */
> + mov r0, r10 /* Marshal args: - phys_offset */
> + mov r1, r7 /* - machine type */
> + mov r2, r8 /* - ATAG address */
> + movs r3, r12 /* - CPU ID */
> + beq start_xen /* and disappear into the land of C */
> + b start_secondary /* (to the appropriate entry point) */
>
> /* Fail-stop
> * r0: string explaining why */
> -fail: PRINT("- Boot failed -\r\n")
> -1: wfe
> - b 1b
> +fail: PRINT("- Boot failed -\r\n")
> +1: wfe
> + b 1b
>
> #ifdef EARLY_UART_ADDRESS
>
> /* Bring up the UART. Specific to the PL011 UART.
> * Clobbers r0-r2 */
> init_uart:
> - mov r1, #0x0
> - str r1, [r11, #0x24] /* -> UARTIBRD (Baud divisor fraction) */
> - mov r1, #0x4 /* 7.3728MHz / 0x4 == 16 * 115200 */
> - str r1, [r11, #0x24] /* -> UARTIBRD (Baud divisor integer) */
> - mov r1, #0x60 /* 8n1 */
> - str r1, [r11, #0x24] /* -> UARTLCR_H (Line control) */
> - ldr r1, =0x00000301 /* RXE | TXE | UARTEN */
> - str r1, [r11, #0x30] /* -> UARTCR (Control Register) */
> - adr r0, 1f
> - b puts
> -1: .asciz "- UART enabled -\r\n"
> - .align 4
> + mov r1, #0x0
> + str r1, [r11, #0x24] /* -> UARTIBRD (Baud divisor fraction) */
> + mov r1, #0x4 /* 7.3728MHz / 0x4 == 16 * 115200 */
> + str r1, [r11, #0x24] /* -> UARTIBRD (Baud divisor integer) */
> + mov r1, #0x60 /* 8n1 */
> + str r1, [r11, #0x24] /* -> UARTLCR_H (Line control) */
> + ldr r1, =0x00000301 /* RXE | TXE | UARTEN */
> + str r1, [r11, #0x30] /* -> UARTCR (Control Register) */
> + adr r0, 1f
> + b puts
> +1: .asciz "- UART enabled -\r\n"
> + .align 4
>
> /* Print early debug messages. Specific to the PL011 UART.
> * r0: Nul-terminated string to print.
> * Clobbers r0-r2 */
> puts:
> - ldr r2, [r11, #0x18] /* <- UARTFR (Flag register) */
> - tst r2, #0x8 /* Check BUSY bit */
> - bne puts /* Wait for the UART to be ready */
> - ldrb r2, [r0], #1 /* Load next char */
> - teq r2, #0 /* Exit on nul */
> - moveq pc, lr
> - str r2, [r11] /* -> UARTDR (Data Register) */
> - b puts
> + ldr r2, [r11, #0x18] /* <- UARTFR (Flag register) */
> + tst r2, #0x8 /* Check BUSY bit */
> + bne puts /* Wait for the UART to be ready */
> + ldrb r2, [r0], #1 /* Load next char */
> + teq r2, #0 /* Exit on nul */
> + moveq pc, lr
> + str r2, [r11] /* -> UARTDR (Data Register) */
> + b puts
>
> /* Print a 32-bit number in hex. Specific to the PL011 UART.
> * r0: Number to print.
> * clobbers r0-r3 */
> putn:
> - adr r1, hex
> - mov r3, #8
> -1: ldr r2, [r11, #0x18] /* <- UARTFR (Flag register) */
> - tst r2, #0x8 /* Check BUSY bit */
> - bne 1b /* Wait for the UART to be ready */
> - and r2, r0, #0xf0000000 /* Mask off the top nybble */
> - ldrb r2, [r1, r2, lsr #28] /* Convert to a char */
> - str r2, [r11] /* -> UARTDR (Data Register) */
> - lsl r0, #4 /* Roll it through one nybble at a time */
> - subs r3, r3, #1
> - bne 1b
> - mov pc, lr
> -
> -hex: .ascii "0123456789abcdef"
> - .align 2
> + adr r1, hex
> + mov r3, #8
> +1: ldr r2, [r11, #0x18] /* <- UARTFR (Flag register) */
> + tst r2, #0x8 /* Check BUSY bit */
> + bne 1b /* Wait for the UART to be ready */
> + and r2, r0, #0xf0000000 /* Mask off the top nybble */
> + ldrb r2, [r1, r2, lsr #28] /* Convert to a char */
> + str r2, [r11] /* -> UARTDR (Data Register) */
> + lsl r0, #4 /* Roll it through one nybble at a time */
> + subs r3, r3, #1
> + bne 1b
> + mov pc, lr
> +
> +hex: .ascii "0123456789abcdef"
> + .align 2
>
> #else /* EARLY_UART_ADDRESS */
>
> @@ -403,6 +403,13 @@ init_uart:
> .global early_puts
> early_puts:
> puts:
> -putn: mov pc, lr
> +putn: mov pc, lr
>
> #endif /* EARLY_UART_ADDRESS */
> +
> +/*
> + * Local variables:
> + * mode: ASM
> + * indent-tabs-mode: nil
> + * End:
> + */
> diff --git a/xen/arch/arm/mode_switch.S b/xen/arch/arm/mode_switch.S
> index 7c3b357..3dba38d 100644
> --- a/xen/arch/arm/mode_switch.S
> +++ b/xen/arch/arm/mode_switch.S
> @@ -28,25 +28,25 @@
> /* wake up secondary cpus */
> .globl kick_cpus
> kick_cpus:
> - /* write start paddr to v2m sysreg FLAGSSET register */
> - ldr r0, =(V2M_SYS_MMIO_BASE) /* base V2M sysreg MMIO address */
> - dsb
> - mov r2, #0xffffffff
> - str r2, [r0, #(V2M_SYS_FLAGSCLR)]
> - dsb
> - ldr r2, =start
> - add r2, r2, r10
> - str r2, [r0, #(V2M_SYS_FLAGSSET)]
> - dsb
> - /* send an interrupt */
> - ldr r0, =(GIC_BASE_ADDRESS + GIC_DR_OFFSET) /* base GICD MMIO address */
> - mov r2, #0x1
> - str r2, [r0, #(GICD_CTLR * 4)] /* enable distributor */
> - mov r2, #0xfe0000
> - str r2, [r0, #(GICD_SGIR * 4)] /* send IPI to everybody */
> - dsb
> - str r2, [r0, #(GICD_CTLR * 4)] /* disable distributor */
> - mov pc, lr
> + /* write start paddr to v2m sysreg FLAGSSET register */
> + ldr r0, =(V2M_SYS_MMIO_BASE) /* base V2M sysreg MMIO address */
> + dsb
> + mov r2, #0xffffffff
> + str r2, [r0, #(V2M_SYS_FLAGSCLR)]
> + dsb
> + ldr r2, =start
> + add r2, r2, r10
> + str r2, [r0, #(V2M_SYS_FLAGSSET)]
> + dsb
> + /* send an interrupt */
> + ldr r0, =(GIC_BASE_ADDRESS + GIC_DR_OFFSET) /* base GICD MMIO address */
> + mov r2, #0x1
> + str r2, [r0, #(GICD_CTLR * 4)] /* enable distributor */
> + mov r2, #0xfe0000
> + str r2, [r0, #(GICD_SGIR * 4)] /* send IPI to everybody */
> + dsb
> + str r2, [r0, #(GICD_CTLR * 4)] /* disable distributor */
> + mov pc, lr
>
>
> /* Get up a CPU into Hyp mode. Clobbers r0-r3.
> @@ -61,54 +61,61 @@ kick_cpus:
>
> .globl enter_hyp_mode
> enter_hyp_mode:
> - mov r3, lr /* Put return address in non-banked reg */
> - cpsid aif, #0x16 /* Enter Monitor mode */
> - mrc CP32(r0, SCR)
> - orr r0, r0, #0x100 /* Set HCE */
> - orr r0, r0, #0xb1 /* Set SCD, AW, FW and NS */
> - bic r0, r0, #0xe /* Clear EA, FIQ and IRQ */
> - mcr CP32(r0, SCR)
> - /* Ugly: the system timer's frequency register is only
> - * programmable in Secure state. Since we don't know where its
> - * memory-mapped control registers live, we can't find out the
> - * right frequency. Use the VE model's default frequency here. */
> - ldr r0, =0x5f5e100 /* 100 MHz */
> - mcr CP32(r0, CNTFRQ)
> - ldr r0, =0x40c00 /* SMP, c11, c10 in non-secure mode */
> - mcr CP32(r0, NSACR)
> - mov r0, #GIC_BASE_ADDRESS
> - add r0, r0, #GIC_DR_OFFSET
> - /* Disable the GIC distributor, on the boot CPU only */
> - mov r1, #0
> - teq r12, #0 /* Is this the boot CPU? */
> - streq r1, [r0]
> - /* Continuing ugliness: Set up the GIC so NS state owns interrupts,
> - * The first 32 interrupts (SGIs & PPIs) must be configured on all
> - * CPUs while the remainder are SPIs and only need to be done one, on
> - * the boot CPU. */
> - add r0, r0, #0x80 /* GICD_IGROUP0 */
> - mov r2, #0xffffffff /* All interrupts to group 1 */
> - teq r12, #0 /* Boot CPU? */
> - str r2, [r0] /* Interrupts 0-31 (SGI & PPI) */
> - streq r2, [r0, #4] /* Interrupts 32-63 (SPI) */
> - streq r2, [r0, #8] /* Interrupts 64-95 (SPI) */
> - /* Disable the GIC CPU interface on all processors */
> - mov r0, #GIC_BASE_ADDRESS
> - add r0, r0, #GIC_CR_OFFSET
> - mov r1, #0
> - str r1, [r0]
> - /* Must drop priority mask below 0x80 before entering NS state */
> - ldr r1, =0xff
> - str r1, [r0, #0x4] /* -> GICC_PMR */
> - /* Reset a few config registers */
> - mov r0, #0
> - mcr CP32(r0, FCSEIDR)
> - mcr CP32(r0, CONTEXTIDR)
> - /* Allow non-secure access to coprocessors, FIQs, VFP and NEON */
> - ldr r1, =0x3fff /* 14 CP bits set, all others clear */
> - mcr CP32(r1, NSACR)
> + mov r3, lr /* Put return address in non-banked reg */
> + cpsid aif, #0x16 /* Enter Monitor mode */
> + mrc CP32(r0, SCR)
> + orr r0, r0, #0x100 /* Set HCE */
> + orr r0, r0, #0xb1 /* Set SCD, AW, FW and NS */
> + bic r0, r0, #0xe /* Clear EA, FIQ and IRQ */
> + mcr CP32(r0, SCR)
> + /* Ugly: the system timer's frequency register is only
> + * programmable in Secure state. Since we don't know where its
> + * memory-mapped control registers live, we can't find out the
> + * right frequency. Use the VE model's default frequency here. */
> + ldr r0, =0x5f5e100 /* 100 MHz */
> + mcr CP32(r0, CNTFRQ)
> + ldr r0, =0x40c00 /* SMP, c11, c10 in non-secure mode */
> + mcr CP32(r0, NSACR)
> + mov r0, #GIC_BASE_ADDRESS
> + add r0, r0, #GIC_DR_OFFSET
> + /* Disable the GIC distributor, on the boot CPU only */
> + mov r1, #0
> + teq r12, #0 /* Is this the boot CPU? */
> + streq r1, [r0]
> + /* Continuing ugliness: Set up the GIC so NS state owns interrupts,
> + * The first 32 interrupts (SGIs & PPIs) must be configured on all
> + * CPUs while the remainder are SPIs and only need to be done one, on
> + * the boot CPU. */
> + add r0, r0, #0x80 /* GICD_IGROUP0 */
> + mov r2, #0xffffffff /* All interrupts to group 1 */
> + teq r12, #0 /* Boot CPU? */
> + str r2, [r0] /* Interrupts 0-31 (SGI & PPI) */
> + streq r2, [r0, #4] /* Interrupts 32-63 (SPI) */
> + streq r2, [r0, #8] /* Interrupts 64-95 (SPI) */
> + /* Disable the GIC CPU interface on all processors */
> + mov r0, #GIC_BASE_ADDRESS
> + add r0, r0, #GIC_CR_OFFSET
> + mov r1, #0
> + str r1, [r0]
> + /* Must drop priority mask below 0x80 before entering NS state */
> + ldr r1, =0xff
> + str r1, [r0, #0x4] /* -> GICC_PMR */
> + /* Reset a few config registers */
> + mov r0, #0
> + mcr CP32(r0, FCSEIDR)
> + mcr CP32(r0, CONTEXTIDR)
> + /* Allow non-secure access to coprocessors, FIQs, VFP and NEON */
> + ldr r1, =0x3fff /* 14 CP bits set, all others clear */
> + mcr CP32(r1, NSACR)
>
> - mrs r0, cpsr /* Copy the CPSR */
> - add r0, r0, #0x4 /* 0x16 (Monitor) -> 0x1a (Hyp) */
> - msr spsr_cxsf, r0 /* into the SPSR */
> - movs pc, r3 /* Exception-return into Hyp mode */
> + mrs r0, cpsr /* Copy the CPSR */
> + add r0, r0, #0x4 /* 0x16 (Monitor) -> 0x1a (Hyp) */
> + msr spsr_cxsf, r0 /* into the SPSR */
> + movs pc, r3 /* Exception-return into Hyp mode */
> +
> +/*
> + * Local variables:
> + * mode: ASM
> + * indent-tabs-mode: nil
> + * End:
> + */
> diff --git a/xen/arch/arm/proc-ca15.S b/xen/arch/arm/proc-ca15.S
> index 5a5bf64..dcdd42e 100644
> --- a/xen/arch/arm/proc-ca15.S
> +++ b/xen/arch/arm/proc-ca15.S
> @@ -21,8 +21,15 @@
>
> .globl cortex_a15_init
> cortex_a15_init:
> - /* Set up the SMP bit in ACTLR */
> - mrc CP32(r0, ACTLR)
> - orr r0, r0, #(ACTLR_CA15_SMP) /* enable SMP bit*/
> - mcr CP32(r0, ACTLR)
> - mov pc, lr
> + /* Set up the SMP bit in ACTLR */
> + mrc CP32(r0, ACTLR)
> + orr r0, r0, #(ACTLR_CA15_SMP) /* enable SMP bit */
> + mcr CP32(r0, ACTLR)
> + mov pc, lr
> +
> +/*
> + * Local variables:
> + * mode: ASM
> + * indent-tabs-mode: nil
> + * End:
> + */
> --
> 1.7.2.5
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel