|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v9 10/13] xen/arm64: Save/restore CPU context across SYSTEM_SUSPEND
From: Mirela Simonovic <mirela.simonovic@xxxxxxxxxx>
On wakeup from PSCI SYSTEM_SUSPEND, Xen re-enters EL2 with the MMU and
data cache disabled. The resume path must first switch back to Xen's
runtime page tables before it can access the saved CPU context using
virtual addresses.
Add an arm64 hyp_resume trampoline that reuses enable_secondary_cpu_mm()
to enable the data cache and MMU, switch to init_ttbr, and resume in the
runtime virtual mapping. The trampoline then restores the saved CPU
general-purpose and system-control register context.
prepare_resume_ctx() must be invoked immediately before the PSCI system
suspend call is issued to the platform firmware. It saves the current CPU
context and returns a non-zero value, telling the caller to proceed with
the actual SYSTEM_SUSPEND call.
On resume, hyp_resume restores the saved context, including the saved link
register. Control therefore returns to the point where prepare_resume_ctx()
was originally called, but this time with a return value of zero, so the
caller can distinguish the resume path and avoid re-entering the suspend
path.
The assembly save/restore code uses offsets generated by asm-offsets.c
from struct resume_cpu_context, keeping the assembly memory accesses in
sync with the C structure layout.
Support for ARM32 is not implemented. Instead, compilation fails with a
build-time error if suspend is enabled for ARM32.
Signed-off-by: Mirela Simonovic <mirela.simonovic@xxxxxxxxxx>
Signed-off-by: Saeed Nowshadi <saeed.nowshadi@xxxxxxxxxx>
Signed-off-by: Mykyta Poturai <mykyta_poturai@xxxxxxxx>
Signed-off-by: Mykola Kvach <mykola_kvach@xxxxxxxx>
---
Changes in v9:
- Drop the misleading prepare_resume_ctx() pointer argument and make both
save/restore paths use the global resume_cpu_context.
- Squash the arm64 resume trampoline into the context save/restore patch.
- Document in code that hyp_resume relies on PSCI initial-state rules.
- Use generic platform firmware wording instead of ATF-specific wording.
- Rename the saved context type/storage to resume_cpu_context and rely on
implicit zero-initialization for the file-scope object.
- Use asm-offsets.c-generated RESUME_CTX_* offsets to keep the assembly
save/restore code in sync with struct resume_cpu_context.
Changes in v8:
- Fix alignments in code.
Changes in v7:
- No functional changes, just moved commit.
---
xen/arch/arm/Makefile | 1 +
xen/arch/arm/arm64/asm-offsets.c | 20 +++++
xen/arch/arm/arm64/head.S | 118 +++++++++++++++++++++++++++++
xen/arch/arm/include/asm/suspend.h | 26 +++++++
xen/arch/arm/suspend.c | 14 ++++
5 files changed, 179 insertions(+)
create mode 100644 xen/arch/arm/suspend.c
diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
index 982c6c396a..c97df7f3a0 100644
--- a/xen/arch/arm/Makefile
+++ b/xen/arch/arm/Makefile
@@ -51,6 +51,7 @@ obj-y += setup.o
obj-y += shutdown.o
obj-y += smp.o
obj-y += smpboot.o
+obj-$(CONFIG_SYSTEM_SUSPEND) += suspend.o
obj-$(CONFIG_SYSCTL) += sysctl.o
obj-y += time.o
obj-y += traps.o
diff --git a/xen/arch/arm/arm64/asm-offsets.c b/xen/arch/arm/arm64/asm-offsets.c
index 38a3894a3b..4da5fff584 100644
--- a/xen/arch/arm/arm64/asm-offsets.c
+++ b/xen/arch/arm/arm64/asm-offsets.c
@@ -13,6 +13,7 @@
#include <asm/mm.h>
#include <asm/setup.h>
#include <asm/smccc.h>
+#include <asm/suspend.h>
#define DEFINE(_sym, _val) \
asm volatile ( "\n.ascii\"==>#define " #_sym " %0 /* " #_val " */<==\""\
@@ -57,6 +58,25 @@ void __dummy__(void)
OFFSET(INITINFO_stack, struct init_info, stack);
BLANK();
+#ifdef CONFIG_SYSTEM_SUSPEND
+ OFFSET(RESUME_CTX_X19, struct resume_cpu_context, callee_regs[0]);
+ OFFSET(RESUME_CTX_X21, struct resume_cpu_context, callee_regs[2]);
+ OFFSET(RESUME_CTX_X23, struct resume_cpu_context, callee_regs[4]);
+ OFFSET(RESUME_CTX_X25, struct resume_cpu_context, callee_regs[6]);
+ OFFSET(RESUME_CTX_X27, struct resume_cpu_context, callee_regs[8]);
+ OFFSET(RESUME_CTX_X29, struct resume_cpu_context, callee_regs[10]);
+ OFFSET(RESUME_CTX_SP, struct resume_cpu_context, sp);
+ OFFSET(RESUME_CTX_VBAR_EL2, struct resume_cpu_context, vbar_el2);
+ OFFSET(RESUME_CTX_VTCR_EL2, struct resume_cpu_context, vtcr_el2);
+ OFFSET(RESUME_CTX_VTTBR_EL2, struct resume_cpu_context, vttbr_el2);
+ OFFSET(RESUME_CTX_TPIDR_EL2, struct resume_cpu_context, tpidr_el2);
+ OFFSET(RESUME_CTX_MDCR_EL2, struct resume_cpu_context, mdcr_el2);
+ OFFSET(RESUME_CTX_HSTR_EL2, struct resume_cpu_context, hstr_el2);
+ OFFSET(RESUME_CTX_CPTR_EL2, struct resume_cpu_context, cptr_el2);
+ OFFSET(RESUME_CTX_HCR_EL2, struct resume_cpu_context, hcr_el2);
+ BLANK();
+#endif
+
OFFSET(SMCCC_RES_a0, struct arm_smccc_res, a0);
OFFSET(SMCCC_RES_a2, struct arm_smccc_res, a2);
OFFSET(ARM_SMCCC_1_2_REGS_X0_OFFS, struct arm_smccc_1_2_regs, a0);
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index 72c7b24498..512a3c35b2 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -561,6 +561,124 @@ END(efi_xen_start)
#endif /* CONFIG_ARM_EFI */
+#ifdef CONFIG_SYSTEM_SUSPEND
+/*
+ * int prepare_resume_ctx(void)
+ *
+ * CPU context saved here will be restored on resume in hyp_resume function.
+ * prepare_resume_ctx shall return a non-zero value. Upon restoring context
+ * hyp_resume shall return value zero instead. From C code that invokes
+ * prepare_resume_ctx, the return value is interpreted to determine whether
+ * the context is saved (prepare_resume_ctx) or restored (hyp_resume).
+ */
+FUNC(prepare_resume_ctx)
+ ldr x0, =resume_cpu_context
+
+ /* Store callee-saved registers */
+ stp x19, x20, [x0, #RESUME_CTX_X19]
+ stp x21, x22, [x0, #RESUME_CTX_X21]
+ stp x23, x24, [x0, #RESUME_CTX_X23]
+ stp x25, x26, [x0, #RESUME_CTX_X25]
+ stp x27, x28, [x0, #RESUME_CTX_X27]
+ stp x29, lr, [x0, #RESUME_CTX_X29]
+
+ /* Store stack-pointer */
+ mov x2, sp
+ str x2, [x0, #RESUME_CTX_SP]
+
+ /* Store system control registers */
+ mrs x2, VBAR_EL2
+ str x2, [x0, #RESUME_CTX_VBAR_EL2]
+ mrs x2, VTCR_EL2
+ str x2, [x0, #RESUME_CTX_VTCR_EL2]
+ mrs x2, VTTBR_EL2
+ str x2, [x0, #RESUME_CTX_VTTBR_EL2]
+ mrs x2, TPIDR_EL2
+ str x2, [x0, #RESUME_CTX_TPIDR_EL2]
+ mrs x2, MDCR_EL2
+ str x2, [x0, #RESUME_CTX_MDCR_EL2]
+ mrs x2, HSTR_EL2
+ str x2, [x0, #RESUME_CTX_HSTR_EL2]
+ mrs x2, CPTR_EL2
+ str x2, [x0, #RESUME_CTX_CPTR_EL2]
+ mrs x2, HCR_EL2
+ str x2, [x0, #RESUME_CTX_HCR_EL2]
+
+ /* prepare_resume_ctx must return a non-zero value */
+ mov x0, #1
+ ret
+END(prepare_resume_ctx)
+
+FUNC(hyp_resume)
+ /*
+ * PSCI states that SYSTEM_SUSPEND follows the CPU_SUSPEND initial
+ * state rules, so PSCI-compliant firmware must enter the return
+ * exception level with DAIF masked.
+ */
+
+ /* Initialize the UART if earlyprintk has been enabled. */
+#ifdef CONFIG_EARLY_PRINTK
+ bl init_uart
+#endif
+ PRINT_ID("- Xen resuming -\r\n")
+
+ bl check_cpu_mode
+ bl cpu_init
+
+ ldr x0, =start
+ adr x20, start /* x20 := paddr (start) */
+ sub x20, x20, x0 /* x20 := phys-offset */
+ ldr lr, =mmu_resumed
+ b enable_secondary_cpu_mm
+
+mmu_resumed:
+ /* Now we can access the saved context, so restore it here. */
+ ldr x0, =resume_cpu_context
+
+ /* Restore callee-saved registers */
+ ldp x19, x20, [x0, #RESUME_CTX_X19]
+ ldp x21, x22, [x0, #RESUME_CTX_X21]
+ ldp x23, x24, [x0, #RESUME_CTX_X23]
+ ldp x25, x26, [x0, #RESUME_CTX_X25]
+ ldp x27, x28, [x0, #RESUME_CTX_X27]
+ ldp x29, lr, [x0, #RESUME_CTX_X29]
+
+ /* Restore stack pointer */
+ ldr x2, [x0, #RESUME_CTX_SP]
+ mov sp, x2
+
+ /* Restore system control registers */
+ ldr x2, [x0, #RESUME_CTX_VBAR_EL2]
+ msr VBAR_EL2, x2
+ ldr x2, [x0, #RESUME_CTX_VTCR_EL2]
+ msr VTCR_EL2, x2
+ ldr x2, [x0, #RESUME_CTX_VTTBR_EL2]
+ msr VTTBR_EL2, x2
+ ldr x2, [x0, #RESUME_CTX_TPIDR_EL2]
+ msr TPIDR_EL2, x2
+ ldr x2, [x0, #RESUME_CTX_MDCR_EL2]
+ msr MDCR_EL2, x2
+ ldr x2, [x0, #RESUME_CTX_HSTR_EL2]
+ msr HSTR_EL2, x2
+ ldr x2, [x0, #RESUME_CTX_CPTR_EL2]
+ msr CPTR_EL2, x2
+ ldr x2, [x0, #RESUME_CTX_HCR_EL2]
+ msr HCR_EL2, x2
+ isb
+
+ /*
+ * Since context is restored return from this function will appear
+ * as return from prepare_resume_ctx. To distinguish a return from
+ * prepare_resume_ctx which is called upon finalizing the suspend,
+ * as opposed to return from this function which executes on resume,
+ * we need to return zero value here.
+ */
+ mov x0, #0
+ ret
+END(hyp_resume)
+
+#endif /* CONFIG_SYSTEM_SUSPEND */
+
/*
* Local variables:
* mode: ASM
diff --git a/xen/arch/arm/include/asm/suspend.h
b/xen/arch/arm/include/asm/suspend.h
index 31a98a1f1b..2d9fc331fc 100644
--- a/xen/arch/arm/include/asm/suspend.h
+++ b/xen/arch/arm/include/asm/suspend.h
@@ -3,6 +3,8 @@
#ifndef ARM_SUSPEND_H
#define ARM_SUSPEND_H
+#include <xen/types.h>
+
struct domain;
struct vcpu;
struct vcpu_guest_context;
@@ -14,6 +16,30 @@ struct resume_info {
void arch_domain_resume(struct domain *d);
+#ifdef CONFIG_SYSTEM_SUSPEND
+#ifdef CONFIG_ARM_64
+struct resume_cpu_context {
+ register_t callee_regs[12];
+ register_t sp;
+ register_t vbar_el2;
+ register_t vtcr_el2;
+ register_t vttbr_el2;
+ register_t tpidr_el2;
+ register_t mdcr_el2;
+ register_t hstr_el2;
+ register_t cptr_el2;
+ register_t hcr_el2;
+} __aligned(16);
+#else
+#error "Define resume_cpu_context structure for arm32"
+#endif
+
+extern struct resume_cpu_context resume_cpu_context;
+
+int prepare_resume_ctx(void);
+void hyp_resume(void);
+#endif /* CONFIG_SYSTEM_SUSPEND */
+
#endif /* ARM_SUSPEND_H */
/*
diff --git a/xen/arch/arm/suspend.c b/xen/arch/arm/suspend.c
new file mode 100644
index 0000000000..6ea4a0f9cc
--- /dev/null
+++ b/xen/arch/arm/suspend.c
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <asm/suspend.h>
+
+struct resume_cpu_context resume_cpu_context;
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--
2.43.0
Lists.xenproject.org is hosted with RackSpace.