[RFC PATCH v1 22/26] xen/arm/cca: add Realm REC enter path
Add the RMI_REC_ENTER loop for Realm vCPUs. Handle host events,
vGIC/timer sync, WFI/WFE, MMIO and PSCI exits.
Signed-off-by: Koichiro Den <den@xxxxxxxxxxxxx>
---
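For reviewers: below is a minimal, self-contained sketch of the gprs[0]
handoff that arm_cca_mmio_read_reg()/arm_cca_mmio_write_reg() and
arm_cca_set_mmio_result() implement in rec.c. The toy_* types,
TOY_ENTER_FLAG_EMUL_MMIO and the scratch register are illustrative
stand-ins, not the RMI definitions from rmi.h; this block is not part of
the patch and is not applied by git am.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Illustrative stand-ins for the two halves of the REC run buffer. */
    struct toy_rec_exit  { uint64_t esr; uint64_t gprs[8]; };
    struct toy_rec_enter { uint64_t flags; uint64_t gprs[8]; };
    #define TOY_ENTER_FLAG_EMUL_MMIO (1UL << 0)

    static uint64_t scratch_reg;    /* one-register toy device */

    /*
     * Emulated store: the data arrives in exit.gprs[0] (no SRT-indexed GPR
     * file is exposed).  Emulated load: the result goes back through
     * enter.gprs[0] with the EMUL_MMIO flag set, and the RMM copies it into
     * the guest register named by ESR.ISS.SRT on the next REC enter.
     */
    static void toy_handle_mmio(bool is_write, const struct toy_rec_exit *ex,
                                struct toy_rec_enter *en)
    {
        if ( is_write )
            scratch_reg = ex->gprs[0];
        else
        {
            en->gprs[0] = scratch_reg;
            en->flags |= TOY_ENTER_FLAG_EMUL_MMIO;
        }
    }

    int main(void)
    {
        struct toy_rec_exit ex = { .gprs = { 0xabcdULL } };
        struct toy_rec_enter en = { 0 };

        toy_handle_mmio(true, &ex, &en);    /* guest store of 0xabcd */
        toy_handle_mmio(false, &ex, &en);   /* guest load reads it back */
        printf("gprs[0]=%#llx flags=%#llx\n",
               (unsigned long long)en.gprs[0],
               (unsigned long long)en.flags);
        return 0;
    }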
xen/arch/arm/cca/rec.c | 672 +++++++++++++++++++++++++++
xen/arch/arm/domain.c | 4 +
xen/arch/arm/include/asm/arm64/hsr.h | 1 +
xen/arch/arm/include/asm/hsr.h | 4 +
4 files changed, 681 insertions(+)
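Also for reviewers: a worked example of the Unprotected-alias stripping done
by arm_cca_mmio_lookup_gpa() below. The s2sz value of 40 and the example IPA
are assumptions for illustration only, not taken from any Realm
configuration; this block is likewise not part of the patch.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int s2sz = 40;                     /* assumed IPA width */
        uint64_t prot_bit = 1ULL << (s2sz - 1);     /* bit 39 */
        uint64_t unprotected_ipa = prot_bit | 0x1c010000ULL; /* arbitrary */

        /* Xen's MMIO handlers are registered at the lower (protected) alias. */
        printf("fault IPA %#llx -> lookup key %#llx\n",
               (unsigned long long)unprotected_ipa,
               (unsigned long long)(unprotected_ipa & ~prot_bit));
        return 0;
    }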
diff --git a/xen/arch/arm/cca/rec.c b/xen/arch/arm/cca/rec.c
index 8314a7a45d95..efff7fa48745 100644
--- a/xen/arch/arm/cca/rec.c
+++ b/xen/arch/arm/cca/rec.c
@@ -1,14 +1,56 @@
/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Dedicated Realm REC execution path for Xen/Arm.
+ *
+ * Key points:
+ * - The Realm executes under the RMM. When it accesses emulated devices, the RMM
+ * causes a REC exit and provides ESR/FAR/HPFAR plus exit metadata in the REC
+ * exit buffer. With RMI v2.0, vGIC LR state is exchanged through the real
+ * ICH registers rather than REC run-buffer fields (unlike RMI v1.x).
+ * - Xen bridges such exits into the existing Arm MMIO emulation backend
+ * (try_handle_mmio_regops), which already services vGIC and other emulated
+ * devices for non-Realm guests. The emulation reads the guest GPRs and may
+ * return a value to the guest. We propagate that value back to the RMM on
+ * the next REC enter using the RMI REC_ENTER flags/gpr0 contract.
+ * - Around REC enter/exit, Xen synchronizes the vGIC directly with the ICH
+ * registers and feeds the REC exit timer snapshot into Realm-specific
+ * timer state.
+ */
+#include <xen/bug.h>
+#include <xen/errno.h>
#include <xen/lib.h>
#include <xen/sched.h>
+#include <xen/softirq.h>
#include <xen/timer.h>
#include <xen/xmalloc.h>
#include <asm/cca.h>
+#include <asm/current.h>
+#include <asm/domain.h>
+#include <asm/event.h>
+#include <asm/gic.h>
+#include <asm/gic_v3_defs.h>
+#include <asm/hsr.h>
+#include <asm/mm.h>
+#include <asm/mmio.h>
+#include <asm/p2m.h>
+#include <asm/processor.h>
+#include <asm/psci.h>
+#include <asm/sysregs.h>
+#include <asm/system.h>
#include <asm/time.h>
#include <asm/vgic.h>
+#include <public/sched.h>
+
+#include "rmi.h"
+
+struct arm_cca_mmio_ctxt {
+ const struct arm_cca_rmi_rec_exit *exit;
+ register_t result;
+};
+
struct arm_cca_timer {
struct vcpu *v;
unsigned int irq;
@@ -23,6 +65,13 @@ struct arm_cca_vcpu_timers {
struct timer wfx;
};
+/*
+ * Keep Realm guests on the GICv3 sysreg CPU interface. This vGIC path has no
+ * IRQ/FIQ bypass, so ICC_SRE_EL1 is exposed as a fixed SRE|DFB|DIB value.
+ */
+#define ARM_CCA_ICC_SRE_EL1_VALUE \
+ (GICC_SRE_EL2_SRE | GICC_SRE_EL2_DFB | GICC_SRE_EL2_DIB)
+
static void arm_cca_timer_update_irq(struct arm_cca_timer *timer)
{
register_t ctl = timer->ctl;
@@ -88,3 +137,626 @@ void arm_cca_vcpu_timer_destroy(struct vcpu *v)
kill_timer(&timers->wfx);
XFREE(v->arch.cca.timers);
}
+
+static void arm_cca_timer_program(struct arm_cca_timer *timer)
+{
+ s_time_t expires;
+
+ migrate_timer(&timer->timer, timer->v->processor);
+
+ if ( timer->ctl & CNTx_CTL_ENABLE )
+ {
+ expires = (timer->cval > boot_count)
+ ? ticks_to_ns(timer->cval - boot_count)
+ : 0;
+ set_timer(&timer->timer, expires);
+ }
+ else
+ stop_timer(&timer->timer);
+}
+
+static void arm_cca_timer_sync(struct arm_cca_timer *timer,
+ register_t ctl, uint64_t cval)
+{
+ timer->ctl = ctl;
+ timer->cval = cval;
+
+ arm_cca_timer_program(timer);
+ arm_cca_timer_update_irq(timer);
+}
+
+static register_t arm_cca_mmio_read_reg(void *ctxt, int reg)
+{
+ struct arm_cca_mmio_ctxt *mmio = ctxt;
+
+ /*
+ * This callback is used only when Xen's MMIO core emulates a write and
+ * asks for the store data. For a Realm emulatable Data Abort, the RMM ABI
+ * does not provide a full guest GPR file indexed by ESR_EL2.ISS.SRT.
+ * Instead, the write data is provided in run.exit.gprs[0].
+ *
+ * For an emulated read, arm_cca_mmio_write_reg() below captures the value
+ * produced by the device model and arm_cca_set_mmio_result() returns it to
+ * the RMM in run.enter.gprs[0]. The RMM then writes it back to the
+ * faulting guest register selected by ESR_EL2.ISS.SRT.
+ */
+ return mmio->exit->gprs[0];
+}
+
+static void arm_cca_mmio_write_reg(void *ctxt, int reg, register_t value)
+{
+ struct arm_cca_mmio_ctxt *mmio = ctxt;
+
+ mmio->result = value;
+}
+
+static const struct mmio_regops arm_cca_mmio_regops = {
+ .read = arm_cca_mmio_read_reg,
+ .write = arm_cca_mmio_write_reg,
+};
+
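+/* Drain any softirqs pending on this pCPU, briefly re-enabling interrupts. */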
+static void check_for_pcpu_work(void)
+{
+ ASSERT(!local_irq_is_enabled());
+
+ while ( softirq_pending(smp_processor_id()) )
+ {
+ local_irq_enable();
+ do_softirq();
+ local_irq_disable();
+ }
+}
+
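+/*
+ * Open a short window with interrupts enabled so pending host IRQs can be
+ * taken, then drain any softirqs they may have raised.
+ */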
+static void arm_cca_service_host_events(void)
+{
+ ASSERT(!local_irq_is_enabled());
+
+ local_irq_enable();
+ local_irq_disable();
+ check_for_pcpu_work();
+}
+
+static void arm_cca_check_for_vcpu_work(struct vcpu *v)
+{
+ if ( likely(!v->arch.need_flush_to_ram) )
+ return;
+
+ check_for_pcpu_work();
+
+ local_irq_enable();
+ p2m_flush_vm(v);
+ local_irq_disable();
+}
+
+static void noreturn arm_cca_wait_forever(struct vcpu *v)
+{
+ for ( ; ; )
+ {
+ /* Scheduler helpers expect local IRQs enabled when taking locks */
+ local_irq_enable();
+ vcpu_block();
+ local_irq_disable();
+ arm_cca_service_host_events();
+ }
+}
+
+static void arm_cca_wait_until_online(struct vcpu *v)
+{
+ while ( test_bit(_VPF_down, &v->pause_flags) )
+ {
+ /* Scheduler helpers expect local IRQs enabled when taking locks */
+ local_irq_enable();
+ vcpu_block();
+ local_irq_disable();
+ arm_cca_service_host_events();
+ }
+}
+
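+/* WFxT timeouts are absolute counter values; use a signed, wrap-safe compare. */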
+static bool arm_cca_wfxt_expired(register_t timeout)
+{
+ return (int64_t)(get_cycles() - timeout) >= 0;
+}
+
+static void arm_cca_wait_until_wfxt_timeout(struct vcpu *v,
+ register_t timeout)
+{
+ s_time_t expires;
+
+ ASSERT(!local_irq_is_enabled());
+
+ if ( arm_cca_wfxt_expired(timeout) )
+ return;
+
+ expires = (timeout > boot_count) ? ticks_to_ns(timeout - boot_count) : 0;
+
+ set_bit(_VPF_blocked, &v->pause_flags);
+ smp_mb__after_atomic();
+
+ arch_vcpu_block(v);
+
+ if ( local_events_need_delivery_nomask() || arm_cca_wfxt_expired(timeout) )
+ {
+ clear_bit(_VPF_blocked, &v->pause_flags);
+ return;
+ }
+
+ migrate_timer(&v->arch.cca.timers->wfx, v->processor);
+ set_timer(&v->arch.cca.timers->wfx, expires);
+
+ raise_softirq(SCHEDULE_SOFTIRQ);
+
+ /* Let the scheduler softirq run while the WFxT timer can unblock us. */
+ local_irq_enable();
+ local_irq_disable();
+
+ stop_timer(&v->arch.cca.timers->wfx);
+ arm_cca_service_host_events();
+}
+
+static void noreturn arm_cca_domain_crash(struct vcpu *v)
+{
+ domain_crash(v->domain);
+ arm_cca_wait_forever(v);
+}
+
+/* DEN0137 2.0-bet1 - D1.6.1 Interrupt flow. */
+static void arm_cca_prepare_rec_enter(struct arm_cca_rmi_rec_run *run,
+ unsigned long entry_flags,
+ const register_t *entry_gprs)
+{
+ register_t hcr;
+
+ memset(&run->enter, 0, sizeof(run->enter));
+ run->enter.flags = entry_flags;
+ memcpy(run->enter.gprs, entry_gprs, sizeof(run->enter.gprs));
+
+ /*
+ * RMI v2.0 removes the run-buffer GIC fields. The RMM validates and
+ * consumes the real ICH_LR<n>_EL2 state left by the Host, so flush Xen's
+ * vGIC model directly into hardware before REC_ENTER.
+ */
+ vgic_sync_to_lrs();
+
+ /*
+ * DEN0137 A6.1 requires ICH_HCR_EL2.En to be clear on REC exit. Re-enable
+ * the virtual CPU interface for the next Realm entry while preserving the
+ * rest of the GIC CPU interface state carried in ICH_HCR_EL2.
+ */
+ hcr = READ_SYSREG(ICH_HCR_EL2);
+ WRITE_SYSREG(hcr | GICH_HCR_EN, ICH_HCR_EL2);
+ isb();
+}
+
+/* DEN0137 2.0-bet1 - D1.6.1 Interrupt flow. */
+static void arm_cca_sync_vgic_exit(struct vcpu *v)
+{
+ /*
+ * In RMI v2.0 the RMM leaves the GIC owner Plane state in the hardware
+ * ICH registers on REC exit. Consume it directly through Xen's generic
+ * vGIC LR sync path.
+ */
+ isb();
+ vgic_sync_from_lrs(v);
+}
+
+/*
+ * DEN0137 2.0-bet1 - D1.6.2 Timer interrupt delivery flow.
+ * Keep the REC exit timer snapshot in Realm-specific state. The normal
+ * v->arch.{phys,virt}_timer state is tied to non-Realm EL1 context switch
+ * save/restore and must not be overwritten with RMM-owned timer state.
+ */
+static void arm_cca_sync_rec_exit(struct vcpu *v,
+ const struct arm_cca_rmi_rec_exit *exit)
+{
+ arm_cca_sync_vgic_exit(v);
+ arm_cca_timer_sync(&v->arch.cca.timers->phys,
+ exit->cntp_ctl, exit->cntp_cval);
+ arm_cca_timer_sync(&v->arch.cca.timers->virt,
+ exit->cntv_ctl, exit->cntv_cval);
+}
+
+static paddr_t arm_cca_mmio_lookup_gpa(paddr_t ipa)
+{
+ /*
+ * Realm creation currently sets RmiRealmParams::s2sz to p2m_ipa_bits.
+ * Use the Realm's own s2sz here if that becomes per-domain later.
+ */
+ unsigned int ipa_bits = p2m_ipa_bits;
+ paddr_t mask;
+ paddr_t prot_bit;
+
+ if ( ipa_bits == 0 || ipa_bits > sizeof(paddr_t) * 8 )
+ return ipa;
+
+ mask = GENMASK_ULL(ipa_bits - 1, 0);
+
+ ipa &= mask;
+
+ /*
+ * DEN0137 2.0-bet1 - A5.2.1 Realm IPA space and D2.1 Realm shared
+ * memory protocol description.
+ *
+ * The RMM reports the IPA which caused the REC exit. In Realm IPA
+ * space, bit[s2sz - 1] is the protection attribute; if it is set,
+ * the access is to the Unprotected alias. Xen's MMIO handlers are
+ * registered against the ordinary guest physical address, so use the
+ * lower alias as the lookup key rather than retrying after failure.
+ */
+ prot_bit = (paddr_t)1 << (ipa_bits - 1);
+ if ( ipa & prot_bit )
+ {
+ ipa &= ~prot_bit;
+ }
+
+ return ipa;
+}
+
+static void arm_cca_set_mmio_result(unsigned long *entry_flags,
+ register_t *entry_gprs,
+ register_t value)
+{
+ *entry_flags |= ARM_CCA_RMI_REC_ENTER_FLAG_EMUL_MMIO;
+ entry_gprs[0] = value;
+}
+
+static void arm_cca_request_sea(unsigned long *entry_flags,
+ register_t *entry_gprs)
+{
+ *entry_flags |= ARM_CCA_RMI_REC_ENTER_FLAG_INJECT_SEA;
+ entry_gprs[0] = 0;
+}
+
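+/*
+ * Treat an ARM_CCA_RMI_ERROR_REALM failure from REC_ENTER as expected once
+ * the domain is shutting down; the caller parks the vCPU instead of
+ * crashing the domain.
+ */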
+static bool
+arm_cca_rec_enter_failed_after_shutdown(const struct vcpu *v,
+ const struct arm_smccc_res *res)
+{
+ return v->domain->is_shutting_down &&
+ arm_cca_rmi_status_is(arm_cca_rmi_result(res),
+ ARM_CCA_RMI_ERROR_REALM);
+}
+
+/* DEN0137 2.0-bet1 - D1.3.4 MMIO emulation flow. */
+static void arm_cca_handle_mmio(struct vcpu *v,
+ const struct arm_cca_rmi_rec_exit *exit,
+ unsigned long *entry_flags,
+ register_t *entry_gprs)
+{
+ union hsr hsr = { .bits = exit->esr };
+ paddr_t raw_gpa = ((paddr_t)(exit->hpfar & HPFAR_MASK) << (12 - 4)) |
+ (exit->far & ~PAGE_MASK);
+ mmio_info_t info = {
+ .dabt = hsr.dabt,
+ .gpa = raw_gpa,
+ };
+ struct arm_cca_mmio_ctxt ctxt = {
+ .exit = exit,
+ .result = 0,
+ };
+ enum io_state state;
+
+ if ( !hsr.dabt.valid )
+ {
+ gprintk(XENLOG_INFO,
+ "ARM CCA: MMIO abort without valid syndrome esr=%#lx far=%#llx
hpfar=%#llx\n",
+ exit->esr,
+ (unsigned long long)exit->far,
+ (unsigned long long)exit->hpfar);
+ arm_cca_request_sea(entry_flags, entry_gprs);
+ return;
+ }
+
+ info.dabt_instr.state = INSTR_VALID;
+ info.gpa = arm_cca_mmio_lookup_gpa(raw_gpa);
+
+ state = try_handle_mmio_regops(v, &info, &arm_cca_mmio_regops, &ctxt);
+
+ if ( state == IO_HANDLED )
+ {
+ arm_cca_set_mmio_result(entry_flags, entry_gprs, ctxt.result);
+ }
+ else
+ {
+ gprintk(XENLOG_INFO,
+ "ARM CCA: MMIO emulation failed state=%d raw_gpa=%#llx
gpa=%#llx esr=%#lx\n",
+ state,
+ (unsigned long long)raw_gpa,
+ (unsigned long long)info.gpa,
+ exit->esr);
+ arm_cca_request_sea(entry_flags, entry_gprs);
+ }
+}
+
+/*
+ * DEN0137 2.0-bet1 - D1.4.1 PSCI_CPU_ON flow.
+ * Xen reuses the same completion hook for related PSCI exits.
+ */
+static void
+arm_cca_handle_psci_complete(struct vcpu *v,
+ const struct arm_cca_rmi_rec_exit *exit)
+{
+ unsigned int target = vaffinity_to_vcpuid(exit->gprs[1]);
+ struct vcpu *target_vcpu;
+ struct arm_smccc_res res;
+ bool wake = false;
+ int rc;
+
+ if ( target >= v->domain->max_vcpus )
+ {
+ gprintk(XENLOG_ERR, "ARM CCA: invalid PSCI target vCPU %u\n",
+ target);
+ arm_cca_domain_crash(v);
+ }
+
+ target_vcpu = v->domain->vcpu[target];
+ if ( target_vcpu == NULL || target_vcpu->arch.cca.rec == INVALID_PADDR )
+ {
+ gprintk(XENLOG_ERR,
+ "ARM CCA: missing target REC for PSCI target vCPU %u\n",
+ target);
+ arm_cca_domain_crash(v);
+ }
+
+ rc = arm_cca_rmi_psci_complete(v->arch.cca.rec, target_vcpu->arch.cca.rec,
+ PSCI_SUCCESS, &res);
+ if ( rc != 0 )
+ {
+ gprintk(XENLOG_ERR,
+ "ARM CCA: RMI_PSCI_COMPLETE failed status=%#x data=%#lx\n",
+ arm_cca_rmi_status_code(arm_cca_rmi_result(&res)),
+ (unsigned long)arm_cca_rmi_result_data(
+ arm_cca_rmi_result(&res)));
+ arm_cca_domain_crash(v);
+ }
+
+ if ( exit->gprs[0] == PSCI_0_2_FN32_CPU_ON ||
+ exit->gprs[0] == PSCI_0_2_FN64_CPU_ON )
+ {
+ wake = test_and_clear_bit(_VPF_down, &target_vcpu->pause_flags);
+ if ( wake )
+ vcpu_wake(target_vcpu);
+ }
+}
+
+static void arm_cca_handle_psci_exit(struct vcpu *v,
+ const struct arm_cca_rmi_rec_exit *exit)
+{
+ switch ( exit->gprs[0] )
+ {
+ case PSCI_0_2_FN32_CPU_SUSPEND:
+ case PSCI_0_2_FN64_CPU_SUSPEND:
+ /* Scheduler helpers expect local IRQs enabled when taking locks */
+ local_irq_enable();
+ vcpu_block_unless_event_pending(v);
+ local_irq_disable();
+ arm_cca_service_host_events();
+ break;
+
+ case PSCI_0_2_FN32_CPU_OFF:
+ if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
+ vcpu_sleep_nosync(v);
+ arm_cca_wait_until_online(v);
+ break;
+
+ case PSCI_0_2_FN32_CPU_ON:
+ case PSCI_0_2_FN64_CPU_ON:
+ case PSCI_0_2_FN32_AFFINITY_INFO:
+ case PSCI_0_2_FN64_AFFINITY_INFO:
+ arm_cca_handle_psci_complete(v, exit);
+ break;
+
+ case PSCI_0_2_FN32_SYSTEM_OFF:
+ domain_shutdown(v->domain, SHUTDOWN_poweroff);
+ arm_cca_wait_forever(v);
+
+ case PSCI_0_2_FN32_SYSTEM_RESET:
+ domain_shutdown(v->domain, SHUTDOWN_reboot);
+ arm_cca_wait_forever(v);
+
+ default:
+ gprintk(XENLOG_ERR,
+ "ARM CCA: unsupported PSCI exit fid=%#lx\n",
+ exit->gprs[0]);
+ arm_cca_domain_crash(v);
+ }
+}
+
+
+/*
+ * DEN0137 2.0-bet1 - D1.3.3 REC exit due to Data Abort fault flow.
+ */
+static void arm_cca_handle_sync_exit(struct vcpu *v,
+ const struct arm_cca_rmi_rec_exit *exit,
+ unsigned long *entry_flags,
+ register_t *entry_gprs)
+{
+ union hsr hsr = { .bits = exit->esr };
+
+ switch ( hsr.ec )
+ {
+ case HSR_EC_WFI_WFE:
+ /*
+ * DEN0137 2.0-bet1 - A4.3.4.1 REC exit due to WFI or WFE.
+ * WFET/WFIT exits provide the timeout value in rec_exit.gprs[0].
+ * Treat a non-zero timeout as authoritative because the same section
+ * only guarantees ESR.ISS.TI for this exit class.
+ */
+ if ( (hsr.bits & HSR_WFI_WFE_WFXT) || exit->gprs[0] )
+ {
+ arm_cca_wait_until_wfxt_timeout(v, exit->gprs[0]);
+ break;
+ }
+
+ /* Scheduler helpers expect local IRQs enabled when taking locks */
+ local_irq_enable();
+ if ( hsr.bits & HSR_WFI_WFE_WFE )
+ vcpu_yield();
+ else
+ vcpu_block_unless_event_pending(v);
+
+ local_irq_disable();
+ arm_cca_service_host_events();
+ break;
+
+ case HSR_EC_DATA_ABORT_LOWER_EL:
+ /* vGIC MMIO paths expect local IRQs enabled for spin_lock_irq */
+ local_irq_enable();
+ arm_cca_handle_mmio(v, exit, entry_flags, entry_gprs);
+ local_irq_disable();
+ break;
+
+ default:
+ gprintk(XENLOG_ERR,
+ "ARM CCA: unsupported REC sync exit ec=%#x esr=%#lx\n",
+ hsr.ec, exit->esr);
+ arm_cca_domain_crash(v);
+ }
+}
+
+/* DEN0137 2.0-bet1 - D1.3.1 Realm entry and exit flow. */
+void noreturn arm_cca_vcpu_run(struct vcpu *v)
+{
+ struct arm_cca_rmi_rec_run *run;
+ struct arm_smccc_res res;
+ unsigned long entry_flags;
+ register_t entry_gprs[ARM_CCA_RMI_REC_NR_GPRS];
+ int rc;
+
+ ASSERT(v == current);
+ ASSERT(!is_idle_vcpu(v));
+
+ if ( !is_vcpu_realm(v) )
+ {
+ gprintk(XENLOG_ERR,
+ "ARM CCA: entered Realm run loop for non-Realm vCPU\n");
+ arm_cca_domain_crash(v);
+ }
+
+ if ( v->domain->arch.vgic.version != GIC_V3 )
+ {
+ gprintk(XENLOG_ERR,
+ "ARM CCA: only GICv3 Realms are supported\n");
+ arm_cca_domain_crash(v);
+ }
+
+ if ( v->arch.cca.run == NULL || v->arch.cca.timers == NULL ||
+ v->arch.cca.run_pa == INVALID_PADDR )
+ {
+ gprintk(XENLOG_ERR,
+ "ARM CCA: REC runtime state is not bound\n");
+ arm_cca_domain_crash(v);
+ }
+
+ if ( v->arch.cca.rec == INVALID_PADDR )
+ {
+ gprintk(XENLOG_ERR, "ARM CCA: REC is not bound\n");
+ arm_cca_domain_crash(v);
+ }
+
+ run = v->arch.cca.run;
+ entry_flags = ARM_CCA_RMI_REC_ENTER_FLAG_TRAP_WFI |
+ ARM_CCA_RMI_REC_ENTER_FLAG_TRAP_WFE;
+ memset(entry_gprs, 0, sizeof(entry_gprs));
+
+ local_irq_disable();
+ for ( ; ; )
+ {
+ arm_cca_check_for_vcpu_work(v);
+ arm_cca_service_host_events();
+
+ arm_cca_prepare_rec_enter(run, entry_flags, entry_gprs);
+ entry_flags = ARM_CCA_RMI_REC_ENTER_FLAG_TRAP_WFI |
+ ARM_CCA_RMI_REC_ENTER_FLAG_TRAP_WFE;
+ memset(entry_gprs, 0, sizeof(entry_gprs));
+ rc = arm_cca_rmi_rec_enter(v->arch.cca.rec, v->arch.cca.run_pa, &res);
+ if ( rc != 0 )
+ {
+ if ( arm_cca_rec_enter_failed_after_shutdown(v, &res) )
+ {
+ gprintk(XENLOG_INFO,
+ "ARM CCA: Realm domain is shutting down; "
+ "stopping vCPU\n");
+ arm_cca_wait_forever(v);
+ }
+
+ gprintk(XENLOG_ERR,
+ "ARM CCA: RMI_REC_ENTER failed status=%#x data=%#lx\n",
+ arm_cca_rmi_status_code(arm_cca_rmi_result(&res)),
+ (unsigned long)arm_cca_rmi_result_data(
+ arm_cca_rmi_result(&res)));
+ arm_cca_domain_crash(v);
+ }
+
+ arm_cca_sync_rec_exit(v, &run->exit);
+
+ switch ( run->exit.exit_reason )
+ {
+ case ARM_CCA_RMI_EXIT_IRQ:
+ case ARM_CCA_RMI_EXIT_FIQ:
+ arm_cca_service_host_events();
+ break;
+
+ case ARM_CCA_RMI_EXIT_PSCI:
+ arm_cca_handle_psci_exit(v, &run->exit);
+ break;
+
+ case ARM_CCA_RMI_EXIT_RIPAS_CHANGE:
+ gprintk(XENLOG_ERR,
+ "ARM CCA: RIPAS change exits are unsupported\n");
+ arm_cca_domain_crash(v);
+
+ case ARM_CCA_RMI_EXIT_SYNC:
+ arm_cca_handle_sync_exit(v, &run->exit, &entry_flags,
+ entry_gprs);
+ break;
+
+ case ARM_CCA_RMI_EXIT_HOST_CALL:
+ gprintk(XENLOG_ERR,
+ "ARM CCA: Host call exits are unsupported (imm=%#x)\n",
+ (unsigned int)run->exit.imm);
+ arm_cca_domain_crash(v);
+
+ case ARM_CCA_RMI_EXIT_SERROR:
+ gprintk(XENLOG_ERR,
+ "ARM CCA: SError exit from Realm REC\n");
+ arm_cca_domain_crash(v);
+
+ case ARM_CCA_RMI_EXIT_S2AP_CHANGE:
+ gprintk(XENLOG_ERR,
+ "ARM CCA: S2AP change exits are unsupported "
+ "range=%#lx-%#lx\n",
+ (unsigned long)run->exit.s2ap_base,
+ (unsigned long)run->exit.s2ap_top);
+ arm_cca_domain_crash(v);
+
+ case ARM_CCA_RMI_EXIT_VDEV_REQUEST:
+ gprintk(XENLOG_ERR,
+ "ARM CCA: VDEV request exits are unsupported "
+ "id=%#lx:%#lx\n",
+ (unsigned long)run->exit.vdev_id_1,
+ (unsigned long)run->exit.vdev_id_2);
+ arm_cca_domain_crash(v);
+
+ case ARM_CCA_RMI_EXIT_VDEV_VALIDATE_MAPPING:
+ gprintk(XENLOG_ERR,
+ "ARM CCA: VDEV mapping validation exits are unsupported "
+ "range=%#lx-%#lx pa=%#lx\n",
+ (unsigned long)run->exit.dev_mem_base,
+ (unsigned long)run->exit.dev_mem_top,
+ (unsigned long)run->exit.dev_mem_pa);
+ arm_cca_domain_crash(v);
+
+ case ARM_CCA_RMI_EXIT_VSMMU_COMMAND:
+ gprintk(XENLOG_ERR,
+ "ARM CCA: VSMMU command exits are unsupported "
+ "vsmmu=%#lx\n",
+ (unsigned long)run->exit.vsmmu);
+ arm_cca_domain_crash(v);
+
+ default:
+ gprintk(XENLOG_ERR,
+ "ARM CCA: unknown REC exit reason %#x\n",
+ (unsigned int)run->exit.exit_reason);
+ arm_cca_domain_crash(v);
+ }
+ }
+}
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index d2ee90248a44..c8330e7c969c 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -302,6 +302,10 @@ static void noreturn continue_new_vcpu(struct vcpu *prev)
if ( is_idle_vcpu(current) )
reset_stack_and_jump(idle_loop);
+#ifdef CONFIG_ARM_CCA
+ else if ( is_vcpu_realm(current) )
+ arm_cca_vcpu_run(current);
+#endif
else if ( is_32bit_domain(current->domain) )
/* check_wakeup_from_wait(); */
reset_stack_and_jump(return_to_new_vcpu32);
diff --git a/xen/arch/arm/include/asm/arm64/hsr.h b/xen/arch/arm/include/asm/arm64/hsr.h
index 1495ccddeab6..7cdddd6ae903 100644
--- a/xen/arch/arm/include/asm/arm64/hsr.h
+++ b/xen/arch/arm/include/asm/arm64/hsr.h
@@ -86,6 +86,7 @@
#define HSR_SYSREG_PMINTENCLR_EL1 HSR_SYSREG(3,0,c9,c14,2)
#define HSR_SYSREG_MAIR_EL1 HSR_SYSREG(3,0,c10,c2,0)
#define HSR_SYSREG_AMAIR_EL1 HSR_SYSREG(3,0,c10,c3,0)
+#define HSR_SYSREG_ICC_DIR_EL1 HSR_SYSREG(3,0,c12,c11,1)
#define HSR_SYSREG_ICC_SGI1R_EL1 HSR_SYSREG(3,0,c12,c11,5)
#define HSR_SYSREG_ICC_ASGI1R_EL1 HSR_SYSREG(3,1,c12,c11,6)
#define HSR_SYSREG_ICC_SGI0R_EL1 HSR_SYSREG(3,2,c12,c11,7)
diff --git a/xen/arch/arm/include/asm/hsr.h b/xen/arch/arm/include/asm/hsr.h
index 9b91b28c48e3..29a91bf866f1 100644
--- a/xen/arch/arm/include/asm/hsr.h
+++ b/xen/arch/arm/include/asm/hsr.h
@@ -168,6 +168,10 @@ union hsr {
#endif
};
+/* HSR.EC == HSR_EC_WFI_WFE */
+#define HSR_WFI_WFE_WFE (_AC(1, UL) << 0)
+#define HSR_WFI_WFE_WFXT (_AC(1, UL) << 1)
+
/* HSR.EC == HSR_CP{15,14,10}_32 */
#define HSR_CP32_OP2_MASK (0x000e0000)
#define HSR_CP32_OP2_SHIFT (17)
--
2.51.0