|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [RFC PATCH v1 18/26] xen/arm/cca: create RECs and activate Realms
Finish the restartable build path with REC creation and Realm
activation. Keep enough state either to continue after preemption or
to abort cleanly.
Signed-off-by: Koichiro Den <den@xxxxxxxxxxxxx>
---
xen/arch/arm/Kconfig | 11 ++
xen/arch/arm/cca/Makefile | 1 +
xen/arch/arm/cca/build.c | 333 +++++++++++++++++++++++++++++++++
xen/arch/arm/cca/state.c | 7 +
xen/arch/arm/include/asm/cca.h | 26 +++
5 files changed, 378 insertions(+)
diff --git a/xen/arch/arm/Kconfig b/xen/arch/arm/Kconfig
index dc99020c96de..3635a79af272 100644
--- a/xen/arch/arm/Kconfig
+++ b/xen/arch/arm/Kconfig
@@ -132,6 +132,17 @@ config ARM_CCA
This support is experimental. If unsure, say N.
+config ARM_CCA_REALM_DEBUG_VUART
+ bool "Debug VUART console for Arm Realm guests"
+ depends on ARM_CCA && SBSA_VUART_CONSOLE
+ help
+ Allow Arm CCA Realm guests to use the emulated SBSA UART console.
+
+ The VUART is a host-visible clear-text debug channel. It is useful
+ for debugging Realm guests while no better guest access path
+ exists, but confidentiality-oriented configurations should disable
+ it and rely on attested in-Realm services instead.
+
config GICV2
bool "GICv2 driver"
default y
diff --git a/xen/arch/arm/cca/Makefile b/xen/arch/arm/cca/Makefile
index bf6d9b58ebec..0e66280012b7 100644
--- a/xen/arch/arm/cca/Makefile
+++ b/xen/arch/arm/cca/Makefile
@@ -3,3 +3,4 @@ obj-y += realm.o
obj-y += rmi.o
obj-y += sro.o
obj-y += state.o
+obj-y += build.o
diff --git a/xen/arch/arm/cca/build.c b/xen/arch/arm/cca/build.c
index 8e607e97b99c..29eb0c6057b0 100644
--- a/xen/arch/arm/cca/build.c
+++ b/xen/arch/arm/cca/build.c
@@ -788,3 +788,336 @@ err_page:
return rc;
}
+
+/*
+ * Hand ownership of the accepted REC auxiliary pages over to the vCPU.
+ * Entries consumed from @pending_aux are NULLed and the pending count is
+ * reset to zero once everything has been adopted.
+ */
+static void arm_cca_adopt_rec_aux_pages(struct vcpu *v,
+                                        struct page_info **pending_aux,
+                                        unsigned int *nr_pending_aux)
+{
+    unsigned int idx = 0;
+
+    while ( idx < *nr_pending_aux )
+    {
+        ASSERT(v->arch.cca.nr_aux < ARRAY_SIZE(v->arch.cca.aux_pages));
+        v->arch.cca.aux_pages[v->arch.cca.nr_aux++] = pending_aux[idx];
+        pending_aux[idx] = NULL;
+        idx++;
+    }
+
+    *nr_pending_aux = 0;
+}
+
+/*
+ * Issue RMI_REC_CREATE for one REC and complete the SRO-mediated memory
+ * transfer that may accompany it.  Any auxiliary granules the SRO accepted
+ * are adopted onto the vCPU even on failure — presumably so they are
+ * reclaimed on the normal vCPU teardown path (see arm_cca_vcpu_destroy();
+ * TODO confirm).  On RMI failure the raw status is recorded through
+ * @rmi_result for the toolstack.
+ */
+static int arm_cca_rmi_rec_create_complete(struct domain *d, struct vcpu *v,
+ paddr_t rec, paddr_t params,
+ uint64_t *rmi_result)
+{
+ struct page_info *pending_aux[ARM_CCA_MAX_REC_AUX] = { NULL };
+ unsigned int nr_pending_aux = 0;
+ struct arm_cca_sro_mem_xfer xfer = {
+ .pages = pending_aux,
+ .nr_pages = &nr_pending_aux,
+ .abandoned_pages = &d->arch.cca.abandoned_pages,
+ };
+ struct arm_smccc_res res;
+ int rc;
+
+ /* Defensive: a corrupted nr_aux would underflow max_pages below. */
+ if ( v->arch.cca.nr_aux > ARRAY_SIZE(v->arch.cca.aux_pages) )
+ return -EIO;
+
+ xfer.max_pages = ARRAY_SIZE(v->arch.cca.aux_pages) - v->arch.cca.nr_aux;
+
+ /*
+ * Each step folds the previous rc into its own result; only the final
+ * rc decides success, and the last call latches any RMI failure.
+ */
+ rc = arm_cca_rmi_rec_create(d->arch.cca.rd, rec, params, &res);
+ rc = arm_cca_sro_complete_mem_transfer(rc, &res, &xfer);
+ rc = arm_cca_build_record_rmi_failure(rmi_result, rc, &res);
+
+ if ( rc != 0 && nr_pending_aux != 0 )
+ {
+ /*
+ * Accepted REC auxiliary pages are returned only when the SRO reports
+ * RMI_OP_MEM_REQ_RECLAIM. If it ends before then, Xen has no separate
+ * reclaim operation for them.
+ */
+ d->arch.cca.build_unrecoverable = true;
+ }
+
+ /* Adopt on both success and failure so the pages are never leaked. */
+ if ( nr_pending_aux != 0 )
+ arm_cca_adopt_rec_aux_pages(v, pending_aux, &nr_pending_aux);
+
+ return rc;
+}
+
+/*
+ * Allocate and create a single REC for @v.  On success the REC granule and
+ * its physical address are recorded in v->arch.cca.  On failure every
+ * resource acquired by this call is released; the REC granule is passed to
+ * arm_cca_free_or_abandon_build_page() together with its delegation state,
+ * since a granule already delegated to the RMM cannot simply be freed.
+ */
+static int arm_cca_create_rec(struct domain *d, struct vcpu *v, bool runnable,
+ uint64_t *rmi_result)
+{
+ struct cpu_user_regs *regs;
+ struct arm_cca_rmi_rec_params *params;
+ struct page_info *rec_pg = NULL;
+ struct page_info *params_pg = NULL;
+ bool rec_delegated = false, run_created = false;
+ void *va;
+ int rc = -ENOMEM;
+
+ rec_pg = arm_cca_alloc_host_page();
+ if ( !rec_pg )
+ goto out;
+
+ params_pg = arm_cca_alloc_host_page();
+ if ( !params_pg )
+ goto out;
+
+ /*
+ * The rec_run structure may already exist from an earlier, aborted
+ * attempt; only allocate (and on failure only free) it if this call
+ * created it.
+ */
+ if ( !v->arch.cca.run )
+ {
+ v->arch.cca.run = arm_cca_alloc_rec_run();
+ if ( !v->arch.cca.run )
+ goto out;
+ v->arch.cca.run_pa = virt_to_maddr(v->arch.cca.run);
+ run_created = true;
+ }
+
+ rc = arm_cca_delegate_granule(page_to_maddr(rec_pg));
+ if ( rc != 0 )
+ goto out;
+ rec_delegated = true;
+
+ va = map_domain_page(page_to_mfn(params_pg));
+ params = va;
+ regs = &v->arch.cpu_info->guest_cpu_user_regs;
+
+ /*
+ * REC_CREATE takes an RmiRecMpidr affinity value, not a full MPIDR_EL1.
+ * DEN0137 2.0-bet1 - B4.6.68 RmiRecMpidr type.
+ */
+ arm_cca_rec_params_init(params, vcpuid_to_vaffinity(v->vcpu_id),
+ regs->pc, runnable);
+ /* Seed the REC's initial x0-x3 from the vCPU's boot register state. */
+ params->gprs[0] = regs->x0;
+ params->gprs[1] = regs->x1;
+ params->gprs[2] = regs->x2;
+ params->gprs[3] = regs->x3;
+
+ unmap_domain_page(va);
+
+ rc = arm_cca_rmi_rec_create_complete(d, v, page_to_maddr(rec_pg),
+ page_to_maddr(params_pg),
+ rmi_result);
+ if ( rc != 0 )
+ goto out;
+
+ arm_cca_rec_run_init(v->arch.cca.run);
+
+ v->arch.cca.rec_page = rec_pg;
+ v->arch.cca.rec = page_to_maddr(rec_pg);
+ rc = 0;
+
+out:
+ /*
+ * The params page is an input-only granule, freed on success and
+ * failure alike — NOTE(review): assumes the RMM copies the params
+ * during REC_CREATE; confirm against DEN0137.
+ */
+ if ( params_pg )
+ free_domheap_page(params_pg);
+
+ if ( rc != 0 && rec_pg )
+ arm_cca_free_or_abandon_build_page(d, rec_pg, rec_delegated);
+
+ if ( rc != 0 && run_created )
+ {
+ arm_cca_free_rec_run(v->arch.cca.run);
+ v->arch.cca.run = NULL;
+ v->arch.cca.run_pa = INVALID_PADDR;
+ }
+
+ return rc;
+}
+
+/* DEN0137 2.0-bet1 - D1.2.4 REC creation flow. */
+/* DEN0137 2.0-bet1 - D1.2.4 REC creation flow. */
+static int arm_cca_create_recs(struct domain *d, uint64_t *rmi_result)
+{
+    unsigned int cpu;
+
+    for ( cpu = 0; cpu < d->max_vcpus; ++cpu )
+    {
+        struct vcpu *v = d->vcpu[cpu];
+        int rc;
+
+        ASSERT(v);
+
+        /* Only the boot vCPU (vcpu0) is created runnable. */
+        rc = arm_cca_create_rec(d, v, cpu == 0, rmi_result);
+        if ( rc != 0 )
+            return rc;
+    }
+
+    return 0;
+}
+
+/*
+ * Reset all continuable-build tracking fields to their idle values.
+ * Must stay in sync with the initial values established in
+ * arm_cca_reset_domain_state() (state.c).
+ */
+static void arm_cca_build_state_clear(struct domain *d)
+{
+ d->arch.cca.build_phase = ARM_CCA_BUILD_NONE;
+ d->arch.cca.build_unrecoverable = false;
+ d->arch.cca.build_abort_rmi_result = 0;
+ d->arch.cca.build_base_gfn = INVALID_GFN;
+ d->arch.cca.build_nr_pages = 0;
+ d->arch.cca.build_next_ipa = INVALID_PADDR;
+ d->arch.cca.build_rtt_level = 0;
+}
+
+/*
+ * Begin tracking a new continuable Realm build for the given guest RAM
+ * region.  Resets the build state via arm_cca_build_state_clear() instead
+ * of duplicating its field list, so that new build fields added to the
+ * clear path cannot silently be missed here, then records the request.
+ */
+static void arm_cca_build_state_start(struct domain *d, gfn_t base_gfn,
+                                      unsigned long nr_pages)
+{
+    arm_cca_build_state_clear(d);
+    d->arch.cca.build_phase = ARM_CCA_BUILD_VALIDATE;
+    d->arch.cca.build_base_gfn = base_gfn;
+    d->arch.cca.build_nr_pages = nr_pages;
+}
+
+/*
+ * Accept a finalize request: begin tracking it if no build is in flight,
+ * otherwise verify the continuation repeats the original arguments.
+ * Returns 0 on success, -EBUSY when the arguments differ from the build
+ * already in progress.
+ */
+static int arm_cca_build_state_check(struct domain *d, gfn_t base_gfn,
+                                     unsigned long nr_pages)
+{
+    bool in_progress = d->arch.cca.build_phase != ARM_CCA_BUILD_NONE;
+
+    if ( !in_progress )
+    {
+        arm_cca_build_state_start(d, base_gfn, nr_pages);
+        return 0;
+    }
+
+    /* A continuation must describe exactly the same region. */
+    if ( d->arch.cca.build_nr_pages != nr_pages ||
+         !gfn_eq(d->arch.cca.build_base_gfn, base_gfn) )
+        return -EBUSY;
+
+    return 0;
+}
+
+/*
+ * Tear down a failed Realm build.  -ERESTART from the relinquish path is
+ * propagated so the operation can be continued; any other relinquish
+ * failure is reported as -EIO.  Per-vCPU CCA state is destroyed and
+ * re-initialised so a subsequent build attempt starts from a clean slate.
+ * Even a successful teardown returns -EIO when the build had been marked
+ * unrecoverable.
+ */
+static int arm_cca_domain_finalize_abort(struct domain *d)
+{
+ unsigned int i;
+ /* Latch the flag now; the state may be reset during teardown. */
+ bool fatal = d->arch.cca.build_unrecoverable;
+ int rc;
+
+ rc = arm_cca_domain_relinquish_resources(d);
+ if ( rc != 0 )
+ {
+ if ( rc == -ERESTART )
+ return rc;
+
+ return -EIO;
+ }
+
+ for ( i = 0; i < d->max_vcpus; ++i )
+ {
+ struct vcpu *v = d->vcpu[i];
+
+ if ( v == NULL )
+ continue;
+
+ /* Destroy-then-init leaves the vCPU ready for a retried build. */
+ arm_cca_vcpu_destroy(v);
+ arm_cca_vcpu_init(v);
+ }
+
+ return fatal ? -EIO : 0;
+}
+
+/*
+ * Drive the restartable Realm build state machine to completion.
+ * Continuable phases return -ERESTART so the caller can reissue the
+ * operation with identical arguments (enforced by
+ * arm_cca_build_state_check()).  On any build failure the machine moves
+ * to ARM_CCA_BUILD_ABORT, which tears the partial Realm down before
+ * reporting the error; the failing command's RMI status is preserved in
+ * build_abort_rmi_result so it survives abort continuations.
+ */
+int arm_cca_domain_finalize(struct domain *d, gfn_t base_gfn,
+ unsigned long nr_pages,
+ uint64_t *rmi_result)
+{
+ paddr_t base = gfn_to_gaddr(base_gfn);
+ struct arm_smccc_res res;
+ uint64_t abort_rmi_result;
+ int abort_rc, rc;
+
+ if ( rmi_result )
+ *rmi_result = 0;
+
+ rc = arm_cca_build_state_check(d, base_gfn, nr_pages);
+ if ( rc != 0 )
+ return rc;
+
+ /* Resume at the recorded phase; each case falls through on success. */
+ for ( ;; )
+ {
+ switch ( d->arch.cca.build_phase )
+ {
+ case ARM_CCA_BUILD_VALIDATE:
+ /*
+ * Validation failures happen before anything was built, so the
+ * state is simply cleared — no abort/teardown is needed.
+ */
+ rc = arm_cca_validate_domain(d, base_gfn, nr_pages);
+ if ( rc != 0 )
+ {
+ arm_cca_build_state_clear(d);
+ return rc;
+ }
+ rc = arm_cca_probe(d);
+ if ( rc != 0 )
+ {
+ arm_cca_build_state_clear(d);
+ return rc;
+ }
+ d->arch.cca.build_phase = ARM_CCA_BUILD_CREATE_REALM;
+ fallthrough;
+
+ case ARM_CCA_BUILD_CREATE_REALM:
+ rc = arm_cca_create_realm(d, rmi_result);
+ if ( rc != 0 )
+ goto err;
+ d->arch.cca.build_phase = ARM_CCA_BUILD_BUILD_RTTS;
+ fallthrough;
+
+ case ARM_CCA_BUILD_BUILD_RTTS:
+ /* Preemptible: -ERESTART keeps the phase for the continuation. */
+ rc = arm_cca_build_rtts(d, base, nr_pages, rmi_result);
+ if ( rc == -ERESTART )
+ return rc;
+ if ( rc != 0 )
+ goto err;
+ d->arch.cca.build_phase = ARM_CCA_BUILD_CREATE_DATA;
+ fallthrough;
+
+ case ARM_CCA_BUILD_CREATE_DATA:
+ /* Preemptible, same continuation behaviour as the RTT phase. */
+ rc = arm_cca_create_data_pages(d, base_gfn, nr_pages, rmi_result);
+ if ( rc == -ERESTART )
+ return rc;
+ if ( rc != 0 )
+ goto err;
+ d->arch.cca.build_phase = ARM_CCA_BUILD_CREATE_REC;
+ fallthrough;
+
+ case ARM_CCA_BUILD_CREATE_REC:
+ rc = arm_cca_create_recs(d, rmi_result);
+ if ( rc != 0 )
+ goto err;
+ d->arch.cca.build_phase = ARM_CCA_BUILD_ACTIVATE;
+ fallthrough;
+
+ case ARM_CCA_BUILD_ACTIVATE:
+ rc = arm_cca_rmi_realm_activate(d->arch.cca.rd, &res);
+ rc = arm_cca_build_record_rmi_failure(rmi_result, rc, &res);
+ if ( rc != 0 )
+ goto err;
+ d->arch.cca.build_phase = ARM_CCA_BUILD_BIND;
+ fallthrough;
+
+ case ARM_CCA_BUILD_BIND:
+ /*
+ * NOTE(review): BIND currently only marks the Realm active and
+ * clears the build state; presumably later patches in the series
+ * bind further resources in this phase — confirm.
+ */
+ d->arch.cca.realm_active = true;
+ arm_cca_build_state_clear(d);
+ return 0;
+
+ case ARM_CCA_BUILD_ABORT:
+ /* Re-entered here when a previous abort returned -ERESTART. */
+ abort_rmi_result = d->arch.cca.build_abort_rmi_result;
+ rc = arm_cca_domain_finalize_abort(d);
+ if ( rc == -ERESTART )
+ return rc;
+ /* The original build error is gone after a continuation. */
+ if ( rc == 0 )
+ rc = -EIO;
+ if ( rc == -EIO && rmi_result )
+ *rmi_result = abort_rmi_result;
+ return rc;
+
+ default:
+ rc = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ /*
+ * Stash the failing RMI status so a continued abort can still report
+ * it, then run the abort path immediately.
+ */
+ d->arch.cca.build_abort_rmi_result = rmi_result ? *rmi_result : 0;
+ d->arch.cca.build_phase = ARM_CCA_BUILD_ABORT;
+
+ abort_rc = arm_cca_domain_finalize_abort(d);
+ if ( abort_rc == -ERESTART )
+ return abort_rc;
+
+ /* Prefer the abort error if teardown itself failed (GNU ?: idiom). */
+ return abort_rc ?: rc;
+}
diff --git a/xen/arch/arm/cca/state.c b/xen/arch/arm/cca/state.c
index 72dbb83841d7..d85a20a9e7d2 100644
--- a/xen/arch/arm/cca/state.c
+++ b/xen/arch/arm/cca/state.c
@@ -32,6 +32,13 @@ static void arm_cca_reset_domain_state(struct domain *d)
d->arch.cca.nr_data_pages = 0;
d->arch.cca.relinquish_data_idx = 0;
d->arch.cca.realm_terminate_done = false;
+ d->arch.cca.build_phase = ARM_CCA_BUILD_NONE;
+ d->arch.cca.build_unrecoverable = false;
+ d->arch.cca.build_abort_rmi_result = 0;
+ d->arch.cca.build_base_gfn = INVALID_GFN;
+ d->arch.cca.build_nr_pages = 0;
+ d->arch.cca.build_next_ipa = INVALID_PADDR;
+ d->arch.cca.build_rtt_level = 0;
for ( i = 0; i < ARRAY_SIZE(d->arch.cca.realm_sro_pages); ++i )
d->arch.cca.realm_sro_pages[i] = NULL;
diff --git a/xen/arch/arm/include/asm/cca.h b/xen/arch/arm/include/asm/cca.h
index d69e95a10010..5e6b11a3693d 100644
--- a/xen/arch/arm/include/asm/cca.h
+++ b/xen/arch/arm/include/asm/cca.h
@@ -2,6 +2,7 @@
#ifndef ARM_CCA_H
#define ARM_CCA_H
+#include <xen/compiler.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/types.h>
@@ -41,6 +42,18 @@ struct arm_cca_data_page_record {
paddr_t pa;
};
+/* Phases of the restartable Realm build driven by arm_cca_domain_finalize(). */
+enum arm_cca_build_phase {
+ ARM_CCA_BUILD_NONE, /* No build in progress. */
+ ARM_CCA_BUILD_VALIDATE, /* Validate the domain and probe RMM support. */
+ ARM_CCA_BUILD_CREATE_REALM, /* RMI Realm creation. */
+ ARM_CCA_BUILD_BUILD_RTTS, /* Build RTTs for guest RAM (preemptible). */
+ ARM_CCA_BUILD_CREATE_DATA, /* Populate data granules (preemptible). */
+ ARM_CCA_BUILD_CREATE_REC, /* Create one REC per vCPU. */
+ ARM_CCA_BUILD_ACTIVATE, /* RMI Realm activation. */
+ ARM_CCA_BUILD_BIND, /* Final bookkeeping; marks the Realm active. */
+ ARM_CCA_BUILD_ABORT, /* Tear down a failed build (preemptible). */
+};
+
struct arm_cca_domain_state {
bool realm_active;
paddr_t rd;
@@ -70,6 +83,15 @@ struct arm_cca_domain_state {
/* Realm destruction state for domain_relinquish_resources(). */
unsigned long relinquish_data_idx;
bool realm_terminate_done;
+
+ /* Continuable Realm construction state for XEN_DOMCTL_arm_cca_op. */
+ enum arm_cca_build_phase build_phase;
+ bool build_unrecoverable;
+ uint64_t build_abort_rmi_result;
+ gfn_t build_base_gfn;
+ unsigned long build_nr_pages;
+ paddr_t build_next_ipa;
+ unsigned int build_rtt_level;
};
struct arm_cca_vcpu_state {
@@ -85,8 +107,12 @@ struct arm_cca_vcpu_state {
void arm_cca_domain_init(struct domain *d);
void arm_cca_domain_destroy(struct domain *d);
int arm_cca_domain_relinquish_resources(struct domain *d);
+int arm_cca_domain_finalize(struct domain *d, gfn_t base_gfn,
+ unsigned long nr_pages,
+ uint64_t *rmi_result);
void arm_cca_vcpu_init(struct vcpu *v);
+void noreturn arm_cca_vcpu_run(struct vcpu *v);
void arm_cca_vcpu_destroy(struct vcpu *v);
void *arm_cca_alloc_rec_run(void);
--
2.51.0
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |