|
[RFC PATCH v1 14/26] xen/arm/cca: validate Realm construction inputs
Check the domain shape, RAM layout and RMM feature set before starting a
Realm build. The domctl range is treated as the protected Realm RAM.
Signed-off-by: Koichiro Den <den@xxxxxxxxxxxxx>
---
xen/arch/arm/cca/build.c | 411 +++++++++++++++++++++++++++++++++++++++
1 file changed, 411 insertions(+)
create mode 100644 xen/arch/arm/cca/build.c
diff --git a/xen/arch/arm/cca/build.c b/xen/arch/arm/cca/build.c
new file mode 100644
index 000000000000..f333813e10a0
--- /dev/null
+++ b/xen/arch/arm/cca/build.c
@@ -0,0 +1,411 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <xen/errno.h>
+#include <xen/lib.h>
+#include <xen/mm.h>
+#include <xen/domain_page.h>
+#include <xen/sched.h>
+#include <xen/sizes.h>
+#include <xen/xmalloc.h>
+
+#include <asm/arm64/sve.h>
+#include <asm/cca.h>
+#include <asm/domain.h>
+#include <asm/event.h>
+#include <asm/p2m.h>
+#include <asm/vpl011.h>
+
+#include "rmi.h"
+#include "sro.h"
+
+/*
+ * Xen CCA currently accepts only 4KB RMI granules. With 512 entries per RTT,
+ * the level 0, 1 and 2 tables span 512GB, 1GB and 2MB respectively.
+ */
+#define ARM_CCA_L0_TABLE_SPAN GB(512)
+#define ARM_CCA_L1_TABLE_SPAN GB(1)
+#define ARM_CCA_L2_TABLE_SPAN SZ_2M
+#define ARM_CCA_REALM_MAX_PAGES (GUEST_RAM0_SIZE >> PAGE_SHIFT)
+#define ARM_CCA_BUILD_PREEMPT_TABLES 64U
+#define ARM_CCA_BUILD_FORCE_PREEMPT_TABLES 256U
+#define ARM_CCA_BUILD_PREEMPT_PAGES 1024UL
+#define ARM_CCA_BUILD_FORCE_PREEMPT_PAGES 4096UL
+#define ARM_CCA_REALM_MAX_IPA_BITS_WITHOUT_LPA2 48U
+
+/*
+ * Allocate one zeroed domheap page for use during the Realm build.
+ *
+ * The page is allocated anonymously (no owning domain) and scrubbed through
+ * a transient mapping before being handed back, so no stale host data can
+ * end up in Realm structures.  Returns NULL on allocation failure.
+ */
+static struct page_info *arm_cca_alloc_host_page(void)
+{
+    struct page_info *pg;
+    void *va;
+
+    pg = alloc_domheap_page(NULL, 0);
+    if ( !pg )
+        return NULL;
+
+    /* Map, clear and unmap: the page is not in the directmap by default. */
+    va = map_domain_page(page_to_mfn(pg));
+    clear_page(va);
+    unmap_domain_page(va);
+
+    return pg;
+}
+
+/* Extract the @width-bit field at bit @shift from an RMI feature register. */
+static unsigned int arm_cca_feature_field(unsigned long features,
+                                          unsigned int shift,
+                                          unsigned int width)
+{
+    unsigned int field = arm_cca_rmi_field_get(features, shift, width);
+
+    return field;
+}
+
+/* Test whether a single-bit RMI feature flag reads as RMI "true". */
+static bool arm_cca_feature_is_true(unsigned long features, unsigned int shift)
+{
+    unsigned int flag = arm_cca_feature_field(features, shift, 1U);
+
+    return flag == ARM_CCA_RMI_FEATURE_TRUE;
+}
+
+/*
+ * Pass @rc through, recording the raw RMI return code on failure.
+ *
+ * @rmi_result is written only when @rc indicates failure, the caller
+ * supplied a slot, and the SMC result is either an RMI error status or the
+ * SMCCC "unknown function" marker, so callers can report exactly what the
+ * RMM said.  @rc itself is returned unchanged.
+ */
+static int arm_cca_build_record_rmi_failure(uint64_t *rmi_result, int rc,
+                                            const struct arm_smccc_res *res)
+{
+    uint64_t result = arm_cca_rmi_result(res);
+
+    if ( rc != 0 && rmi_result &&
+         (arm_cca_rmi_status_is_error(result) ||
+          arm_cca_rmi_result_is_smccc_unknown(result)) )
+        *rmi_result = result;
+
+    return rc;
+}
+
+/*
+ * Hand a delegated build granule back from the RMM to the Normal world.
+ *
+ * Returns false when undelegation fails; the granule then remains under RMM
+ * ownership with no wipe guarantee, so the caller must not reuse it or free
+ * it back to the heap.
+ */
+static bool arm_cca_undelegate_build_page(struct domain *d,
+                                          struct page_info *pg)
+{
+    paddr_t pa = page_to_maddr(pg);
+    int rc = arm_cca_undelegate_granule(pa);
+
+    if ( rc != 0 )
+    {
+        /* No successful undelegation means no RMM wipe guarantee. */
+        printk(XENLOG_ERR
+               "%pd: ARM CCA: failed to undelegate abandoned granule %#"
+               PRIpaddr "\n",
+               d, pa);
+        return false;
+    }
+
+    return true;
+}
+
+/*
+ * Return true when the caller can forget pg. A false return means
+ * undelegation failed, so the caller must keep the page tracked.
+ *
+ * @delegated indicates pg is currently delegated to the RMM and must be
+ * undelegated before it may go back to the heap.  A NULL pg is a no-op
+ * success.
+ */
+static bool arm_cca_free_build_page(struct domain *d, struct page_info *pg,
+                                    bool delegated)
+{
+    if ( !pg )
+        return true;
+
+    if ( delegated && !arm_cca_undelegate_build_page(d, pg) )
+        return false;
+
+    free_domheap_page(pg);
+
+    return true;
+}
+
+/*
+ * Free a build page, or park it on the domain's abandoned-pages list when
+ * undelegation fails, so a granule the RMM may still own is never returned
+ * to the heap.
+ */
+static void arm_cca_free_or_abandon_build_page(struct domain *d,
+                                               struct page_info *pg,
+                                               bool delegated)
+{
+    if ( !arm_cca_free_build_page(d, pg, delegated) )
+        page_list_add_tail(pg, &d->arch.cca.abandoned_pages);
+}
+
+/* True when [start, next) lies entirely inside [base, end). */
+static bool arm_cca_gfn_range_contains(gfn_t base, gfn_t end,
+                                       gfn_t start, gfn_t next)
+{
+    if ( gfn_x(start) < gfn_x(base) )
+        return false;
+
+    return gfn_x(next) <= gfn_x(end);
+}
+
+/* True when [start, next) and [base, end) share at least one GFN. */
+static bool arm_cca_gfn_range_overlaps(gfn_t base, gfn_t end,
+                                       gfn_t start, gfn_t next)
+{
+    if ( gfn_x(start) >= gfn_x(end) )
+        return false;
+
+    return gfn_x(base) < gfn_x(next);
+}
+
+/*
+ * Initial Realm support has no separate private/shared memory metadata.
+ * Treat the domctl range as the whole protected Realm RAM contract: every
+ * mapped entry in the range must be writable RAM, and no other valid guest
+ * mapping may remain outside it apart from Arm magic pages, which are not
+ * exposed through the Realm device tree.
+ *
+ * p2m_get_entry() can return block mappings, so reject entries which only
+ * partly overlap the Realm RAM range. Otherwise one p2m block could make
+ * memory outside the measured/protected range guest-visible.
+ *
+ * Returns 0 when the layout satisfies the contract, -EINVAL otherwise.
+ * Walks the p2m under the read lock only; nothing is modified.
+ */
+static int arm_cca_validate_ram_layout(struct domain *d, gfn_t base_gfn,
+                                       unsigned long nr_pages)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    gfn_t end_gfn = gfn_add(base_gfn, nr_pages);
+    gfn_t magic_base = _gfn(GUEST_MAGIC_BASE >> PAGE_SHIFT);
+    gfn_t magic_end = _gfn((GUEST_MAGIC_BASE + GUEST_MAGIC_SIZE) >>
+                           PAGE_SHIFT);
+    gfn_t gfn, end;
+    int rc = 0;
+
+    p2m_read_lock(p2m);
+
+    /* Walk every p2m entry in the currently mapped GFN window. */
+    gfn = p2m->lowest_mapped_gfn;
+    end = gfn_add(p2m->max_mapped_gfn, 1);
+
+    while ( gfn_x(gfn) < gfn_x(end) )
+    {
+        unsigned int order;
+        p2m_type_t t;
+        bool valid;
+        mfn_t mfn = p2m_get_entry(p2m, gfn, &t, NULL, &order, &valid);
+        gfn_t next = gfn_next_boundary(gfn, order);
+
+        if ( arm_cca_gfn_range_overlaps(base_gfn, end_gfn, gfn, next) )
+        {
+            /*
+             * Entries touching the Realm RAM range must be valid writable
+             * RAM and lie entirely inside it (no straddling block maps).
+             */
+            if ( !valid || mfn_eq(mfn, INVALID_MFN) ||
+                 t != p2m_ram_rw ||
+                 !arm_cca_gfn_range_contains(base_gfn, end_gfn, gfn, next) )
+            {
+                rc = -EINVAL;
+                break;
+            }
+        }
+        else if ( valid && !mfn_eq(mfn, INVALID_MFN) &&
+                  !arm_cca_gfn_range_contains(magic_base, magic_end, gfn,
+                                              next) )
+        {
+            /* Outside the range only the Arm magic pages may stay mapped. */
+            rc = -EINVAL;
+            break;
+        }
+
+        gfn = next;
+    }
+
+    p2m_read_unlock(p2m);
+
+    return rc;
+}
+
+/*
+ * Sanity-check the domain shape and the requested protected RAM range
+ * before any RMI call is issued for the Realm build.
+ *
+ * Returns 0 on success, -EOPNOTSUPP for unsupported configurations,
+ * -EPERM once domain creation has finished, -EEXIST when the domain is
+ * already a Realm, and -EINVAL for a bad RAM range.
+ */
+static int arm_cca_validate_domain(struct domain *d, gfn_t base_gfn,
+                                   unsigned long nr_pages)
+{
+    unsigned int i;
+    int rc;
+
+    if ( is_hardware_domain(d) )
+        return -EOPNOTSUPP;
+
+    if ( !is_hvm_domain(d) )
+        return -EOPNOTSUPP;
+
+    if ( is_32bit_domain(d) )
+        return -EOPNOTSUPP;
+
+    /* Every vCPU must already be allocated before the Realm is built. */
+    if ( d->max_vcpus == 0 || d->vcpu[0] == NULL )
+        return -EOPNOTSUPP;
+
+    for ( i = 1; i < d->max_vcpus; ++i )
+        if ( d->vcpu[i] == NULL )
+            return -EOPNOTSUPP;
+
+    if ( d->arch.vgic.version != GIC_V3 )
+        return -EOPNOTSUPP;
+
+    /* The Realm must be built before the domain is allowed to run. */
+    if ( d->creation_finished )
+        return -EPERM;
+
+    if ( is_domain_realm(d) )
+        return -EEXIST;
+
+    if ( domain_has_vpl011(d) &&
+         !IS_ENABLED(CONFIG_ARM_CCA_REALM_DEBUG_VUART) )
+    {
+        printk(XENLOG_G_ERR
+               "ARM CCA: Realm VUART debug console is disabled for %pd\n",
+               d);
+        return -EOPNOTSUPP;
+    }
+
+    /* The protected range must start at the standard guest RAM base. */
+    if ( gfn_x(base_gfn) != (GUEST_RAM_BASE >> PAGE_SHIFT) )
+        return -EOPNOTSUPP;
+
+    if ( nr_pages == 0 || nr_pages > ARM_CCA_REALM_MAX_PAGES )
+        return -EINVAL;
+
+    /*
+     * domain_tot_pages() also includes Arm magic pages outside guest RAM, so
+     * it is only a sanity upper bound for the protected RAM range.
+     */
+    if ( nr_pages > domain_tot_pages(d) )
+        return -EINVAL;
+
+    rc = arm_cca_validate_ram_layout(d, base_gfn, nr_pages);
+    if ( rc != 0 )
+        return rc;
+
+    return 0;
+}
+
+/*
+ * Check the domain configuration against RMI feature register 0.
+ *
+ * Rejects domains whose IPA width exceeds what the RMM supports, and
+ * refuses feature requests (LPA2, SVE, PMU) that Xen CCA does not yet
+ * enable in RmiRealmParams, so a Realm never silently differs from what
+ * the toolstack asked for.  Returns 0 or -EOPNOTSUPP.
+ */
+static int arm_cca_validate_realm_features0(struct domain *d)
+{
+    unsigned long features0 = d->arch.cca.rmi_features0;
+    unsigned int max_ipa_bits;
+    unsigned int num_bps, num_wps;
+
+    /* S2SZ advertises the widest stage-2 IPA size the RMM can handle. */
+    max_ipa_bits = arm_cca_feature_field(
+        features0, ARM_CCA_RMI_FEATURE_REGISTER_0_S2SZ_SHIFT,
+        ARM_CCA_RMI_FEATURE_REGISTER_0_S2SZ_WIDTH);
+    if ( p2m_ipa_bits > max_ipa_bits )
+    {
+        printk(XENLOG_G_ERR
+               "ARM CCA: %pd requires %u-bit IPA, RMM supports %u-bit IPA\n",
+               d, p2m_ipa_bits, max_ipa_bits);
+        return -EOPNOTSUPP;
+    }
+
+    /*
+     * Xen CCA currently leaves RmiRealmParams::flags0.lpa2, sve, and pmu
+     * clear. Reject configurations that would need those Realm features
+     * instead of silently creating a Realm with different behavior.
+     */
+    if ( p2m_ipa_bits > ARM_CCA_REALM_MAX_IPA_BITS_WITHOUT_LPA2 )
+    {
+        if ( !arm_cca_feature_is_true(
+                 features0, ARM_CCA_RMI_FEATURE_REGISTER_0_LPA2_SHIFT) )
+            printk(XENLOG_G_ERR
+                   "ARM CCA: %pd requires LPA2 for %u-bit IPA, "
+                   "but RMM does not report LPA2 support\n",
+                   d, p2m_ipa_bits);
+        else
+            printk(XENLOG_G_ERR
+                   "ARM CCA: %pd requires LPA2 for %u-bit IPA, "
+                   "but Xen CCA does not enable Realm LPA2 yet\n",
+                   d, p2m_ipa_bits);
+        return -EOPNOTSUPP;
+    }
+
+    if ( is_sve_domain(d) )
+    {
+        if ( !arm_cca_feature_is_true(
+                 features0, ARM_CCA_RMI_FEATURE_REGISTER_0_SVE_SHIFT) )
+            printk(XENLOG_G_ERR
+                   "ARM CCA: %pd requests SVE, "
+                   "but RMM does not report SVE support\n",
+                   d);
+        else
+        {
+            unsigned int max_sve_vl = arm_cca_feature_field(
+                features0, ARM_CCA_RMI_FEATURE_REGISTER_0_SVE_VL_SHIFT,
+                ARM_CCA_RMI_FEATURE_REGISTER_0_SVE_VL_WIDTH);
+
+            /* Field encodes (VL / SVE_VL_MULTIPLE_VAL) - 1; decode for log. */
+            printk(XENLOG_G_ERR
+                   "ARM CCA: %pd requests SVE; RMM supports %u-bit VL, "
+                   "but Xen CCA does not enable Realm SVE yet\n",
+                   d, (max_sve_vl + 1U) * SVE_VL_MULTIPLE_VAL);
+        }
+        return -EOPNOTSUPP;
+    }
+
+    /* A NUM_BPS/NUM_WPS encoding of 0 is reserved -- treat as a broken RMM. */
+    num_bps = arm_cca_feature_field(
+        features0, ARM_CCA_RMI_FEATURE_REGISTER_0_NUM_BPS_SHIFT,
+        ARM_CCA_RMI_FEATURE_REGISTER_0_NUM_BPS_WIDTH);
+    if ( num_bps == 0 )
+    {
+        printk(XENLOG_G_ERR
+               "ARM CCA: %pd RMM reports reserved NUM_BPS value 0\n", d);
+        return -EOPNOTSUPP;
+    }
+
+    num_wps = arm_cca_feature_field(
+        features0, ARM_CCA_RMI_FEATURE_REGISTER_0_NUM_WPS_SHIFT,
+        ARM_CCA_RMI_FEATURE_REGISTER_0_NUM_WPS_WIDTH);
+    if ( num_wps == 0 )
+    {
+        printk(XENLOG_G_ERR
+               "ARM CCA: %pd RMM reports reserved NUM_WPS value 0\n", d);
+        return -EOPNOTSUPP;
+    }
+
+    if ( d->options & XEN_DOMCTL_CDF_vpmu )
+    {
+        if ( !arm_cca_feature_is_true(
+                 features0, ARM_CCA_RMI_FEATURE_REGISTER_0_PMU_SHIFT) )
+            printk(XENLOG_G_ERR
+                   "ARM CCA: %pd requests PMU, "
+                   "but RMM does not report PMU support\n",
+                   d);
+        else
+        {
+            unsigned int num_ctrs = arm_cca_feature_field(
+                features0,
+                ARM_CCA_RMI_FEATURE_REGISTER_0_PMU_NUM_CTRS_SHIFT,
+                ARM_CCA_RMI_FEATURE_REGISTER_0_PMU_NUM_CTRS_WIDTH);
+
+            printk(XENLOG_G_ERR
+                   "ARM CCA: %pd requests PMU; RMM supports %u counters, "
+                   "but Xen CCA does not enable Realm PMU yet\n",
+                   d, num_ctrs);
+        }
+        return -EOPNOTSUPP;
+    }
+
+    return 0;
+}
+
+/*
+ * Check mandatory RMM capabilities from RMI feature register 1: 4KB RMI
+ * granules, SHA-256 Realm measurements and enough RECs for every vCPU.
+ * Returns 0 or -EOPNOTSUPP.
+ */
+static int arm_cca_validate_realm_features1(struct domain *d)
+{
+    unsigned long features1 = d->arch.cca.rmi_features1;
+    unsigned int max_recs_order, max_recs;
+
+    if ( !arm_cca_feature_is_true(
+             features1,
+             ARM_CCA_RMI_FEATURE_REGISTER_1_RMI_GRAN_SZ_4KB_SHIFT) )
+    {
+        printk(XENLOG_G_ERR
+               "ARM CCA: %pd requires 4KB RMI granules, "
+               "but RMM does not report 4KB RMI granule support\n",
+               d);
+        return -EOPNOTSUPP;
+    }
+
+    if ( !arm_cca_feature_is_true(
+             features1, ARM_CCA_RMI_FEATURE_REGISTER_1_HASH_SHA_256_SHIFT) )
+    {
+        printk(XENLOG_G_ERR
+               "ARM CCA: %pd requires SHA-256 Realm measurements, "
+               "but RMM does not report SHA-256 support\n",
+               d);
+        return -EOPNOTSUPP;
+    }
+
+    max_recs_order = arm_cca_feature_field(
+        features1, ARM_CCA_RMI_FEATURE_REGISTER_1_MAX_RECS_ORDER_SHIFT,
+        ARM_CCA_RMI_FEATURE_REGISTER_1_MAX_RECS_ORDER_WIDTH);
+    /*
+     * NOTE(review): this treats the REC limit as 2^order - 1.  If the RMM
+     * spec defines the limit as 2^MAX_RECS_ORDER, the check below is
+     * conservative by one REC -- confirm against the RMM specification.
+     */
+    max_recs = (1U << max_recs_order) - 1U;
+    if ( d->max_vcpus > max_recs )
+    {
+        printk(XENLOG_G_ERR
+               "ARM CCA: %pd requires %u RECs, RMM supports %u RECs\n",
+               d, d->max_vcpus, max_recs);
+        return -EOPNOTSUPP;
+    }
+
+    return 0;
+}
+
+/* Run all RMI feature-register checks required before building a Realm. */
+static int arm_cca_validate_realm_features(struct domain *d)
+{
+    int rc = arm_cca_validate_realm_features0(d);
+
+    if ( rc == 0 )
+        rc = arm_cca_validate_realm_features1(d);
+
+    return rc;
+}
--
2.51.0
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.