[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[RFC PATCH v1 17/26] xen/arm/cca: populate Realm DATA granules


  • To: xen-devel@xxxxxxxxxxxxxxxxxxxx
  • From: Koichiro Den <den@xxxxxxxxxxxxx>
  • Date: Fri, 15 May 2026 13:08:03 +0900
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=valinux.co.jp; dmarc=pass action=none header.from=valinux.co.jp; dkim=pass header.d=valinux.co.jp; arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=GEdXsCdriXn7T5hiFjbS6jzjWemTNnbeU0cz8lWbPus=; b=PrDcd/wYAL0OdhAwxUSTkPisMnEu/FSLf150+HLudg2uwI8a9hPlLfcqoeVqJkZekGRwHQjM81/hTeBkY5QutHkZ5gnl1VWb0adoJVlgLRF5drH5keN2amwF6FUMqqdcRjnqXW8/BtDWnuwaGxerCXA+RsTfuD4btZij7iuuvBG37TdelvJSwyBO+HnO/ChMTW5mMID7Fsivl+FELVA+z1GSIcZhlTZHYrlzakSOts1vl+MHIzHnHGgJAeBdIcxet0HSgLz95ermsOqu6iCpWoqRzQ++RBWofU0hvWDnXA3ULKZE/FcaTzFIEnqzuray3eDwGmIfnJcuhKb5v31bMw==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=EaMLNYa3RCkFZn9Ou4MuWZ+BQTd/LdhEIwfzfGqgN7UErUp8lTxNkvaWhju0Xt6Uli6fnfLkGGSKT2FHTdc2TdOlfwJK/IKiDnMzjH6ZRluf7ySroD5LfIE9zoazbLZWp3Wku3Tk8yOxI47yDFB5i4NI21efzHz9DD0HsZ+vTek+wE6nsdujp63chsFClXviI3y9Uscn/9Td68YsDzAhOhycqumaspe5+BqUimkkkmWDQdaHZF/gwwQrKATl7QBSoeCjsMKDPtHulpQL37mHhrbfvCo/4feGOditlqmRXel3WUtRNKc8zE6SpEBqxAimTqvakmzNEexRNfoAjRHOnA==
  • Authentication-results: eu.smtp.expurgate.cloud; dkim=pass header.s=selector1 header.d=valinux.co.jp header.i="@valinux.co.jp" header.h="From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck"
  • Authentication-results: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=valinux.co.jp;
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Anthony PERARD <anthony.perard@xxxxxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Julien Grall <julien@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, "Daniel P. Smith" <dpsmith@xxxxxxxxxxxxxxxxxxxx>, Juergen Gross <jgross@xxxxxxxx>, Bertrand Marquis <bertrand.marquis@xxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
  • Delivery-date: Fri, 15 May 2026 04:12:41 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Move initial guest RAM out of the p2m, delegate it, and map it into the
Realm with measurement enabled.

Signed-off-by: Koichiro Den <den@xxxxxxxxxxxxx>
---
 xen/arch/arm/cca/build.c | 124 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 124 insertions(+)

diff --git a/xen/arch/arm/cca/build.c b/xen/arch/arm/cca/build.c
index 10f61b5038ef..8e607e97b99c 100644
--- a/xen/arch/arm/cca/build.c
+++ b/xen/arch/arm/cca/build.c
@@ -664,3 +664,127 @@ static int arm_cca_build_rtts(struct domain *d, paddr_t base,
 
     return 0;
 }
+
+/*
+ * DEN0137 2.0-bet1 - D1.2.3 Initialize memory of New Realm flow.
+ */
+/*
+ * Populate Realm DATA granules for @nr_pages guest pages starting at
+ * @base_gfn.  For each page: snapshot its contents into a scratch page,
+ * remove it from the p2m, delegate the granule to the RMM, then create
+ * the DATA granule with the contents measured into the RIM.
+ *
+ * Preemptible: returns -ERESTART periodically; progress is recorded in
+ * d->arch.cca.nr_data_pages, so calling again with the same arguments
+ * resumes where the previous invocation stopped.
+ *
+ * On an RMI failure the raw result is stored through @rmi_result (via
+ * arm_cca_build_record_rmi_failure).  Returns 0 on success, -ERESTART
+ * when preempted, or a negative errno value on failure.
+ */
+static int arm_cca_create_data_pages(struct domain *d, gfn_t base_gfn,
+                                     unsigned long nr_pages,
+                                     uint64_t *rmi_result)
+{
+    struct page_info *scratch_pg;
+    void *scratch;
+    unsigned long i, work = 0;
+    int rc = 0;
+
+    /* Lazily allocate the per-page bookkeeping on the first invocation. */
+    if ( !d->arch.cca.data_pages )
+    {
+        d->arch.cca.data_pages = xzalloc_array(struct arm_cca_data_page_record,
+                                               nr_pages);
+        if ( !d->arch.cca.data_pages )
+            return -ENOMEM;
+    }
+
+    /*
+     * Bounce buffer used as the Non-secure source granule for DATA
+     * creation: once the target page is delegated it is no longer
+     * accessible from the Normal world, so its contents are staged here
+     * first and the RMM copies them from this page.
+     */
+    scratch_pg = arm_cca_alloc_host_page();
+    if ( !scratch_pg )
+        return -ENOMEM;
+
+    scratch = map_domain_page(page_to_mfn(scratch_pg));
+
+    /* Resume from wherever a previous (preempted) call stopped. */
+    for ( i = d->arch.cca.nr_data_pages; i < nr_pages; ++i )
+    {
+        paddr_t ipa = gfn_to_gaddr(gfn_add(base_gfn, i));
+        struct page_info *page;
+        p2m_type_t p2mt;
+        mfn_t mfn;
+        void *src;
+        struct arm_smccc_res res;
+        bool removed = false, delegated = false;
+
+        page = get_page_from_gfn(d, gfn_x(gfn_add(base_gfn, i)), &p2mt,
+                                 P2M_ALLOC);
+        if ( !page )
+        {
+            rc = -ENOENT;
+            break;
+        }
+
+        /* Only plain read-write guest RAM may become a DATA granule. */
+        if ( p2mt != p2m_ram_rw )
+        {
+            put_page(page);
+            rc = -EINVAL;
+            break;
+        }
+
+        /* Snapshot the contents while the page is still NS-accessible. */
+        mfn = page_to_mfn(page);
+        src = map_domain_page(mfn);
+        memcpy(scratch, src, PAGE_SIZE);
+        unmap_domain_page(src);
+
+        rc = guest_physmap_remove_page(d, gfn_add(base_gfn, i), mfn, 0);
+        if ( rc != 0 )
+        {
+            put_page(page);
+            break;
+        }
+        removed = true;
+        /*
+         * The page is gone from the p2m; any failure past this point
+         * leaves the guest address space inconsistent, so the build can
+         * no longer be rolled back and retried transparently.
+         */
+        d->arch.cca.build_unrecoverable = true;
+
+        rc = arm_cca_delegate_granule(page_to_maddr(page));
+        if ( rc != 0 )
+            goto err_page;
+        delegated = true;
+
+        /*
+         * TODO: If plain RAM should be left out of RIM, have the toolstack
+         * pass explicit measured ranges (kernel, initrd, DTB, etc.) and use
+         * DATA_MAP outside those ranges. Do not infer this from page contents.
+         */
+        rc = arm_cca_rmi_rtt_data_map_init(
+            d->arch.cca.rd, page_to_maddr(page), ipa,
+            page_to_maddr(scratch_pg),
+            ARM_CCA_RMI_DATA_FLAGS_MEASURE_CONTENT, &res);
+        rc = arm_cca_build_record_rmi_failure(rmi_result, rc, &res);
+        if ( rc != 0 )
+            goto err_page;
+
+        /* Record the IPA<->PA pair so teardown can reclaim the granule. */
+        d->arch.cca.data_pages[i].ipa = ipa;
+        d->arch.cca.data_pages[i].pa = mfn_to_maddr(mfn);
+        d->arch.cca.nr_data_pages++;
+
+        put_page(page);
+
+        if ( arm_cca_build_should_preempt(++work,
+                                          ARM_CCA_BUILD_PREEMPT_PAGES,
+                                          ARM_CCA_BUILD_FORCE_PREEMPT_PAGES) )
+        {
+            rc = -ERESTART;
+            break;
+        }
+
+        continue;
+
+/*
+ * Per-page error unwind.  NOTE(review): this relies on
+ * arm_cca_undelegate_build_page() returning true on *success* (so the
+ * page is safe to hand back to the heap) and false on failure (so the
+ * still-delegated granule is parked in data_pages[] for teardown to
+ * reclaim) — confirm the helper's return convention matches; if it
+ * returns a negative errno instead, the branches below are inverted and
+ * a still-delegated page would be freed.
+ */
+err_page:
+        if ( delegated )
+        {
+            if ( arm_cca_undelegate_build_page(d, page) )
+                delegated = false;
+            else
+            {
+                d->arch.cca.data_pages[i].ipa = INVALID_PADDR;
+                d->arch.cca.data_pages[i].pa = page_to_maddr(page);
+                d->arch.cca.nr_data_pages++;
+            }
+        }
+
+        /*
+         * NOTE(review): put_page() drops the get_page_from_gfn()
+         * reference; verify it cannot be the last reference here,
+         * otherwise the explicit free_domheap_page() below would be a
+         * double free.
+         */
+        put_page(page);
+        if ( removed && !delegated )
+            free_domheap_page(page);
+        break;
+    }
+
+    unmap_domain_page(scratch);
+    free_domheap_page(scratch_pg);
+
+    return rc;
+}
-- 
2.51.0




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.