
[RFC PATCH v1 08/26] xen/arm/cca: add shared SRO helpers


  • To: xen-devel@xxxxxxxxxxxxxxxxxxxx
  • From: Koichiro Den <den@xxxxxxxxxxxxx>
  • Date: Fri, 15 May 2026 13:07:54 +0900
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Anthony PERARD <anthony.perard@xxxxxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Julien Grall <julien@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, "Daniel P. Smith" <dpsmith@xxxxxxxxxxxxxxxxxxxx>, Juergen Gross <jgross@xxxxxxxx>, Bertrand Marquis <bertrand.marquis@xxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
  • Delivery-date: Fri, 15 May 2026 04:08:39 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Add a common completion loop for memory-transferring Stateful RMI
Operations (SROs). It keeps donate, reclaim and cancel handling out of
each lifecycle caller.

Signed-off-by: Koichiro Den <den@xxxxxxxxxxxxx>
---
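[ Not for commit: a minimal sketch of the intended caller pattern.  The
  struct realm fields and the destroy wrapper below are placeholders
  standing in for whatever lifecycle path drives the SRO; they are not
  interfaces added by this patch. ]

    static int realm_destroy_example(struct realm *realm)
    {
        struct arm_smccc_res res;
        struct arm_cca_sro_mem_xfer xfer = {
            .pages = realm->sro_pages,                /* placeholder */
            .nr_pages = &realm->nr_sro_pages,         /* placeholder */
            .max_pages = realm->sro_pages_capacity,   /* placeholder */
            .abandoned_pages = &realm->abandoned_pages,
        };
        /* Any SRO-capable RMI may return with the operation pending. */
        int rc = rmi_realm_destroy(realm->rd, &res);  /* placeholder */

        /*
         * Drive the pending SRO to completion: donate freshly delegated
         * granules on MEM_REQ_DONATE, take them back on MEM_REQ_RECLAIM,
         * and cancel (when permitted) if an error stops progress.
         */
        return arm_cca_sro_complete_mem_transfer(rc, &res, &xfer);
    }

For reference, a single 4KiB list page holds PAGE_SIZE / sizeof(uint64_t)
= 512 address descriptors, which is where the second -E2BIG cap in
arm_cca_sro_validate_donate_req() comes from.
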
 xen/arch/arm/cca/Makefile |   1 +
 xen/arch/arm/cca/sro.c    | 485 ++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/cca/sro.h    |  25 ++
 3 files changed, 511 insertions(+)
 create mode 100644 xen/arch/arm/cca/sro.c
 create mode 100644 xen/arch/arm/cca/sro.h

diff --git a/xen/arch/arm/cca/Makefile b/xen/arch/arm/cca/Makefile
index 57c3986d5de8..bf6d9b58ebec 100644
--- a/xen/arch/arm/cca/Makefile
+++ b/xen/arch/arm/cca/Makefile
@@ -1,4 +1,5 @@
 obj-y += granule.o
 obj-y += realm.o
 obj-y += rmi.o
+obj-y += sro.o
 obj-y += state.o
diff --git a/xen/arch/arm/cca/sro.c b/xen/arch/arm/cca/sro.c
new file mode 100644
index 000000000000..d17810388398
--- /dev/null
+++ b/xen/arch/arm/cca/sro.c
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <xen/domain_page.h>
+#include <xen/errno.h>
+#include <xen/mm.h>
+#include <xen/xmalloc.h>
+
+#include <asm/cca.h>
+
+#include "rmi.h"
+#include "sro.h"
+
+static struct page_info *arm_cca_sro_alloc_page(void)
+{
+    struct page_info *pg;
+    void *va;
+
+    pg = alloc_domheap_page(NULL, 0);
+    if ( !pg )
+        return NULL;
+
+    va = map_domain_page(page_to_mfn(pg));
+    clear_page(va);
+    unmap_domain_page(va);
+
+    return pg;
+}
+
+static unsigned long arm_cca_sro_donate_req_count(unsigned long req)
+{
+    return arm_cca_rmi_field_get(req,
+                                 ARM_CCA_RMI_OP_DONATE_BLK_COUNT_SHIFT,
+                                 ARM_CCA_RMI_OP_DONATE_BLK_COUNT_WIDTH);
+}
+
+static uint64_t arm_cca_sro_addr_desc_4k(paddr_t pa)
+{
+    return ((uint64_t)ARM_CCA_RMI_PAGE_L3 <<
+            ARM_CCA_RMI_ADDR_DESC_4K_SZ_SHIFT) |
+           (1ULL << ARM_CCA_RMI_ADDR_DESC_4K_CNT_SHIFT) |
+           (((uint64_t)pa >> PAGE_SHIFT) <<
+            ARM_CCA_RMI_ADDR_DESC_4K_ADDR_SHIFT) |
+           ((uint64_t)ARM_CCA_RMI_OP_MEM_DELEGATED <<
+            ARM_CCA_RMI_ADDR_DESC_4K_ST_SHIFT);
+}
+
+static int
+arm_cca_sro_free_delegated_pages(const struct arm_cca_sro_mem_xfer *xfer,
+                                 struct page_info **pages,
+                                 unsigned int nr_pages)
+{
+    unsigned int i;
+    int rc = 0;
+
+    for ( i = 0; i < nr_pages; ++i )
+    {
+        if ( !pages[i] )
+            continue;
+
+        if ( arm_cca_undelegate_granule(page_to_maddr(pages[i])) != 0 )
+        {
+            page_list_add_tail(pages[i], xfer->abandoned_pages);
+            pages[i] = NULL;
+            rc = -EIO;
+            continue;
+        }
+
+        free_domheap_page(pages[i]);
+        pages[i] = NULL;
+    }
+
+    return rc;
+}
+
+static int
+arm_cca_sro_validate_donate_req(const struct arm_cca_sro_mem_xfer *xfer,
+                                unsigned long req, unsigned long *count)
+{
+    unsigned long blk_size, contig, state;
+
+    if ( !xfer || !xfer->pages || !xfer->nr_pages || !xfer->max_pages ||
+         !xfer->abandoned_pages )
+        return -EOPNOTSUPP;
+
+    blk_size = arm_cca_rmi_field_get(req,
+                                     ARM_CCA_RMI_OP_DONATE_BLK_SIZE_SHIFT,
+                                     ARM_CCA_RMI_OP_DONATE_BLK_SIZE_WIDTH);
+    contig = arm_cca_rmi_field_get(req,
+                                   ARM_CCA_RMI_OP_DONATE_MEM_CONTIG_SHIFT,
+                                   ARM_CCA_RMI_OP_DONATE_MEM_CONTIG_WIDTH);
+    state = arm_cca_rmi_field_get(req,
+                                  ARM_CCA_RMI_OP_DONATE_MEM_STATE_SHIFT,
+                                  ARM_CCA_RMI_OP_DONATE_MEM_STATE_WIDTH);
+    *count = arm_cca_sro_donate_req_count(req);
+
+    /*
+     * Xen donates 4KB, non-contiguous, already-delegated granules here
+     * because they can be allocated and tracked as individual pages.
+     */
+    if ( blk_size != ARM_CCA_RMI_PAGE_L3 ||
+         contig != ARM_CCA_RMI_OP_MEM_NON_CONTIG ||
+         state != ARM_CCA_RMI_OP_MEM_DELEGATED )
+        return -EOPNOTSUPP;
+
+    if ( *count == 0 )
+        return -EINVAL;
+
+    if ( *xfer->nr_pages > xfer->max_pages ||
+         *count > xfer->max_pages - *xfer->nr_pages )
+        return -E2BIG;
+
+    if ( *count > PAGE_SIZE / sizeof(uint64_t) )
+        return -E2BIG;
+
+    return 0;
+}
+
+static int arm_cca_sro_donate_pages(unsigned long handle,
+                                    unsigned long donate_req,
+                                    const struct arm_cca_sro_mem_xfer *xfer,
+                                    struct arm_smccc_res *res)
+{
+    struct page_info *list_pg = NULL;
+    struct page_info **pages = NULL;
+    uint64_t *list;
+    unsigned long count, consumed;
+    unsigned int i;
+    int cleanup_rc, rc;
+
+    rc = arm_cca_sro_validate_donate_req(xfer, donate_req, &count);
+    if ( rc != 0 )
+        return rc;
+
+    pages = xzalloc_array(struct page_info *, count);
+    if ( !pages )
+        return -ENOMEM;
+
+    list_pg = arm_cca_sro_alloc_page();
+    if ( !list_pg )
+    {
+        rc = -ENOMEM;
+        goto out;
+    }
+
+    list = map_domain_page(page_to_mfn(list_pg));
+
+    for ( i = 0; i < count; ++i )
+    {
+        pages[i] = arm_cca_sro_alloc_page();
+        if ( !pages[i] )
+        {
+            rc = -ENOMEM;
+            goto out_unmap;
+        }
+
+        rc = arm_cca_delegate_granule(page_to_maddr(pages[i]));
+        if ( rc != 0 )
+        {
+            free_domheap_page(pages[i]);
+            pages[i] = NULL;
+            goto out_unmap;
+        }
+
+        list[i] = arm_cca_sro_addr_desc_4k(page_to_maddr(pages[i]));
+    }
+
+    rc = arm_cca_rmi_op_mem_donate(handle, page_to_maddr(list_pg), count, res);
+    /*
+     * donated_count is valid regardless of the RMI status.
+     * See DEN0137 2.0-bet1 - B4.3.2.2 Donating memory to an SRO
+     */
+    consumed = res->a1;
+    if ( consumed > count )
+    {
+        rc = -EIO;
+        consumed = count;
+    }
+
+    for ( i = 0; i < consumed; ++i )
+    {
+        xfer->pages[*xfer->nr_pages] = pages[i];
+        (*xfer->nr_pages)++;
+        pages[i] = NULL;
+    }
+
+out_unmap:
+    unmap_domain_page(list);
+    free_domheap_page(list_pg);
+
+out:
+    cleanup_rc = arm_cca_sro_free_delegated_pages(xfer, pages, count);
+    if ( cleanup_rc != 0 )
+        rc = cleanup_rc;
+    xfree(pages);
+
+    return rc;
+}
+
+static int arm_cca_sro_continue(unsigned long handle,
+                                struct arm_smccc_res *res)
+{
+    return arm_cca_rmi_op_continue(handle, ARM_CCA_RMI_CONTINUE_KEEP_GOING,
+                                   res);
+}
+
+static bool arm_cca_sro_is_pending(const struct arm_smccc_res *res)
+{
+    uint64_t result = arm_cca_rmi_result(res);
+
+    return arm_cca_rmi_status_is(result, ARM_CCA_RMI_INCOMPLETE) ||
+           arm_cca_rmi_status_is(result, ARM_CCA_RMI_BUSY);
+}
+
+static int arm_cca_sro_reclaim_pages(unsigned long handle,
+                                     const struct arm_cca_sro_mem_xfer *xfer,
+                                     struct arm_smccc_res *res);
+
+static int
+arm_cca_sro_validate_reclaim_xfer(const struct arm_cca_sro_mem_xfer *xfer)
+{
+    if ( !xfer || !xfer->pages || !xfer->nr_pages || !xfer->abandoned_pages )
+        return -EOPNOTSUPP;
+
+    if ( *xfer->nr_pages == 0 )
+        return -EIO;
+
+    return 0;
+}
+
+static int arm_cca_sro_cancel(unsigned long handle,
+                              const struct arm_cca_sro_mem_xfer *xfer,
+                              struct arm_smccc_res *res)
+{
+    int rc = arm_cca_rmi_op_cancel(handle, res);
+
+    if ( rc != 0 && !arm_cca_sro_is_pending(res) )
+        return rc;
+
+    while ( arm_cca_sro_is_pending(res) )
+    {
+        unsigned long mem_req;
+
+        if ( arm_cca_rmi_status_is(arm_cca_rmi_result(res),
+                                   ARM_CCA_RMI_BUSY) )
+        {
+            rc = arm_cca_sro_continue(handle, res);
+            if ( rc < 0 )
+                return rc;
+            if ( !arm_cca_sro_is_pending(res) )
+                return rc;
+            continue;
+        }
+
+        mem_req = arm_cca_rmi_sro_mem_req(res->a0);
+
+        switch ( mem_req )
+        {
+        case ARM_CCA_RMI_OP_MEM_REQ_RECLAIM:
+            rc = arm_cca_sro_validate_reclaim_xfer(xfer);
+            if ( rc != 0 )
+                return rc;
+            rc = arm_cca_sro_reclaim_pages(handle, xfer, res);
+            break;
+
+        case ARM_CCA_RMI_OP_MEM_REQ_NONE:
+            rc = arm_cca_sro_continue(handle, res);
+            if ( rc < 0 )
+                return rc;
+            if ( !arm_cca_sro_is_pending(res) )
+                return rc;
+            break;
+
+        default:
+            return -EIO;
+        }
+
+        if ( rc < 0 )
+            return rc;
+    }
+
+    return rc;
+}
+
+int arm_cca_sro_complete_mem_transfer(int rc, struct arm_smccc_res *res,
+                                      const struct arm_cca_sro_mem_xfer *xfer)
+{
+    unsigned long handle;
+    bool can_cancel = false;
+
+    if ( rc != 0 && !arm_cca_sro_is_pending(res) )
+        return rc;
+
+    if ( !arm_cca_rmi_status_is(arm_cca_rmi_result(res),
+                                ARM_CCA_RMI_INCOMPLETE) )
+        return rc;
+
+    handle = res->a1;
+
+    while ( arm_cca_sro_is_pending(res) )
+    {
+        uint64_t result = arm_cca_rmi_result(res);
+        unsigned long mem_req;
+
+        if ( arm_cca_rmi_status_is(result, ARM_CCA_RMI_BUSY) )
+        {
+            rc = arm_cca_sro_continue(handle, res);
+        }
+        else
+        {
+            can_cancel = arm_cca_rmi_sro_can_cancel(result);
+            mem_req = arm_cca_rmi_sro_mem_req(result);
+
+            switch ( mem_req )
+            {
+            case ARM_CCA_RMI_OP_MEM_REQ_DONATE:
+                rc = arm_cca_sro_donate_pages(handle, res->a2, xfer, res);
+                /*
+                 * RMM records a failed donation through the SRO context.  The
+                 * Host must continue the SRO after an RMI_OP_MEM_DONATE error.
+                 *
+                 * See DEN0137 2.0-bet1 - B4.3.2.2
+                 */
+                if ( arm_cca_rmi_status_is_error(arm_cca_rmi_result(res)) )
+                    rc = arm_cca_sro_continue(handle, res);
+                break;
+
+            case ARM_CCA_RMI_OP_MEM_REQ_RECLAIM:
+                rc = arm_cca_sro_validate_reclaim_xfer(xfer);
+                if ( rc != 0 )
+                    break;
+
+                rc = arm_cca_sro_reclaim_pages(handle, xfer, res);
+                break;
+
+            case ARM_CCA_RMI_OP_MEM_REQ_NONE:
+                rc = arm_cca_sro_continue(handle, res);
+                break;
+
+            default:
+                rc = -EOPNOTSUPP;
+                break;
+            }
+        }
+
+        if ( rc < 0 )
+        {
+            int cancel_rc;
+
+            /*
+             * A final RMI_ERROR_* already ends the SRO.  CANCEL is only for
+             * a still-pending SRO which Xen can no longer drive.
+             */
+            if ( !arm_cca_sro_is_pending(res) )
+                break;
+
+            if ( !can_cancel )
+                break;
+
+            cancel_rc = arm_cca_sro_cancel(handle, xfer, res);
+            if ( cancel_rc != 0 )
+                rc = cancel_rc;
+            break;
+        }
+    }
+
+    return rc;
+}
+
+static int
+arm_cca_sro_forget_reclaimed_page(const struct arm_cca_sro_mem_xfer *xfer,
+                                  paddr_t pa)
+{
+    struct page_info **pages = xfer->pages;
+    unsigned int *nr_pages = xfer->nr_pages;
+    unsigned int i;
+
+    for ( i = 0; i < *nr_pages; ++i )
+    {
+        struct page_info *pg = pages[i];
+
+        if ( !pg || page_to_maddr(pg) != pa )
+            continue;
+
+        pages[i] = pages[*nr_pages - 1];
+        pages[*nr_pages - 1] = NULL;
+        (*nr_pages)--;
+
+        if ( arm_cca_undelegate_granule(pa) != 0 )
+        {
+            /*
+             * The RMM has returned the page.  Keep host undelegation failure
+             * out of the SRO state machine and retry it from relinquish.
+             */
+            page_list_add_tail(pg, xfer->abandoned_pages);
+            return 0;
+        }
+
+        free_domheap_page(pg);
+
+        return 0;
+    }
+
+    return -ENOENT;
+}
+
+static int
+arm_cca_sro_forget_reclaimed_desc(const struct arm_cca_sro_mem_xfer *xfer,
+                                  uint64_t desc)
+{
+    paddr_t pa = arm_cca_rmi_addr_desc_4k_pa(desc);
+    unsigned long count = arm_cca_rmi_addr_desc_4k_count(desc);
+    unsigned long size = arm_cca_rmi_addr_desc_4k_size(desc);
+    unsigned long state = arm_cca_rmi_addr_desc_4k_state(desc);
+    unsigned long i;
+
+    if ( size != ARM_CCA_RMI_PAGE_L3 ||
+         state != ARM_CCA_RMI_OP_MEM_DELEGATED ||
+         count == 0 )
+        return -EIO;
+
+    for ( i = 0; i < count; ++i )
+    {
+        int rc = arm_cca_sro_forget_reclaimed_page(xfer, pa);
+
+        if ( rc != 0 )
+            return rc;
+
+        pa += PAGE_SIZE;
+    }
+
+    return 0;
+}
+
+static int arm_cca_sro_reclaim_pages(unsigned long handle,
+                                     const struct arm_cca_sro_mem_xfer *xfer,
+                                     struct arm_smccc_res *res)
+{
+    struct page_info *list_pg;
+    uint64_t *list;
+    unsigned long max_descs = *xfer->nr_pages;
+    unsigned long nr_descs = 0;
+    unsigned int i;
+    int rc;
+
+    list_pg = alloc_domheap_page(NULL, 0);
+    if ( !list_pg )
+        return -ENOMEM;
+
+    list = map_domain_page(page_to_mfn(list_pg));
+    clear_page(list);
+
+    /*
+     * In the worst case each reclaimed page needs one address-list
+     * descriptor, so the tracked page count is a sufficient list capacity.
+     */
+    rc = arm_cca_rmi_op_mem_reclaim(handle, page_to_maddr(list_pg), max_descs,
+                                    res);
+
+    if ( arm_cca_rmi_status_is(arm_cca_rmi_result(res),
+                               ARM_CCA_RMI_INCOMPLETE) )
+    {
+        nr_descs = res->a1;
+        if ( nr_descs > max_descs )
+        {
+            nr_descs = max_descs;
+            rc = -EIO;
+        }
+    }
+
+    for ( i = 0; i < nr_descs; ++i )
+    {
+        int ret = arm_cca_sro_forget_reclaimed_desc(xfer, list[i]);
+
+        if ( ret != 0 )
+        {
+            rc = ret;
+            break;
+        }
+    }
+
+    unmap_domain_page(list);
+    free_domheap_page(list_pg);
+
+    return rc;
+}
diff --git a/xen/arch/arm/cca/sro.h b/xen/arch/arm/cca/sro.h
new file mode 100644
index 000000000000..71d26574fe7e
--- /dev/null
+++ b/xen/arch/arm/cca/sro.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef ARM_CCA_SRO_H
+#define ARM_CCA_SRO_H
+
+struct arm_smccc_res;
+struct page_list_head;
+struct page_info;
+
+struct arm_cca_sro_mem_xfer {
+    /*
+     * pages tracks pages accepted by the SRO and later returned by reclaim.
+     * max_pages is the capacity for accepted donations; zero disables
+     * donation.  abandoned_pages keeps delegated pages which still need
+     * host-side undelegation retry.
+     */
+    struct page_info **pages;
+    unsigned int *nr_pages;
+    unsigned int max_pages;
+    struct page_list_head *abandoned_pages;
+};
+
+int arm_cca_sro_complete_mem_transfer(int rc, struct arm_smccc_res *res,
+                                      const struct arm_cca_sro_mem_xfer *xfer);
+
+#endif /* ARM_CCA_SRO_H */
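
[ Not for commit: the reclaim path parks pages whose undelegation failed
  on xfer->abandoned_pages rather than freeing or leaking them.  A later
  relinquish-style retry could drain that list roughly as below; this is
  a sketch only, no such helper is added by this patch. ]

    static void arm_cca_sro_retry_abandoned(struct page_list_head *abandoned)
    {
        struct page_info *pg, *tmp;

        page_list_for_each_safe ( pg, tmp, abandoned )
        {
            /* Retry the undelegation that failed during reclaim. */
            if ( arm_cca_undelegate_granule(page_to_maddr(pg)) != 0 )
                continue;   /* still delegated; keep it parked for later */

            page_list_del(pg, abandoned);
            free_domheap_page(pg);
        }
    }
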
-- 
2.51.0




 

