|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [RFC PATCH v1 08/26] xen/arm/cca: add shared SRO helpers
Add shared helpers implementing the common continuation loop for
memory-transferring Stateful RMI Operations (SROs). Keeping donate,
reclaim and cancel handling here avoids duplicating it in every
lifecycle caller.
Signed-off-by: Koichiro Den <den@xxxxxxxxxxxxx>
---
xen/arch/arm/cca/Makefile | 1 +
xen/arch/arm/cca/sro.c | 485 ++++++++++++++++++++++++++++++++++++++
xen/arch/arm/cca/sro.h | 25 ++
3 files changed, 511 insertions(+)
create mode 100644 xen/arch/arm/cca/sro.c
create mode 100644 xen/arch/arm/cca/sro.h
diff --git a/xen/arch/arm/cca/Makefile b/xen/arch/arm/cca/Makefile
index 57c3986d5de8..bf6d9b58ebec 100644
--- a/xen/arch/arm/cca/Makefile
+++ b/xen/arch/arm/cca/Makefile
@@ -1,4 +1,5 @@
obj-y += granule.o
obj-y += realm.o
obj-y += rmi.o
+obj-y += sro.o
obj-y += state.o
diff --git a/xen/arch/arm/cca/sro.c b/xen/arch/arm/cca/sro.c
new file mode 100644
index 000000000000..d17810388398
--- /dev/null
+++ b/xen/arch/arm/cca/sro.c
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <xen/domain_page.h>
+#include <xen/errno.h>
+#include <xen/mm.h>
+#include <xen/xmalloc.h>
+
+#include <asm/cca.h>
+
+#include "rmi.h"
+#include "sro.h"
+
+/* Allocate one zeroed granule-sized page from the domheap for SRO use. */
+static struct page_info *arm_cca_sro_alloc_page(void)
+{
+    struct page_info *page = alloc_domheap_page(NULL, 0);
+    void *contents;
+
+    if ( page == NULL )
+        return NULL;
+
+    /* Scrub the page before it can be handed to the RMM. */
+    contents = map_domain_page(page_to_mfn(page));
+    clear_page(contents);
+    unmap_domain_page(contents);
+
+    return page;
+}
+
+/* Extract the requested block count from a donate request word. */
+static unsigned long arm_cca_sro_donate_req_count(unsigned long req)
+{
+    unsigned long count;
+
+    count = arm_cca_rmi_field_get(req, ARM_CCA_RMI_OP_DONATE_BLK_COUNT_SHIFT,
+                                  ARM_CCA_RMI_OP_DONATE_BLK_COUNT_WIDTH);
+
+    return count;
+}
+
+/*
+ * Encode a 4KB address-list descriptor for @pa: L3 (4KB) block size, a
+ * run of exactly one granule, the granule PFN, and DELEGATED state.
+ */
+static uint64_t arm_cca_sro_addr_desc_4k(paddr_t pa)
+{
+    uint64_t desc = 0;
+
+    desc |= (uint64_t)ARM_CCA_RMI_PAGE_L3 << ARM_CCA_RMI_ADDR_DESC_4K_SZ_SHIFT;
+    desc |= 1ULL << ARM_CCA_RMI_ADDR_DESC_4K_CNT_SHIFT;
+    desc |= ((uint64_t)pa >> PAGE_SHIFT) << ARM_CCA_RMI_ADDR_DESC_4K_ADDR_SHIFT;
+    desc |= (uint64_t)ARM_CCA_RMI_OP_MEM_DELEGATED <<
+            ARM_CCA_RMI_ADDR_DESC_4K_ST_SHIFT;
+
+    return desc;
+}
+
+/*
+ * Undelegate and free every page still present in @pages, NULLing each
+ * entry.  A page whose undelegation fails stays delegated and is parked
+ * on xfer->abandoned_pages for a later retry; any such failure makes the
+ * whole walk report -EIO once it completes.
+ */
+static int
+arm_cca_sro_free_delegated_pages(const struct arm_cca_sro_mem_xfer *xfer,
+                                 struct page_info **pages,
+                                 unsigned int nr_pages)
+{
+    int err = 0;
+    unsigned int idx;
+
+    for ( idx = 0; idx < nr_pages; ++idx )
+    {
+        struct page_info *pg = pages[idx];
+
+        if ( !pg )
+            continue;
+
+        pages[idx] = NULL;
+
+        if ( arm_cca_undelegate_granule(page_to_maddr(pg)) == 0 )
+            free_domheap_page(pg);
+        else
+        {
+            /* Still delegated: keep it for a later undelegation retry. */
+            page_list_add_tail(pg, xfer->abandoned_pages);
+            err = -EIO;
+        }
+    }
+
+    return err;
+}
+
+/*
+ * Check that a donate request is one Xen can satisfy and report the
+ * number of granules asked for through @count.
+ *
+ * Returns 0 on success, -EOPNOTSUPP for an unsupported transfer shape or
+ * missing tracking setup, -EINVAL for a zero count, and -E2BIG when the
+ * request exceeds either the tracking capacity or one page of descriptors.
+ */
+static int
+arm_cca_sro_validate_donate_req(const struct arm_cca_sro_mem_xfer *xfer,
+                                unsigned long req, unsigned long *count)
+{
+    unsigned long blk_size, contig, state;
+
+    if ( !xfer || !xfer->pages || !xfer->nr_pages || !xfer->max_pages ||
+         !xfer->abandoned_pages )
+        return -EOPNOTSUPP;
+
+    blk_size = arm_cca_rmi_field_get(req,
+                                     ARM_CCA_RMI_OP_DONATE_BLK_SIZE_SHIFT,
+                                     ARM_CCA_RMI_OP_DONATE_BLK_SIZE_WIDTH);
+    contig = arm_cca_rmi_field_get(req,
+                                   ARM_CCA_RMI_OP_DONATE_MEM_CONTIG_SHIFT,
+                                   ARM_CCA_RMI_OP_DONATE_MEM_CONTIG_WIDTH);
+    state = arm_cca_rmi_field_get(req,
+                                  ARM_CCA_RMI_OP_DONATE_MEM_STATE_SHIFT,
+                                  ARM_CCA_RMI_OP_DONATE_MEM_STATE_WIDTH);
+    *count = arm_cca_sro_donate_req_count(req);
+
+    /*
+     * Xen donates 4KB, non-contiguous, already-delegated granules here
+     * because they can be allocated and tracked as individual pages.
+     */
+    if ( blk_size != ARM_CCA_RMI_PAGE_L3 ||
+         contig != ARM_CCA_RMI_OP_MEM_NON_CONTIG ||
+         state != ARM_CCA_RMI_OP_MEM_DELEGATED )
+        return -EOPNOTSUPP;
+
+    if ( *count == 0 )
+        return -EINVAL;
+
+    /* The tracking array must have room for every requested granule. */
+    if ( *xfer->nr_pages > xfer->max_pages ||
+         *count > xfer->max_pages - *xfer->nr_pages )
+        return -E2BIG;
+
+    /* The address list is a single page of 64-bit descriptors. */
+    if ( *count > PAGE_SIZE / sizeof(uint64_t) )
+        return -E2BIG;
+
+    return 0;
+}
+
+/*
+ * Service an RMI_OP_MEM_REQ_DONATE request for a pending SRO.
+ *
+ * Allocates and delegates the requested number of zeroed 4KB granules,
+ * describes them in a single-page address list and passes the list to
+ * RMI_OP_MEM_DONATE.  Granules the RMM reports as consumed (res->a1) are
+ * recorded in xfer->pages / *xfer->nr_pages; the rest are undelegated and
+ * freed, or parked on xfer->abandoned_pages when undelegation fails.
+ *
+ * Returns 0 on success or a negative errno.  @res holds the raw RMI
+ * result so the caller can keep driving the SRO state machine.
+ */
+static int arm_cca_sro_donate_pages(unsigned long handle,
+                                    unsigned long donate_req,
+                                    const struct arm_cca_sro_mem_xfer *xfer,
+                                    struct arm_smccc_res *res)
+{
+    struct page_info *list_pg = NULL;
+    struct page_info **pages = NULL;
+    uint64_t *list;
+    unsigned long count, consumed;
+    unsigned int i;
+    int cleanup_rc, rc;
+
+    rc = arm_cca_sro_validate_donate_req(xfer, donate_req, &count);
+    if ( rc != 0 )
+        return rc;
+
+    /* Temporary per-call tracking of the granules being delegated. */
+    pages = xzalloc_array(struct page_info *, count);
+    if ( !pages )
+        return -ENOMEM;
+
+    /* One zeroed page holds the whole address-descriptor list. */
+    list_pg = arm_cca_sro_alloc_page();
+    if ( !list_pg )
+    {
+        rc = -ENOMEM;
+        goto out;
+    }
+
+    list = map_domain_page(page_to_mfn(list_pg));
+
+    for ( i = 0; i < count; ++i )
+    {
+        pages[i] = arm_cca_sro_alloc_page();
+        if ( !pages[i] )
+        {
+            rc = -ENOMEM;
+            goto out_unmap;
+        }
+
+        rc = arm_cca_delegate_granule(page_to_maddr(pages[i]));
+        if ( rc != 0 )
+        {
+            /* Never delegated, so it can be freed directly. */
+            free_domheap_page(pages[i]);
+            pages[i] = NULL;
+            goto out_unmap;
+        }
+
+        list[i] = arm_cca_sro_addr_desc_4k(page_to_maddr(pages[i]));
+    }
+
+    rc = arm_cca_rmi_op_mem_donate(handle, page_to_maddr(list_pg), count, res);
+    /*
+     * donated_count is valid regardless of the RMI status.
+     * See DEN0137 2.0-bet1 - B4.3.2.2 Donating memory to an SRO
+     */
+    consumed = res->a1;
+    if ( consumed > count )
+    {
+        /* The RMM may not claim more granules than it was offered. */
+        rc = -EIO;
+        consumed = count;
+    }
+
+    /* Hand ownership of the consumed granules over to the caller. */
+    for ( i = 0; i < consumed; ++i )
+    {
+        xfer->pages[*xfer->nr_pages] = pages[i];
+        (*xfer->nr_pages)++;
+        pages[i] = NULL;
+    }
+
+out_unmap:
+    unmap_domain_page(list);
+    free_domheap_page(list_pg);
+
+out:
+    /* Undelegate and free whatever the RMM did not consume. */
+    cleanup_rc = arm_cca_sro_free_delegated_pages(xfer, pages, count);
+    if ( cleanup_rc != 0 )
+        rc = cleanup_rc;
+    xfree(pages);
+
+    return rc;
+}
+
+/* Resume a pending SRO, asking the RMM to keep the operation going. */
+static int arm_cca_sro_continue(unsigned long handle,
+                                struct arm_smccc_res *res)
+{
+    return arm_cca_rmi_op_continue(handle, ARM_CCA_RMI_CONTINUE_KEEP_GOING,
+                                   res);
+}
+
+/* True while the SRO needs further host interaction (INCOMPLETE or BUSY). */
+static bool arm_cca_sro_is_pending(const struct arm_smccc_res *res)
+{
+    uint64_t status = arm_cca_rmi_result(res);
+
+    if ( arm_cca_rmi_status_is(status, ARM_CCA_RMI_INCOMPLETE) )
+        return true;
+
+    return arm_cca_rmi_status_is(status, ARM_CCA_RMI_BUSY);
+}
+
+static int arm_cca_sro_reclaim_pages(unsigned long handle,
+ const struct arm_cca_sro_mem_xfer *xfer,
+ struct arm_smccc_res *res);
+
+/*
+ * A reclaim can only proceed when the caller supplied tracking state and
+ * at least one donated page is actually outstanding.
+ */
+static int
+arm_cca_sro_validate_reclaim_xfer(const struct arm_cca_sro_mem_xfer *xfer)
+{
+    if ( !xfer || !xfer->pages || !xfer->nr_pages || !xfer->abandoned_pages )
+        return -EOPNOTSUPP;
+
+    return *xfer->nr_pages ? 0 : -EIO;
+}
+
+/*
+ * Cancel a still-pending SRO and drive it to a final state.
+ *
+ * After RMI_OP_CANCEL the operation can remain pending: BUSY results are
+ * retried via CONTINUE, and RECLAIM requests from the RMM are still
+ * honoured so donated memory comes back.  Any other memory request while
+ * cancelling yields -EIO.
+ *
+ * Returns 0 once the SRO is no longer pending, or a negative errno.
+ */
+static int arm_cca_sro_cancel(unsigned long handle,
+                              const struct arm_cca_sro_mem_xfer *xfer,
+                              struct arm_smccc_res *res)
+{
+    int rc = arm_cca_rmi_op_cancel(handle, res);
+
+    if ( rc != 0 && !arm_cca_sro_is_pending(res) )
+        return rc;
+
+    while ( arm_cca_sro_is_pending(res) )
+    {
+        unsigned long mem_req;
+
+        /* BUSY carries no request to service; just continue the SRO. */
+        if ( arm_cca_rmi_status_is(arm_cca_rmi_result(res),
+                                   ARM_CCA_RMI_BUSY) )
+        {
+            rc = arm_cca_sro_continue(handle, res);
+            if ( rc < 0 )
+                return rc;
+            if ( !arm_cca_sro_is_pending(res) )
+                return rc;
+            continue;
+        }
+
+        mem_req = arm_cca_rmi_sro_mem_req(res->a0);
+
+        switch ( mem_req )
+        {
+        case ARM_CCA_RMI_OP_MEM_REQ_RECLAIM:
+            rc = arm_cca_sro_validate_reclaim_xfer(xfer);
+            if ( rc != 0 )
+                return rc;
+            rc = arm_cca_sro_reclaim_pages(handle, xfer, res);
+            break;
+
+        case ARM_CCA_RMI_OP_MEM_REQ_NONE:
+            rc = arm_cca_sro_continue(handle, res);
+            if ( rc < 0 )
+                return rc;
+            if ( !arm_cca_sro_is_pending(res) )
+                return rc;
+            break;
+
+        default:
+            /* DONATE (or an unknown request) during cancel is unexpected. */
+            return -EIO;
+        }
+
+        if ( rc < 0 )
+            return rc;
+    }
+
+    return rc;
+}
+
+/*
+ * Drive a memory-transferring SRO to completion.
+ *
+ * @rc/@res are the result of the initial lifecycle RMI call.  While the
+ * SRO stays pending this loop services the RMM's requests: BUSY is
+ * retried, DONATE is satisfied from the domheap, RECLAIM returns donated
+ * pages, NONE simply continues.  On a host-side error the SRO is
+ * cancelled, provided the RMM last reported that cancellation is allowed.
+ *
+ * Returns the final status: 0 on success or a negative errno.
+ */
+int arm_cca_sro_complete_mem_transfer(int rc, struct arm_smccc_res *res,
+                                      const struct arm_cca_sro_mem_xfer *xfer)
+{
+    unsigned long handle;
+    bool can_cancel = false;
+
+    /* A hard failure with no pending SRO leaves nothing to drive. */
+    if ( rc != 0 && !arm_cca_sro_is_pending(res) )
+        return rc;
+
+    /* Only an INCOMPLETE result carries an SRO handle to continue with. */
+    if ( !arm_cca_rmi_status_is(arm_cca_rmi_result(res),
+                                ARM_CCA_RMI_INCOMPLETE) )
+        return rc;
+
+    handle = res->a1;
+
+    while ( arm_cca_sro_is_pending(res) )
+    {
+        uint64_t result = arm_cca_rmi_result(res);
+        unsigned long mem_req;
+
+        if ( arm_cca_rmi_status_is(result, ARM_CCA_RMI_BUSY) )
+        {
+            /*
+             * While BUSY, can_cancel deliberately keeps its value from
+             * the last non-BUSY iteration (false before any such
+             * iteration).
+             */
+            rc = arm_cca_sro_continue(handle, res);
+        }
+        else
+        {
+            can_cancel = arm_cca_rmi_sro_can_cancel(result);
+            mem_req = arm_cca_rmi_sro_mem_req(result);
+
+            switch ( mem_req )
+            {
+            case ARM_CCA_RMI_OP_MEM_REQ_DONATE:
+                rc = arm_cca_sro_donate_pages(handle, res->a2, xfer, res);
+                /*
+                 * RMM records a failed donation through the SRO context. The
+                 * Host must continue the SRO after an RMI_OP_MEM_DONATE error.
+                 *
+                 * See DEN0137 2.0-bet1 - B4.3.2.2
+                 */
+                if ( arm_cca_rmi_status_is_error(arm_cca_rmi_result(res)) )
+                    rc = arm_cca_sro_continue(handle, res);
+                break;
+
+            case ARM_CCA_RMI_OP_MEM_REQ_RECLAIM:
+                rc = arm_cca_sro_validate_reclaim_xfer(xfer);
+                if ( rc != 0 )
+                    break;
+
+                rc = arm_cca_sro_reclaim_pages(handle, xfer, res);
+                break;
+
+            case ARM_CCA_RMI_OP_MEM_REQ_NONE:
+                rc = arm_cca_sro_continue(handle, res);
+                break;
+
+            default:
+                rc = -EOPNOTSUPP;
+                break;
+            }
+        }
+
+        if ( rc < 0 )
+        {
+            int cancel_rc;
+
+            /*
+             * A final RMI_ERROR_* already ends the SRO. CANCEL is only for
+             * a still-pending SRO which Xen can no longer drive.
+             */
+            if ( !arm_cca_sro_is_pending(res) )
+                break;
+
+            if ( !can_cancel )
+                break;
+
+            cancel_rc = arm_cca_sro_cancel(handle, xfer, res);
+            if ( cancel_rc != 0 )
+                rc = cancel_rc;
+            break;
+        }
+    }
+
+    return rc;
+}
+
+/*
+ * Drop the tracked page at physical address @pa after the RMM has
+ * reclaimed it.  The matching slot is filled with the last array entry
+ * (order is not significant), the live count is decremented, and the page
+ * is undelegated and freed.  If undelegation fails the page is parked on
+ * xfer->abandoned_pages for a later retry and the reclaim itself still
+ * counts as successful.  Returns -ENOENT when @pa is not tracked.
+ */
+static int
+arm_cca_sro_forget_reclaimed_page(const struct arm_cca_sro_mem_xfer *xfer,
+                                  paddr_t pa)
+{
+    unsigned int *nr = xfer->nr_pages;
+    unsigned int slot;
+
+    for ( slot = 0; slot < *nr; ++slot )
+    {
+        struct page_info *pg = xfer->pages[slot];
+
+        if ( pg == NULL || page_to_maddr(pg) != pa )
+            continue;
+
+        /* Swap-remove: move the tail entry into the vacated slot. */
+        xfer->pages[slot] = xfer->pages[*nr - 1];
+        xfer->pages[*nr - 1] = NULL;
+        --*nr;
+
+        if ( arm_cca_undelegate_granule(pa) == 0 )
+            free_domheap_page(pg);
+        else
+            /*
+             * The RMM has returned the page. Keep host undelegation
+             * failure out of the SRO state machine and retry it from
+             * relinquish.
+             */
+            page_list_add_tail(pg, xfer->abandoned_pages);
+
+        return 0;
+    }
+
+    return -ENOENT;
+}
+
+/*
+ * Validate one reclaimed address-list descriptor and forget every granule
+ * it covers.  Only 4KB, DELEGATED runs with a non-zero count are accepted;
+ * anything else is a protocol inconsistency reported as -EIO.
+ */
+static int
+arm_cca_sro_forget_reclaimed_desc(const struct arm_cca_sro_mem_xfer *xfer,
+                                  uint64_t desc)
+{
+    paddr_t addr = arm_cca_rmi_addr_desc_4k_pa(desc);
+    unsigned long nr = arm_cca_rmi_addr_desc_4k_count(desc);
+    unsigned long idx;
+
+    if ( arm_cca_rmi_addr_desc_4k_size(desc) != ARM_CCA_RMI_PAGE_L3 ||
+         arm_cca_rmi_addr_desc_4k_state(desc) != ARM_CCA_RMI_OP_MEM_DELEGATED ||
+         !nr )
+        return -EIO;
+
+    for ( idx = 0; idx < nr; ++idx, addr += PAGE_SIZE )
+    {
+        int err = arm_cca_sro_forget_reclaimed_page(xfer, addr);
+
+        if ( err )
+            return err;
+    }
+
+    return 0;
+}
+
+/*
+ * Service an RMI_OP_MEM_REQ_RECLAIM request: offer the RMM a one-page
+ * address list sized for every currently tracked page, then drop each
+ * granule the returned descriptors cover from the tracking array.
+ *
+ * Returns the RMI call's status, or a negative errno on a descriptor or
+ * bookkeeping inconsistency.
+ */
+static int arm_cca_sro_reclaim_pages(unsigned long handle,
+                                     const struct arm_cca_sro_mem_xfer *xfer,
+                                     struct arm_smccc_res *res)
+{
+    struct page_info *list_pg;
+    uint64_t *list;
+    unsigned long max_descs = *xfer->nr_pages;
+    unsigned long nr_descs = 0;
+    unsigned int i;
+    int rc;
+
+    list_pg = alloc_domheap_page(NULL, 0);
+    if ( !list_pg )
+        return -ENOMEM;
+
+    list = map_domain_page(page_to_mfn(list_pg));
+    clear_page(list);
+
+    /*
+     * In the worst case each reclaimed page needs one address-list
+     * descriptor, so the tracked page count is a sufficient list capacity.
+     */
+    rc = arm_cca_rmi_op_mem_reclaim(handle, page_to_maddr(list_pg), max_descs,
+                                    res);
+
+    /*
+     * NOTE(review): descriptors are consumed only when the SRO reports
+     * INCOMPLETE; confirm against DEN0137 that a RECLAIM cannot reach a
+     * final status while still returning descriptors in a1, as those
+     * would be skipped here.
+     */
+    if ( arm_cca_rmi_status_is(arm_cca_rmi_result(res),
+                               ARM_CCA_RMI_INCOMPLETE) )
+    {
+        nr_descs = res->a1;
+        if ( nr_descs > max_descs )
+        {
+            /* The RMM may not return more descriptors than were offered. */
+            nr_descs = max_descs;
+            rc = -EIO;
+        }
+    }
+
+    for ( i = 0; i < nr_descs; ++i )
+    {
+        int ret = arm_cca_sro_forget_reclaimed_desc(xfer, list[i]);
+
+        if ( ret != 0 )
+        {
+            rc = ret;
+            break;
+        }
+    }
+
+    unmap_domain_page(list);
+    free_domheap_page(list_pg);
+
+    return rc;
+}
diff --git a/xen/arch/arm/cca/sro.h b/xen/arch/arm/cca/sro.h
new file mode 100644
index 000000000000..71d26574fe7e
--- /dev/null
+++ b/xen/arch/arm/cca/sro.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef ARM_CCA_SRO_H
+#define ARM_CCA_SRO_H
+
+struct arm_smccc_res;
+struct page_list_head;
+struct page_info;
+
+/* Per-SRO memory-transfer tracking state supplied by lifecycle callers. */
+struct arm_cca_sro_mem_xfer {
+    /*
+     * pages tracks pages accepted by the SRO and later returned by reclaim.
+     * max_pages is the capacity for accepted donations; zero disables
+     * donation. abandoned_pages keeps delegated pages which still need
+     * host-side undelegation retry.
+     */
+    struct page_info **pages;               /* tracking array (max_pages slots) */
+    unsigned int *nr_pages;                 /* live entries in pages[] */
+    unsigned int max_pages;                 /* donation capacity; 0 = disabled */
+    struct page_list_head *abandoned_pages; /* pages awaiting undelegation retry */
+};
+
+int arm_cca_sro_complete_mem_transfer(int rc, struct arm_smccc_res *res,
+ const struct arm_cca_sro_mem_xfer *xfer);
+
+#endif /* ARM_CCA_SRO_H */
--
2.51.0
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |