[PATCH v2 06/17] xen/riscv: add root page table allocation
Introduce support for allocating and initializing the root page table
required for RISC-V stage-2 address translation.
To implement root page table allocation, the following is introduced:
- clear_and_clean_page() and p2m_allocate_root() helpers to allocate and
  zero the 16 KiB root page table, as mandated by the RISC-V privileged
  specification for Sv39x4/Sv48x4 modes.
- Add hgatp_from_page() to construct the hgatp register value from the
  allocated root page (a worked example follows this list).
- Add p2m_alloc_root_table() to allocate the root table and initialize
  p2m->root and p2m->hgatp.
- Add maddr_to_page() and page_to_maddr() macros for easier address
  manipulation.
- Allocate root p2m table after p2m pool is initialized.
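For illustration, here is a worked example of the value hgatp_from_page()
produces on RV64. The root table address and VMID below are made up for the
example, not taken from this patch:

    /*
     * Hypothetical root table at physical address 0x80400000, VMID 1,
     * Sv39x4 (HGATP_MODE_SV39X4 == 8):
     *   PPN   = 0x80400000 >> PAGE_SHIFT         = 0x80400
     *   VMID  = MASK_INSR(1, HGATP64_VMID_MASK)  = 0x0000100000000000
     *   MODE  = MASK_INSR(8, HGATP64_MODE_MASK)  = 0x8000000000000000
     *   hgatp = MODE | VMID | PPN                = 0x8000100000080400
     */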
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
---
Changes in v2:
- This patch was created from "xen/riscv: introduce things necessary for p2m
initialization" with the following changes:
- [clear_and_clean_page()] Add missed call of clean_dcache_va_range().
- Drop p2m_get_clean_page() as it is only going to be used once, to allocate
  the root page table. Open-code it explicitly in p2m_allocate_root(). This
  also helps avoid duplicating the code connected to the order and nr_pages
  of the p2m root page table.
- Instead of using order 2 for alloc_domheap_pages(), use
get_order_from_bytes(KB(16)).
- Clear and clean a proper amount of allocated pages in p2m_allocate_root().
- Drop _info from the function name hgatp_from_page_info() and its argument
page_info.
- Introduce HGATP_MODE_MASK and use MASK_INSR() instead of shift to calculate
value of hgatp.
- Drop unnecessary parentheses in definition of page_to_maddr().
- Add support for VMIDs.
- Drop TLB flushing in p2m_alloc_root_table() and do that once when VMID
is re-used. [Look at p2m_alloc_vmid()]
- Allocate the p2m root table after the p2m pool is fully initialized: first
  return pages to the p2m pool, then allocate the p2m root table.
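As a note for reviewers: this patch only computes and caches the hgatp
value; its consumer is not shown here. A minimal sketch of how the cached
value might eventually be loaded, assuming Xen's existing csr_write()
helper and the CSR_HGATP encoding (sketch only, not part of this series):

    /* Program the stage-2 translation, then flush guest-physical TLB
     * entries; the bare hfence.gvma form flushes for all VMIDs. */
    csr_write(CSR_HGATP, p2m->hgatp);
    asm volatile ( "hfence.gvma" ::: "memory" );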
---
xen/arch/riscv/include/asm/mm.h | 4 +
xen/arch/riscv/include/asm/p2m.h | 6 ++
xen/arch/riscv/include/asm/riscv_encoding.h | 4 +
xen/arch/riscv/p2m.c | 94 +++++++++++++++++++++
4 files changed, 108 insertions(+)
diff --git a/xen/arch/riscv/include/asm/mm.h b/xen/arch/riscv/include/asm/mm.h
index 01bbd92a06..912bc79e1b 100644
--- a/xen/arch/riscv/include/asm/mm.h
+++ b/xen/arch/riscv/include/asm/mm.h
@@ -149,6 +149,10 @@ extern struct page_info *frametable_virt_start;
#define mfn_to_page(mfn) (frametable_virt_start + mfn_x(mfn))
#define page_to_mfn(pg) _mfn((pg) - frametable_virt_start)
+/* Convert between machine addresses and page-info structures. */
+#define maddr_to_page(ma) mfn_to_page(maddr_to_mfn(ma))
+#define page_to_maddr(pg) mfn_to_maddr(page_to_mfn(pg))
+
static inline void *page_to_virt(const struct page_info *pg)
{
return mfn_to_virt(mfn_x(page_to_mfn(pg)));
diff --git a/xen/arch/riscv/include/asm/p2m.h b/xen/arch/riscv/include/asm/p2m.h
index 9570eff014..a31b05bd50 100644
--- a/xen/arch/riscv/include/asm/p2m.h
+++ b/xen/arch/riscv/include/asm/p2m.h
@@ -26,6 +26,12 @@ struct p2m_domain {
/* Pages used to construct the p2m */
struct page_list_head pages;
+    /* The root of the p2m tree. May be concatenated */
+    struct page_info *root;
+
+    /* Address Translation Table for the p2m */
+    paddr_t hgatp;
+
/* Indicate if it is required to clean the cache when writing an entry */
bool clean_pte;
diff --git a/xen/arch/riscv/include/asm/riscv_encoding.h b/xen/arch/riscv/include/asm/riscv_encoding.h
index 6cc8f4eb45..a71b7546ef 100644
--- a/xen/arch/riscv/include/asm/riscv_encoding.h
+++ b/xen/arch/riscv/include/asm/riscv_encoding.h
@@ -133,11 +133,13 @@
#define HGATP_MODE_SV48X4 _UL(9)
#define HGATP32_MODE_SHIFT 31
+#define HGATP32_MODE_MASK _UL(0x80000000)
#define HGATP32_VMID_SHIFT 22
#define HGATP32_VMID_MASK _UL(0x1FC00000)
#define HGATP32_PPN _UL(0x003FFFFF)
#define HGATP64_MODE_SHIFT 60
+#define HGATP64_MODE_MASK _ULL(0xF000000000000000)
#define HGATP64_VMID_SHIFT 44
#define HGATP64_VMID_MASK _ULL(0x03FFF00000000000)
#define HGATP64_PPN _ULL(0x00000FFFFFFFFFFF)
@@ -170,6 +172,7 @@
#define HGATP_VMID_SHIFT HGATP64_VMID_SHIFT
#define HGATP_VMID_MASK HGATP64_VMID_MASK
#define HGATP_MODE_SHIFT HGATP64_MODE_SHIFT
+#define HGATP_MODE_MASK HGATP64_MODE_MASK
#else
#define MSTATUS_SD MSTATUS32_SD
#define SSTATUS_SD SSTATUS32_SD
@@ -181,6 +184,7 @@
#define HGATP_VMID_SHIFT HGATP32_VMID_SHIFT
#define HGATP_VMID_MASK HGATP32_VMID_MASK
#define HGATP_MODE_SHIFT HGATP32_MODE_SHIFT
+#define HGATP_MODE_MASK HGATP32_MODE_MASK
#endif
#define TOPI_IID_SHIFT 16
diff --git a/xen/arch/riscv/p2m.c b/xen/arch/riscv/p2m.c
index e409997499..2419a61d8c 100644
--- a/xen/arch/riscv/p2m.c
+++ b/xen/arch/riscv/p2m.c
@@ -41,6 +41,91 @@ void p2m_write_unlock(struct p2m_domain *p2m)
write_unlock(&p2m->lock);
}
+static void clear_and_clean_page(struct page_info *page)
+{
+    void *p = __map_domain_page(page);
+
+    /* Clear first, then clean, so the zeroed bytes reach memory. */
+    clear_page(p);
+    clean_dcache_va_range(p, PAGE_SIZE);
+    unmap_domain_page(p);
+}
+
+static struct page_info *p2m_allocate_root(struct domain *d)
+{
+    struct page_info *page;
+    unsigned int order = get_order_from_bytes(KB(16));
+    unsigned int nr_pages = _AC(1, U) << order;
+
+    /* Return the nr_pages needed for the p2m root table back to the domheap. */
+
+    if ( ACCESS_ONCE(d->arch.paging.p2m_total_pages) < nr_pages )
+        panic("Specify more xen,domain-p2m-mem-mb\n");
+
+    for ( unsigned int i = 0; i < nr_pages; i++ )
+    {
+        /* Return memory to domheap. */
+        page = page_list_remove_head(&d->arch.paging.p2m_freelist);
+        if ( page )
+        {
+            ACCESS_ONCE(d->arch.paging.p2m_total_pages)--;
+            free_domheap_page(page);
+        }
+        else
+        {
+            printk(XENLOG_ERR
+                   "Failed to free P2M pages, P2M freelist is empty.\n");
+            return NULL;
+        }
+    }
+
+    /* Allocate memory for the p2m root table. */
+
+    /*
+     * As explained in Section 18.5.1 of the Privileged Architecture Spec
+     * (version 20240411), for the paged virtual-memory schemes (Sv32x4,
+     * Sv39x4, Sv48x4, and Sv57x4) the root page table is 16 KiB and must
+     * be aligned to a 16-KiB boundary.
+     */
+    page = alloc_domheap_pages(d, order, MEMF_no_owner);
+    if ( page == NULL )
+        return NULL;
+
+    for ( unsigned int i = 0; i < nr_pages; i++ )
+        clear_and_clean_page(page + i);
+
+    return page;
+}
+
+static unsigned long hgatp_from_page(struct p2m_domain *p2m)
+{
+    struct page_info *p2m_root_page = p2m->root;
+    unsigned long ppn;
+    unsigned long hgatp_mode;
+
+    ppn = PFN_DOWN(page_to_maddr(p2m_root_page)) & HGATP_PPN;
+
+#if RV_STAGE1_MODE == SATP_MODE_SV39
+    hgatp_mode = HGATP_MODE_SV39X4;
+#elif RV_STAGE1_MODE == SATP_MODE_SV48
+    hgatp_mode = HGATP_MODE_SV48X4;
+#else
+# error "add HGATP_MODE"
+#endif
+
+    return ppn | MASK_INSR(p2m->vmid, HGATP_VMID_MASK) |
+           MASK_INSR(hgatp_mode, HGATP_MODE_MASK);
+}
+
+static int p2m_alloc_root_table(struct domain *d)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+    p2m->root = p2m_allocate_root(d);
+    if ( !p2m->root )
+        return -ENOMEM;
+
+    p2m->hgatp = hgatp_from_page(p2m);
+
+    return 0;
+}
+
static spinlock_t vmid_alloc_lock = SPIN_LOCK_UNLOCKED;
/*
@@ -228,5 +313,14 @@ int p2m_set_allocation(struct domain *d, unsigned long pages, bool *preempted)
}
}
+    /*
+     * The root table must be allocated with alloc_domheap_pages() to satisfy
+     * its size and alignment requirements, so it cannot come straight from
+     * the p2m pool. Wait until the pool is initialized, then allocate the
+     * root table once, first returning the pages it needs from the pool.
+     */
+    if ( !d->arch.p2m.root )
+        return p2m_alloc_root_table(d);
+
return 0;
}
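One property worth noting while reviewing: Xen's buddy allocator returns
blocks aligned to their order, so the order-2 (16 KiB) allocation above is
naturally aligned to a 16 KiB boundary as the spec demands. A
development-only sanity check along these lines (a sketch using Xen's
IS_ALIGNED() and the page_to_maddr() macro added by this patch) could be
placed in p2m_alloc_root_table():

    /* Sv39x4/Sv48x4 require the root table on a 16 KiB boundary. */
    ASSERT(IS_ALIGNED(page_to_maddr(p2m->root), KB(16)));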
--
2.49.0