[Xen-devel] [PATCH v2 04/25] arm/altp2m: Move hostp2m init/teardown to individual functions.
This commit pulls generic init/teardown functionality out of p2m_init and
p2m_teardown into the functions p2m_init_one, p2m_free_one, and
p2m_flush_table. This allows our future implementation to reuse existing
code for the initialization/teardown of altp2m views.

Signed-off-by: Sergej Proskurin <proskurin@xxxxxxxxxxxxx>
---
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
---
v2: Added the function p2m_flush_table to the previous version.
---
 xen/arch/arm/p2m.c        | 74 +++++++++++++++++++++++++++++++++++++----------
 xen/include/asm-arm/p2m.h | 11 +++++++
 2 files changed, 70 insertions(+), 15 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index cbc64a1..17f3299 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1360,50 +1360,94 @@ static void p2m_free_vmid(struct domain *d)
     spin_unlock(&vmid_alloc_lock);
 }
 
-void p2m_teardown(struct domain *d)
+/* Reset this p2m table to be empty */
+void p2m_flush_table(struct p2m_domain *p2m)
 {
-    struct p2m_domain *p2m = &d->arch.p2m;
-    struct page_info *pg;
+    struct page_info *page, *pg;
+    unsigned int i;
+
+    page = p2m->root;
+
+    /* Clear all concatenated first level pages */
+    for ( i = 0; i < P2M_ROOT_PAGES; i++ )
+        clear_and_clean_page(page + i);
 
+    /* Free the rest of the trie pages back to the paging pool */
     while ( (pg = page_list_remove_head(&p2m->pages)) )
         free_domheap_page(pg);
+}
+
+static inline void p2m_free_one(struct p2m_domain *p2m)
+{
+    p2m_flush_table(p2m);
+
+    /* Free VMID and reset VTTBR */
+    p2m_free_vmid(p2m->domain);
+    p2m->vttbr.vttbr = INVALID_VTTBR;
 
     if ( p2m->root )
         free_domheap_pages(p2m->root, P2M_ROOT_ORDER);
 
     p2m->root = NULL;
 
-    p2m_free_vmid(d);
-
     radix_tree_destroy(&p2m->mem_access_settings, NULL);
 }
 
-int p2m_init(struct domain *d)
+int p2m_init_one(struct domain *d, struct p2m_domain *p2m)
 {
-    struct p2m_domain *p2m = &d->arch.p2m;
     int rc = 0;
 
     rwlock_init(&p2m->lock);
     INIT_PAGE_LIST_HEAD(&p2m->pages);
 
-    p2m->vmid = INVALID_VMID;
-
     rc = p2m_alloc_vmid(d);
     if ( rc != 0 )
         return rc;
 
-    p2m->max_mapped_gfn = _gfn(0);
-    p2m->lowest_mapped_gfn = _gfn(ULONG_MAX);
-
-    p2m->default_access = p2m_access_rwx;
+    p2m->domain = d;
+    p2m->access_required = false;
     p2m->mem_access_enabled = false;
+    p2m->default_access = p2m_access_rwx;
+    p2m->root = NULL;
+    p2m->max_mapped_gfn = _gfn(0);
+    p2m->lowest_mapped_gfn = INVALID_GFN;
+    p2m->vttbr.vttbr = INVALID_VTTBR;
     radix_tree_init(&p2m->mem_access_settings);
 
-    rc = p2m_alloc_table(d);
-
     return rc;
 }
 
+static void p2m_teardown_hostp2m(struct domain *d)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+    p2m_free_one(p2m);
+}
+
+void p2m_teardown(struct domain *d)
+{
+    p2m_teardown_hostp2m(d);
+}
+
+static int p2m_init_hostp2m(struct domain *d)
+{
+    int rc;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+    p2m->p2m_class = p2m_host;
+
+    rc = p2m_init_one(d, p2m);
+    if ( rc )
+        return rc;
+
+    return p2m_alloc_table(d);
+}
+
+int p2m_init(struct domain *d)
+{
+    return p2m_init_hostp2m(d);
+}
+
 int relinquish_p2m_mapping(struct domain *d)
 {
     struct p2m_domain *p2m = &d->arch.p2m;
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 5c7cd1a..1f9c370 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -18,6 +18,11 @@ struct domain;
 
 extern void memory_type_changed(struct domain *);
 
+typedef enum {
+    p2m_host,
+    p2m_alternate,
+} p2m_class_t;
+
 /* Per-p2m-table state */
 struct p2m_domain {
     /* Lock that protects updates to the p2m */
@@ -78,6 +83,12 @@ struct p2m_domain {
     /* Radix tree to store the p2m_access_t settings as the pte's don't have
      * enough available bits to store this information. */
     struct radix_tree_root mem_access_settings;
+
+    /* Choose between: host/alternate */
+    p2m_class_t p2m_class;
+
+    /* Back pointer to domain */
+    struct domain *domain;
 };
 
 /*
-- 
2.9.0
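To make the intent of the split concrete, below is a rough sketch of how a
later altp2m patch could reuse the new helpers for an alternate view. It is
only an illustration under assumptions: p2m_init_altp2m_one() and
p2m_teardown_altp2m_one() are hypothetical names, and only p2m_init_one(),
p2m_free_one(), p2m_flush_table() and p2m_class_t are actually introduced by
this patch.

/*
 * Illustrative sketch, not part of this patch: a possible alternate-view
 * init/teardown path built on the helpers introduced above.  Both function
 * names are assumptions about follow-up patches.
 */
static int p2m_init_altp2m_one(struct domain *d, struct p2m_domain *p2m)
{
    p2m->p2m_class = p2m_alternate;

    /*
     * Reuse the generic setup (lock/list init, VMID allocation, default
     * access rights); allocation of the view's own table is omitted here.
     */
    return p2m_init_one(d, p2m);
}

static void p2m_teardown_altp2m_one(struct p2m_domain *p2m)
{
    /* Flushes the table, frees the VMID and releases the root pages. */
    p2m_free_one(p2m);
}

As the hostp2m wrappers in the patch show, only the class tag and the
table-allocation policy differ per class; the rest is shared through
p2m_init_one()/p2m_free_one().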