|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH for-next 6/8] x86/domain: factor out pv_domain_initialise
Lump everything PV-related in arch_domain_create into
pv_domain_initialise.
Though domcr_flags is not necessary, the new function is given one to
match its HVM counterpart.
Since it cleans up after itself, there is no need to clean up in
arch_domain_create in case of failure.
No functional change.
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
xen/arch/x86/domain.c | 97 +++++++++++++++++++++++++++++++--------------------
1 file changed, 60 insertions(+), 37 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 05885a103d..ed16adf77a 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -533,6 +533,58 @@ static void pv_domain_destroy(struct domain *d)
free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
}
+static int pv_domain_initialise(struct domain *d, unsigned int domcr_flags)
+{
+ static const struct arch_csw pv_csw = {
+ .from = paravirt_ctxt_switch_from,
+ .to = paravirt_ctxt_switch_to,
+ .tail = continue_nonidle_domain,
+ };
+ int rc = -ENOMEM;
+
+ d->arch.pv_domain.gdt_ldt_l1tab =
+ alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
+ if ( !d->arch.pv_domain.gdt_ldt_l1tab )
+ goto fail;
+ clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
+
+ if ( levelling_caps & ~LCAP_faulting )
+ {
+ d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
+ if ( !d->arch.pv_domain.cpuidmasks )
+ goto fail;
+ *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
+ }
+
+ rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
+ GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
+ NULL, NULL);
+ if ( rc )
+ goto fail;
+
+ d->arch.ctxt_switch = &pv_csw;
+
+ /* 64-bit PV guest by default. */
+ d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
+
+ return 0;
+
+fail:
+ if ( d->arch.pv_domain.gdt_ldt_l1tab )
+ {
+ free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
+ d->arch.pv_domain.gdt_ldt_l1tab = NULL;
+ }
+
+ if ( d->arch.pv_domain.cpuidmasks )
+ {
+ xfree(d->arch.pv_domain.cpuidmasks);
+ d->arch.pv_domain.cpuidmasks = NULL;
+ }
+
+ return rc;
+}
+
int arch_domain_create(struct domain *d, unsigned int domcr_flags,
struct xen_arch_domainconfig *config)
{
@@ -593,30 +645,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
d->arch.emulation_flags = emflags;
}
- if ( is_idle_domain(d) )
- rc = 0;
- else
- {
- d->arch.pv_domain.gdt_ldt_l1tab =
- alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
- if ( !d->arch.pv_domain.gdt_ldt_l1tab )
- goto fail;
- clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
-
- if ( levelling_caps & ~LCAP_faulting )
- {
- d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
- if ( !d->arch.pv_domain.cpuidmasks )
- goto fail;
- *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
- }
-
- rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
- GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
- NULL, NULL);
- }
- if ( rc )
- goto fail;
+ rc = 0;
mapcache_domain_init(d);
@@ -665,23 +694,20 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
if ( (rc = hvm_domain_initialise(d, domcr_flags)) != 0 )
goto fail;
}
- else
+ else if ( is_idle_domain(d) )
{
- static const struct arch_csw pv_csw = {
- .from = paravirt_ctxt_switch_from,
- .to = paravirt_ctxt_switch_to,
- .tail = continue_nonidle_domain,
- };
static const struct arch_csw idle_csw = {
.from = paravirt_ctxt_switch_from,
.to = paravirt_ctxt_switch_to,
.tail = continue_idle_domain,
};
- d->arch.ctxt_switch = is_idle_domain(d) ? &idle_csw : &pv_csw;
-
- /* 64-bit PV guest by default. */
- d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
+ d->arch.ctxt_switch = &idle_csw;
+ }
+ else
+ {
+ if ( (rc = pv_domain_initialise(d, domcr_flags)) != 0 )
+ goto fail;
}
/* initialize default tsc behavior in case tools don't */
@@ -709,9 +735,6 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
if ( paging_initialised )
paging_final_teardown(d);
free_perdomain_mappings(d);
- if ( is_pv_domain(d) )
- pv_domain_destroy(d);
-
return rc;
}
--
2.11.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |