[Xen-devel] [PATCH for-next v2 09/10] x86/domain: move PV specific code to pv/domain.c



Move all the PV specific code, along with its supporting code, to
pv/domain.c.

This in turn requires exporting a few functions via a header file. Create
pv/domain.h for that purpose.

No functional change.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/domain.c           | 214 ++----------------------------------
 xen/arch/x86/pv/Makefile        |   1 +
 xen/arch/x86/pv/domain.c        | 232 ++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/pv/domain.h |  30 ++++++
 4 files changed, 270 insertions(+), 207 deletions(-)
 create mode 100644 xen/arch/x86/pv/domain.c
 create mode 100644 xen/include/asm-x86/pv/domain.h

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index f5e917431d..7c83fec8fc 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -63,6 +63,7 @@
 #include <xen/iommu.h>
 #include <compat/vcpu.h>
 #include <asm/psr.h>
+#include <asm/pv/domain.h>
 
 DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
 
@@ -70,9 +71,6 @@ static void default_idle(void);
 void (*pm_idle) (void) __read_mostly = default_idle;
 void (*dead_idle) (void) __read_mostly = default_dead_idle;
 
-static void paravirt_ctxt_switch_from(struct vcpu *v);
-static void paravirt_ctxt_switch_to(struct vcpu *v);
-
 static void default_idle(void)
 {
     local_irq_disable();
@@ -145,13 +143,6 @@ static void noreturn continue_idle_domain(struct vcpu *v)
     reset_stack_and_jump(idle_loop);
 }
 
-static void noreturn continue_nonidle_domain(struct vcpu *v)
-{
-    check_wakeup_from_wait();
-    mark_regs_dirty(guest_cpu_user_regs());
-    reset_stack_and_jump(ret_from_intr);
-}
-
 void dump_pageframe_info(struct domain *d)
 {
     struct page_info *page;
@@ -313,135 +304,6 @@ void free_vcpu_struct(struct vcpu *v)
     free_xenheap_page(v);
 }
 
-static int setup_compat_l4(struct vcpu *v)
-{
-    struct page_info *pg;
-    l4_pgentry_t *l4tab;
-
-    pg = alloc_domheap_page(v->domain, MEMF_no_owner);
-    if ( pg == NULL )
-        return -ENOMEM;
-
-    /* This page needs to look like a pagetable so that it can be shadowed */
-    pg->u.inuse.type_info = PGT_l4_page_table|PGT_validated|1;
-
-    l4tab = __map_domain_page(pg);
-    clear_page(l4tab);
-    init_guest_l4_table(l4tab, v->domain, 1);
-    unmap_domain_page(l4tab);
-
-    v->arch.guest_table = pagetable_from_page(pg);
-    v->arch.guest_table_user = v->arch.guest_table;
-
-    return 0;
-}
-
-static void release_compat_l4(struct vcpu *v)
-{
-    if ( !pagetable_is_null(v->arch.guest_table) )
-        free_domheap_page(pagetable_get_page(v->arch.guest_table));
-    v->arch.guest_table = pagetable_null();
-    v->arch.guest_table_user = pagetable_null();
-}
-
-int switch_compat(struct domain *d)
-{
-    struct vcpu *v;
-    int rc;
-
-    if ( is_hvm_domain(d) || d->tot_pages != 0 )
-        return -EACCES;
-    if ( is_pv_32bit_domain(d) )
-        return 0;
-
-    d->arch.has_32bit_shinfo = 1;
-    if ( is_pv_domain(d) )
-        d->arch.is_32bit_pv = 1;
-
-    for_each_vcpu( d, v )
-    {
-        rc = setup_compat_arg_xlat(v);
-        if ( !rc )
-            rc = setup_compat_l4(v);
-
-        if ( rc )
-            goto undo_and_fail;
-    }
-
-    domain_set_alloc_bitsize(d);
-    recalculate_cpuid_policy(d);
-
-    d->arch.x87_fip_width = 4;
-
-    return 0;
-
- undo_and_fail:
-    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
-    for_each_vcpu( d, v )
-    {
-        free_compat_arg_xlat(v);
-        release_compat_l4(v);
-    }
-
-    return rc;
-}
-
-static int pv_create_gdt_ldt_l1tab(struct domain *d, struct vcpu *v)
-{
-    return create_perdomain_mapping(d, GDT_VIRT_START(v),
-                                    1 << GDT_LDT_VCPU_SHIFT,
-                                    d->arch.pv_domain.gdt_ldt_l1tab, NULL);
-}
-
-static void pv_destroy_gdt_ldt_l1tab(struct domain *d, struct vcpu *v)
-{
-    destroy_perdomain_mapping(d, GDT_VIRT_START(v), 1 << GDT_LDT_VCPU_SHIFT);
-}
-
-static void pv_vcpu_destroy(struct vcpu *v);
-static int pv_vcpu_initialise(struct vcpu *v)
-{
-    struct domain *d = v->domain;
-    int rc;
-
-    ASSERT(!is_idle_domain(d));
-
-    spin_lock_init(&v->arch.pv_vcpu.shadow_ldt_lock);
-
-    rc = pv_create_gdt_ldt_l1tab(d, v);
-    if ( rc )
-        goto done;
-
-    BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv_vcpu.trap_ctxt) >
-                 PAGE_SIZE);
-    v->arch.pv_vcpu.trap_ctxt = xzalloc_array(struct trap_info,
-                                              NR_VECTORS);
-    if ( !v->arch.pv_vcpu.trap_ctxt )
-    {
-        rc = -ENOMEM;
-        goto done;
-    }
-
-    /* PV guests by default have a 100Hz ticker. */
-    v->periodic_period = MILLISECS(10);
-
-    v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
-
-    if ( is_pv_32bit_domain(d) )
-    {
-        if ( (rc = setup_compat_arg_xlat(v)) )
-            goto done;
-
-        if ( (rc = setup_compat_l4(v)) )
-            goto done;
-    }
-
- done:
-    if ( rc )
-        pv_vcpu_destroy(v);
-    return rc;
-}
-
 int vcpu_initialise(struct vcpu *v)
 {
     struct domain *d = v->domain;
@@ -486,19 +348,6 @@ int vcpu_initialise(struct vcpu *v)
     return rc;
 }
 
-static void pv_vcpu_destroy(struct vcpu *v)
-{
-    if ( is_pv_32bit_vcpu(v) )
-    {
-        free_compat_arg_xlat(v);
-        release_compat_l4(v);
-    }
-
-    pv_destroy_gdt_ldt_l1tab(v->domain, v);
-    xfree(v->arch.pv_vcpu.trap_ctxt);
-    v->arch.pv_vcpu.trap_ctxt = NULL;
-}
-
 void vcpu_destroy(struct vcpu *v)
 {
     xfree(v->arch.vm_event);
@@ -536,59 +385,8 @@ static bool emulation_flags_ok(const struct domain *d, uint32_t emflags)
     return true;
 }
 
-static void pv_domain_destroy(struct domain *d)
-{
-    destroy_perdomain_mapping(d, GDT_LDT_VIRT_START,
-                              GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
-    xfree(d->arch.pv_domain.cpuidmasks);
-    d->arch.pv_domain.cpuidmasks = NULL;
-    free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
-    d->arch.pv_domain.gdt_ldt_l1tab = NULL;
-}
-
-static int pv_domain_initialise(struct domain *d, unsigned int domcr_flags,
-                                struct xen_arch_domainconfig *config)
-{
-    static const struct arch_csw pv_csw = {
-        .from = paravirt_ctxt_switch_from,
-        .to   = paravirt_ctxt_switch_to,
-        .tail = continue_nonidle_domain,
-    };
-    int rc = -ENOMEM;
-
-    d->arch.pv_domain.gdt_ldt_l1tab =
-        alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
-    if ( !d->arch.pv_domain.gdt_ldt_l1tab )
-        goto fail;
-    clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
-
-    if ( levelling_caps & ~LCAP_faulting )
-    {
-        d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
-        if ( !d->arch.pv_domain.cpuidmasks )
-            goto fail;
-        *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
-    }
-
-    rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
-                                  GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
-                                  NULL, NULL);
-    if ( rc )
-        goto fail;
-
-    d->arch.ctxt_switch = &pv_csw;
-
-    /* 64-bit PV guest by default. */
-    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
-
-    return 0;
-
- fail:
-    pv_domain_destroy(d);
-
-    return rc;
-}
-
+void paravirt_ctxt_switch_from(struct vcpu *v);
+void paravirt_ctxt_switch_to(struct vcpu *v);
 int arch_domain_create(struct domain *d, unsigned int domcr_flags,
                        struct xen_arch_domainconfig *config)
 {
@@ -1919,7 +1717,8 @@ static void save_segments(struct vcpu *v)
 
 #define switch_kernel_stack(v) ((void)0)
 
-static void paravirt_ctxt_switch_from(struct vcpu *v)
+/* Needed by PV guests */
+void paravirt_ctxt_switch_from(struct vcpu *v)
 {
     save_segments(v);
 
@@ -1933,7 +1732,8 @@ static void paravirt_ctxt_switch_from(struct vcpu *v)
         write_debugreg(7, 0);
 }
 
-static void paravirt_ctxt_switch_to(struct vcpu *v)
+/* Needed by PV guests */
+void paravirt_ctxt_switch_to(struct vcpu *v)
 {
     unsigned long cr4;
 
diff --git a/xen/arch/x86/pv/Makefile b/xen/arch/x86/pv/Makefile
index ea94599438..2737824e81 100644
--- a/xen/arch/x86/pv/Makefile
+++ b/xen/arch/x86/pv/Makefile
@@ -1,2 +1,3 @@
 obj-y += hypercall.o
 obj-bin-y += dom0_build.init.o
+obj-y += domain.o
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
new file mode 100644
index 0000000000..562c3d03f5
--- /dev/null
+++ b/xen/arch/x86/pv/domain.c
@@ -0,0 +1,232 @@
+/******************************************************************************
+ * arch/x86/pv/domain.c
+ *
+ * PV domain handling
+ */
+
+/*
+ *  Copyright (C) 1995  Linus Torvalds
+ *
+ *  Pentium III FXSR, SSE support
+ *  Gareth Hughes <gareth@xxxxxxxxxxx>, May 2000
+ */
+
+#include <xen/domain_page.h>
+#include <xen/errno.h>
+#include <xen/lib.h>
+#include <xen/sched.h>
+
+static void noreturn continue_nonidle_domain(struct vcpu *v)
+{
+    check_wakeup_from_wait();
+    mark_regs_dirty(guest_cpu_user_regs());
+    reset_stack_and_jump(ret_from_intr);
+}
+
+static int setup_compat_l4(struct vcpu *v)
+{
+    struct page_info *pg;
+    l4_pgentry_t *l4tab;
+
+    pg = alloc_domheap_page(v->domain, MEMF_no_owner);
+    if ( pg == NULL )
+        return -ENOMEM;
+
+    /* This page needs to look like a pagetable so that it can be shadowed */
+    pg->u.inuse.type_info = PGT_l4_page_table|PGT_validated|1;
+
+    l4tab = __map_domain_page(pg);
+    clear_page(l4tab);
+    init_guest_l4_table(l4tab, v->domain, 1);
+    unmap_domain_page(l4tab);
+
+    v->arch.guest_table = pagetable_from_page(pg);
+    v->arch.guest_table_user = v->arch.guest_table;
+
+    return 0;
+}
+
+static void release_compat_l4(struct vcpu *v)
+{
+    if ( !pagetable_is_null(v->arch.guest_table) )
+        free_domheap_page(pagetable_get_page(v->arch.guest_table));
+    v->arch.guest_table = pagetable_null();
+    v->arch.guest_table_user = pagetable_null();
+}
+
+int switch_compat(struct domain *d)
+{
+    struct vcpu *v;
+    int rc;
+
+    if ( is_hvm_domain(d) || d->tot_pages != 0 )
+        return -EACCES;
+    if ( is_pv_32bit_domain(d) )
+        return 0;
+
+    d->arch.has_32bit_shinfo = 1;
+    if ( is_pv_domain(d) )
+        d->arch.is_32bit_pv = 1;
+
+    for_each_vcpu( d, v )
+    {
+        rc = setup_compat_arg_xlat(v);
+        if ( !rc )
+            rc = setup_compat_l4(v);
+
+        if ( rc )
+            goto undo_and_fail;
+    }
+
+    domain_set_alloc_bitsize(d);
+    recalculate_cpuid_policy(d);
+
+    d->arch.x87_fip_width = 4;
+
+    return 0;
+
+ undo_and_fail:
+    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
+    for_each_vcpu( d, v )
+    {
+        free_compat_arg_xlat(v);
+        release_compat_l4(v);
+    }
+
+    return rc;
+}
+
+static int pv_create_gdt_ldt_l1tab(struct domain *d, struct vcpu *v)
+{
+    return create_perdomain_mapping(d, GDT_VIRT_START(v),
+                                    1 << GDT_LDT_VCPU_SHIFT,
+                                    d->arch.pv_domain.gdt_ldt_l1tab, NULL);
+}
+
+static void pv_destroy_gdt_ldt_l1tab(struct domain *d, struct vcpu *v)
+{
+    destroy_perdomain_mapping(d, GDT_VIRT_START(v), 1 << GDT_LDT_VCPU_SHIFT);
+}
+
+void pv_vcpu_destroy(struct vcpu *v)
+{
+    if ( is_pv_32bit_vcpu(v) )
+    {
+        free_compat_arg_xlat(v);
+        release_compat_l4(v);
+    }
+
+    pv_destroy_gdt_ldt_l1tab(v->domain, v);
+    xfree(v->arch.pv_vcpu.trap_ctxt);
+    v->arch.pv_vcpu.trap_ctxt = NULL;
+}
+
+int pv_vcpu_initialise(struct vcpu *v)
+{
+    struct domain *d = v->domain;
+    int rc;
+
+    ASSERT(!is_idle_domain(d));
+
+    spin_lock_init(&v->arch.pv_vcpu.shadow_ldt_lock);
+
+    rc = pv_create_gdt_ldt_l1tab(d, v);
+    if ( rc )
+        goto done;
+
+    BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv_vcpu.trap_ctxt) >
+                 PAGE_SIZE);
+    v->arch.pv_vcpu.trap_ctxt = xzalloc_array(struct trap_info,
+                                              NR_VECTORS);
+    if ( !v->arch.pv_vcpu.trap_ctxt )
+    {
+        rc = -ENOMEM;
+        goto done;
+    }
+
+    /* PV guests by default have a 100Hz ticker. */
+    v->periodic_period = MILLISECS(10);
+
+    v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
+
+    if ( is_pv_32bit_domain(d) )
+    {
+        if ( (rc = setup_compat_arg_xlat(v)) )
+            goto done;
+
+        if ( (rc = setup_compat_l4(v)) )
+            goto done;
+    }
+
+ done:
+    if ( rc )
+        pv_vcpu_destroy(v);
+    return rc;
+}
+
+void pv_domain_destroy(struct domain *d)
+{
+    destroy_perdomain_mapping(d, GDT_LDT_VIRT_START,
+                              GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
+    xfree(d->arch.pv_domain.cpuidmasks);
+    d->arch.pv_domain.cpuidmasks = NULL;
+    free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
+    d->arch.pv_domain.gdt_ldt_l1tab = NULL;
+}
+
+
+extern void paravirt_ctxt_switch_from(struct vcpu *v);
+extern void paravirt_ctxt_switch_to(struct vcpu *v);
+
+int pv_domain_initialise(struct domain *d, unsigned int domcr_flags,
+                         struct xen_arch_domainconfig *config)
+{
+    static const struct arch_csw pv_csw = {
+        .from = paravirt_ctxt_switch_from,
+        .to   = paravirt_ctxt_switch_to,
+        .tail = continue_nonidle_domain,
+    };
+    int rc = -ENOMEM;
+
+    d->arch.pv_domain.gdt_ldt_l1tab =
+        alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
+    if ( !d->arch.pv_domain.gdt_ldt_l1tab )
+        goto fail;
+    clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
+
+    if ( levelling_caps & ~LCAP_faulting )
+    {
+        d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
+        if ( !d->arch.pv_domain.cpuidmasks )
+            goto fail;
+        *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
+    }
+
+    rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
+                                  GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
+                                  NULL, NULL);
+    if ( rc )
+        goto fail;
+
+    d->arch.ctxt_switch = &pv_csw;
+
+    /* 64-bit PV guest by default. */
+    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
+
+    return 0;
+
+  fail:
+    pv_domain_destroy(d);
+
+    return rc;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-x86/pv/domain.h b/xen/include/asm-x86/pv/domain.h
new file mode 100644
index 0000000000..6288ae24df
--- /dev/null
+++ b/xen/include/asm-x86/pv/domain.h
@@ -0,0 +1,30 @@
+/*
+ * pv/domain.h
+ *
+ * PV guest interface definitions
+ *
+ * Copyright (C) 2017 Wei Liu <wei.liu2@xxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms and conditions of the GNU General Public
+ * License, version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __X86_PV_DOMAIN_H__
+#define __X86_PV_DOMAIN_H__
+
+void pv_vcpu_destroy(struct vcpu *v);
+int pv_vcpu_initialise(struct vcpu *v);
+void pv_domain_destroy(struct domain *d);
+int pv_domain_initialise(struct domain *d, unsigned int domcr_flags,
+                         struct xen_arch_domainconfig *config);
+
+#endif /* __X86_PV_DOMAIN_H__ */
-- 
2.11.0

