
[Xen-devel] [PATCH 5/6] x86: introduce alloc_vcpu_guest_context()



This is necessary because on x86-64 struct vcpu_guest_context is larger
than PAGE_SIZE, and hence not suitable for general-purpose runtime
allocation. On x86-32 the FIX_PAE_HIGHMEM_* fixmap entries are re-used,
while on x86-64 new per-CPU fixmap entries get introduced. The
implication of using per-CPU fixmaps is that these allocations have to
happen from non-preemptible hypercall context (which they all do).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
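
For reference, a minimal caller-side sketch of how the new interface is
intended to be used from (non-preemptible) hypercall context. The
do_example_op() wrapper below is purely illustrative and not part of the
patch; the real callers are the do_vcpu_op() and do_domctl() hunks
further down.

    long do_example_op(XEN_GUEST_HANDLE(vcpu_guest_context_t) arg)
    {
        struct vcpu_guest_context *ctxt;
        long rc = 0;

        /*
         * Maps PFN_UP(sizeof(*ctxt)) domheap pages through this CPU's
         * private fixmap slots; only one such allocation may be live
         * per CPU at any time.
         */
        if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
            return -ENOMEM;

        if ( copy_from_guest(ctxt, arg, 1) )
            rc = -EFAULT;
        else
        {
            /* ... consume *ctxt ... */
        }

        /* Tears down the fixmap entries and frees the backing pages. */
        free_vcpu_guest_context(ctxt);
        return rc;
    }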

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -45,6 +45,7 @@
 #include <asm/mpspec.h>
 #include <asm/ldt.h>
 #include <asm/hypercall.h>
+#include <asm/fixmap.h>
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/support.h>
 #include <asm/debugreg.h>
@@ -221,6 +222,53 @@ void free_vcpu_struct(struct vcpu *v)
     free_xenheap_page(v);
 }
 
+static DEFINE_PER_CPU(struct page_info *[
+    PFN_UP(sizeof(struct vcpu_guest_context))], vgc_pages);
+
+struct vcpu_guest_context *alloc_vcpu_guest_context(void)
+{
+    unsigned int i, cpu = smp_processor_id();
+    enum fixed_addresses idx = FIX_VGC_BEGIN -
+        cpu * PFN_UP(sizeof(struct vcpu_guest_context));
+
+#ifdef __i386__
+    BUILD_BUG_ON(sizeof(struct vcpu_guest_context) > PAGE_SIZE);
+#endif
+    BUG_ON(per_cpu(vgc_pages[0], cpu) != NULL);
+
+    for ( i = 0; i < PFN_UP(sizeof(struct vcpu_guest_context)); ++i )
+    {
+        struct page_info *pg = alloc_domheap_page(NULL, 0);
+
+        if ( unlikely(pg == NULL) )
+        {
+            free_vcpu_guest_context(NULL);
+            return NULL;
+        }
+        __set_fixmap(idx - i, page_to_mfn(pg), __PAGE_HYPERVISOR);
+        per_cpu(vgc_pages[i], cpu) = pg;
+    }
+    return (void *)fix_to_virt(idx);
+}
+
+void free_vcpu_guest_context(struct vcpu_guest_context *vgc)
+{
+    unsigned int i, cpu = smp_processor_id();
+    enum fixed_addresses idx = FIX_VGC_BEGIN -
+        cpu * PFN_UP(sizeof(struct vcpu_guest_context));
+
+    BUG_ON(vgc && vgc != (void *)fix_to_virt(idx));
+
+    for ( i = 0; i < PFN_UP(sizeof(struct vcpu_guest_context)); ++i )
+    {
+        if ( !per_cpu(vgc_pages[i], cpu) )
+            continue;
+        __set_fixmap(idx - i, 0, 0);
+        free_domheap_page(per_cpu(vgc_pages[i], cpu));
+        per_cpu(vgc_pages[i], cpu) = NULL;
+    }
+}
+
 #ifdef __x86_64__
 
 static int setup_compat_l4(struct vcpu *v)
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -832,12 +832,12 @@ long do_vcpu_op(int cmd, int vcpuid, XEN
         if ( v->vcpu_info == &dummy_vcpu_info )
             return -EINVAL;
 
-        if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
+        if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
             return -ENOMEM;
 
         if ( copy_from_guest(ctxt, arg, 1) )
         {
-            xfree(ctxt);
+            free_vcpu_guest_context(ctxt);
             return -EFAULT;
         }
 
@@ -847,7 +847,7 @@ long do_vcpu_op(int cmd, int vcpuid, XEN
             rc = boot_vcpu(d, vcpuid, ctxt);
         domain_unlock(d);
 
-        xfree(ctxt);
+        free_vcpu_guest_context(ctxt);
         break;
 
     case VCPUOP_up:
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -295,7 +295,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
                      < sizeof(struct compat_vcpu_guest_context));
 #endif
         ret = -ENOMEM;
-        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
+        if ( (c.nat = alloc_vcpu_guest_context()) == NULL )
             goto svc_out;
 
 #ifdef CONFIG_COMPAT
@@ -318,7 +318,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
         }
 
     svc_out:
-        xfree(c.nat);
+        free_vcpu_guest_context(c.nat);
         rcu_unlock_domain(d);
     }
     break;
--- a/xen/include/asm-ia64/domain.h
+++ b/xen/include/asm-ia64/domain.h
@@ -25,6 +25,9 @@ struct vcpu;
 extern void relinquish_vcpu_resources(struct vcpu *v);
 extern int vcpu_late_initialise(struct vcpu *v);
 
+#define alloc_vcpu_guest_context() xmalloc(struct vcpu_guest_context)
+#define free_vcpu_guest_context(vgc) xfree(vgc)
+
 /* given a current domain metaphysical address, return the physical address */
 extern unsigned long translate_domain_mpaddr(unsigned long mpaddr,
                                              struct p2m_entry* entry);
--- a/xen/include/asm-x86/fixmap.h
+++ b/xen/include/asm-x86/fixmap.h
@@ -16,6 +16,7 @@
 #include <asm/apicdef.h>
 #include <asm/acpi.h>
 #include <asm/page.h>
+#include <xen/pfn.h>
 #include <xen/kexec.h>
 #include <xen/iommu.h>
 #include <asm/amd-iommu.h>
@@ -34,6 +35,12 @@ enum fixed_addresses {
 #ifdef __i386__
     FIX_PAE_HIGHMEM_0,
     FIX_PAE_HIGHMEM_END = FIX_PAE_HIGHMEM_0 + NR_CPUS-1,
+#define FIX_VGC_END FIX_PAE_HIGHMEM_0
+#define FIX_VGC_BEGIN FIX_PAE_HIGHMEM_END
+#else
+    FIX_VGC_END,
+    FIX_VGC_BEGIN = FIX_VGC_END
+      + PFN_UP(sizeof(struct vcpu_guest_context)) * NR_CPUS - 1,
 #endif
     FIX_APIC_BASE,
     FIX_IO_APIC_BASE_0,
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -32,6 +32,12 @@ void free_domain_struct(struct domain *d
 struct vcpu *alloc_vcpu_struct(void);
 void free_vcpu_struct(struct vcpu *v);
 
+/* Allocate/free a vcpu_guest_context structure. */
+#ifndef alloc_vcpu_guest_context
+struct vcpu_guest_context *alloc_vcpu_guest_context(void);
+void free_vcpu_guest_context(struct vcpu_guest_context *);
+#endif
+
 /*
  * Initialise/destroy arch-specific details of a VCPU.
  *  - vcpu_initialise() is called after the basic generic fields of the
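
A note on the fixmap arithmetic, assuming the usual Xen mapping
fix_to_virt(idx) == FIXADDR_TOP - (idx << PAGE_SHIFT), i.e. higher
indices correspond to lower virtual addresses: with
N = PFN_UP(sizeof(struct vcpu_guest_context)), each CPU's slots
idx, idx - 1, ..., idx - (N - 1) cover an increasing, virtually
contiguous range starting at fix_to_virt(idx), since
fix_to_virt(idx - 1) == fix_to_virt(idx) + PAGE_SIZE. That is why
alloc_vcpu_guest_context() can hand back fix_to_virt(idx) as a single
flat buffer spanning all N pages.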


Attachment: x86-alloc-vcpu-guest-context.patch
Description: Text document
