[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 3/3] x86 / vmx: use a 'normal' domheap page for APIC_DEFAULT_PHYS_BASE



vmx_alloc_vlapic_mapping() currently contains some very odd looking code
that allocates a MEMF_no_owner domheap page and then shares it with the
guest as if it were a xenheap page. This then requires
vmx_free_vlapic_mapping() to call a special function in the mm code:
free_shared_domheap_page().

By using a 'normal' domheap page (i.e. by not passing MEMF_no_owner to
alloc_domheap_page()), the odd looking code in vmx_alloc_vlapic_mapping()
can simply use get_page_and_type() to set up a writable mapping before
insertion in the P2M and vmx_free_vlapic_mapping() can simply release the
page using put_page_alloc_ref() followed by put_page_and_type(). This
then allows free_shared_domheap_page() to be purged.

There is, however, some fall-out from this simplification:

- alloc_domheap_page() will now call assign_pages() and run into the fact
  that 'max_pages' is not set until some time after domain_create(). To
  avoid an allocation failure, assign_pages() is modified to ignore the
  max_pages limit if 'creation_finished' is false. That value is not set
  to true until domain_unpause_by_systemcontroller() is called, and thus
  the guest cannot run (and hence cause memory allocation) until
  creation_finished is set to true.

- Because the domheap page is no longer a pseudo-xenheap page, the
  reference counting will prevent the domain from being destroyed. Thus
  the call to vmx_free_vlapic_mapping() is moved from the
  domain_destroy() method into the domain_relinquish_resources() method.

Signed-off-by: Paul Durrant <pdurrant@xxxxxxxxxx>
---
Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wl@xxxxxxx>
Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Julien Grall <julien@xxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmx.c | 29 ++++++++++++++++++++++-------
 xen/arch/x86/mm.c          | 10 ----------
 xen/common/page_alloc.c    |  3 ++-
 xen/include/asm-x86/mm.h   |  2 --
 4 files changed, 24 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 3fd3ac61e1..a2e6081485 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -421,10 +421,6 @@ static int vmx_domain_initialise(struct domain *d)
 }
 
 static void vmx_domain_relinquish_resources(struct domain *d)
-{
-}
-
-static void vmx_domain_destroy(struct domain *d)
 {
     if ( !has_vlapic(d) )
         return;
@@ -432,6 +428,10 @@ static void vmx_domain_destroy(struct domain *d)
     vmx_free_vlapic_mapping(d);
 }
 
+static void vmx_domain_destroy(struct domain *d)
+{
+}
+
 static int vmx_vcpu_initialise(struct vcpu *v)
 {
     int rc;
@@ -3034,12 +3034,22 @@ static int vmx_alloc_vlapic_mapping(struct domain *d)
     if ( !cpu_has_vmx_virtualize_apic_accesses )
         return 0;
 
-    pg = alloc_domheap_page(d, MEMF_no_owner);
+    pg = alloc_domheap_page(d, 0);
     if ( !pg )
         return -ENOMEM;
+
+    if ( !get_page_and_type(pg, d, PGT_writable_page) )
+    {
+        /*
+         * The domain can't possibly know about this page yet, so failure
+         * here is a clear indication of something fishy going on.
+         */
+        domain_crash(d);
+        return -ENODATA;
+    }
+
     mfn = page_to_mfn(pg);
     clear_domain_page(mfn);
-    share_xen_page_with_guest(pg, d, SHARE_rw);
     d->arch.hvm.vmx.apic_access_mfn = mfn;
 
     return set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), mfn,
@@ -3052,7 +3062,12 @@ static void vmx_free_vlapic_mapping(struct domain *d)
     mfn_t mfn = d->arch.hvm.vmx.apic_access_mfn;
 
     if ( !mfn_eq(mfn, INVALID_MFN) )
-        free_shared_domheap_page(mfn_to_page(mfn));
+    {
+        struct page_info *pg = mfn_to_page(mfn);
+
+        put_page_alloc_ref(pg);
+        put_page_and_type(pg);
+    }
 }
 
 static void vmx_install_vlapic_mapping(struct vcpu *v)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 654190e9e9..2a6d2e8af9 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -496,16 +496,6 @@ void share_xen_page_with_guest(struct page_info *page, struct domain *d,
     spin_unlock(&d->page_alloc_lock);
 }
 
-void free_shared_domheap_page(struct page_info *page)
-{
-    put_page_alloc_ref(page);
-    if ( !test_and_clear_bit(_PGC_xen_heap, &page->count_info) )
-        ASSERT_UNREACHABLE();
-    page->u.inuse.type_info = 0;
-    page_set_owner(page, NULL);
-    free_domheap_page(page);
-}
-
 void make_cr3(struct vcpu *v, mfn_t mfn)
 {
     struct domain *d = v->domain;
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 919a270587..ef327072ed 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2269,7 +2269,8 @@ int assign_pages(
 
     if ( !(memflags & MEMF_no_refcount) )
     {
-        if ( unlikely((d->tot_pages + (1 << order)) > d->max_pages) )
+        if ( unlikely((d->tot_pages + (1 << order)) > d->max_pages) &&
+             d->creation_finished )
         {
             gprintk(XENLOG_INFO, "Over-allocation for domain %u: "
                     "%u > %u\n", d->domain_id,
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 2ca8882ad0..e429f38228 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -317,8 +317,6 @@ struct page_info
 
 #define maddr_get_owner(ma)   (page_get_owner(maddr_to_page((ma))))
 
-extern void free_shared_domheap_page(struct page_info *page);
-
 #define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
 extern unsigned long max_page;
 extern unsigned long total_pages;
-- 
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.