[Xen-changelog] [xen-unstable] hvm: Xen must take care to hold a reference to ioreq pages, to ensure

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] hvm: Xen must take care to hold a reference to ioreq pages, to ensure
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 13 Apr 2007 10:50:30 -0700
Delivery-date: Fri, 13 Apr 2007 11:23:07 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1176463746 -3600
# Node ID ba8d4bc2435a742f74909c3fc9efab1655aae41f
# Parent  0b14423e75f8e5207c87b0d5de1a40dcf7713002
hvm: Xen must take care to hold a reference to ioreq pages, to ensure
that a domain runs only when it has valid mapped ioreq pages, and to
safely drop ioreq page references when a domain dies.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/domain.c             |    4 +
 xen/arch/x86/hvm/hvm.c            |  106 ++++++++++++++++++++++++++------------
 xen/arch/x86/x86_32/domain_page.c |   21 +++++++
 xen/common/domain.c               |    2 
 xen/include/asm-x86/hvm/hvm.h     |    1 
 xen/include/xen/domain_page.h     |   13 +++-
 6 files changed, 111 insertions(+), 36 deletions(-)
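
The heart of the change is the hvm_set_ioreq_pfn()/hvm_clear_ioreq_pfn() pair added to xen/arch/x86/hvm/hvm.c below: take a writable type reference on the guest frame, map it into Xen's global address space, publish the virtual address with cmpxchg() so that only the first caller can win, and then re-check d->is_dying so that a mapping established concurrently with domain destruction is torn down on the spot rather than leaked. The stand-alone sketch below restates that flow outside the Xen tree; the stubbed helpers (take_page_ref, drop_page_ref, map_frame, unmap_frame) are placeholders for get_page_and_type(), put_page_and_type(), map_domain_page_global() and unmap_domain_page_global(), and, unlike the real hvm_clear_ioreq_pfn(), the sketch passes the frame number around instead of recovering it from the mapping with the new mfn_from_mapped_domain_page() helper.

/*
 * Illustrative sketch only -- the types and stubbed helpers below stand in
 * for the real Xen primitives; they are not the hypervisor's API.
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct fake_domain {
    atomic_bool is_dying;         /* set by the killer before teardown       */
    _Atomic uintptr_t ioreq_va;   /* passed as *pva below; 0 == not yet set  */
};

/* Trivial stubs so the sketch compiles; imagine get_page_and_type() etc. */
static bool take_page_ref(struct fake_domain *d, unsigned long gmfn)
{ (void)d; (void)gmfn; return true; }
static void drop_page_ref(struct fake_domain *d, unsigned long gmfn)
{ (void)d; (void)gmfn; }
static void *map_frame(unsigned long gmfn)
{ static char page[4096]; (void)gmfn; return page; }
static void unmap_frame(void *va)
{ (void)va; }

void clear_ioreq_pfn(struct fake_domain *d, _Atomic uintptr_t *pva,
                     unsigned long gmfn)
{
    /* Atomically steal the published VA so only one path tears it down. */
    uintptr_t va = atomic_exchange(pva, (uintptr_t)0);

    if ( va == 0 )
        return;

    unmap_frame((void *)va);
    drop_page_ref(d, gmfn);
}

int set_ioreq_pfn(struct fake_domain *d, _Atomic uintptr_t *pva,
                  unsigned long gmfn)
{
    uintptr_t expected = 0;
    void *va;

    if ( !take_page_ref(d, gmfn) )        /* pin the guest frame first */
        return -EINVAL;

    va = map_frame(gmfn);
    if ( va == NULL )
    {
        drop_page_ref(d, gmfn);
        return -ENOMEM;
    }

    /* Publish the mapping; fail if another caller already set it. */
    if ( !atomic_compare_exchange_strong(pva, &expected, (uintptr_t)va) )
    {
        unmap_frame(va);
        drop_page_ref(d, gmfn);
        return -EINVAL;
    }

    /*
     * Re-check the dying flag *after* publishing.  If the killer raised
     * the flag and swept a still-empty pointer concurrently, nobody else
     * will ever free this mapping, so undo it here.
     */
    if ( atomic_load(&d->is_dying) )
        clear_ioreq_pfn(d, pva, gmfn);

    return 0;
}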

diff -r 0b14423e75f8 -r ba8d4bc2435a xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Apr 13 12:01:37 2007 +0100
+++ b/xen/arch/x86/domain.c     Fri Apr 13 12:29:06 2007 +0100
@@ -1540,8 +1540,10 @@ void domain_relinquish_resources(struct 
     relinquish_memory(d, &d->xenpage_list, PGT_l2_page_table);
     relinquish_memory(d, &d->page_list, PGT_l2_page_table);
 
-    /* Free page used by xen oprofile buffer */
+    /* Free page used by xen oprofile buffer. */
     free_xenoprof_pages(d);
+
+    hvm_domain_relinquish_resources(d);
 }
 
 void arch_dump_domain_info(struct domain *d)
diff -r 0b14423e75f8 -r ba8d4bc2435a xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri Apr 13 12:01:37 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Fri Apr 13 12:29:06 2007 +0100
@@ -146,6 +146,59 @@ void hvm_do_resume(struct vcpu *v)
     }
 }
 
+static void hvm_clear_ioreq_pfn(
+    struct domain *d, unsigned long *pva)
+{
+    unsigned long va, mfn;
+
+    BUG_ON(!d->is_dying);
+
+    if ( (va = xchg(pva, 0UL)) == 0UL )
+        return;
+
+    mfn = mfn_from_mapped_domain_page((void *)va);
+    unmap_domain_page_global((void *)va);
+    put_page_and_type(mfn_to_page(mfn));
+}
+
+static int hvm_set_ioreq_pfn(
+    struct domain *d, unsigned long *pva, unsigned long gmfn)
+{
+    unsigned long mfn;
+    void *va;
+
+    mfn = gmfn_to_mfn(d, gmfn);
+    if ( !mfn_valid(mfn) ||
+         !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
+        return -EINVAL;
+
+    va = map_domain_page_global(mfn);
+    if ( va == NULL )
+    {
+        put_page_and_type(mfn_to_page(mfn));
+        return -ENOMEM;
+    }
+
+    if ( cmpxchg(pva, 0UL, (unsigned long)va) != 0UL )
+    {
+        unmap_domain_page_global(va);
+        put_page_and_type(mfn_to_page(mfn));
+        return -EINVAL;
+    }
+
+    /*
+     * Check dying status /after/ setting *pva. cmpxchg() is a barrier.
+     * We race against hvm_domain_relinquish_resources(). 
+     */
+    if ( d->is_dying )
+        hvm_clear_ioreq_pfn(d, pva);
+
+    /* Balance the domain_pause() in hvm_domain_initialise(). */
+    domain_unpause(d);
+
+    return 0;
+}
+
 int hvm_domain_initialise(struct domain *d)
 {
     int rc;
@@ -161,7 +214,6 @@ int hvm_domain_initialise(struct domain 
     spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
     spin_lock_init(&d->arch.hvm_domain.irq_lock);
 
-    /* paging support will be determined inside paging.c */
     rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
     if ( rc != 0 )
         return rc;
@@ -169,7 +221,17 @@ int hvm_domain_initialise(struct domain 
     vpic_init(d);
     vioapic_init(d);
 
+    /* Do not allow domain to run until it has ioreq shared pages. */
+    domain_pause(d); /* HVM_PARAM_IOREQ_PFN */
+    domain_pause(d); /* HVM_PARAM_BUFIOREQ_PFN */
+
     return 0;
+}
+
+void hvm_domain_relinquish_resources(struct domain *d)
+{
+    hvm_clear_ioreq_pfn(d, &d->arch.hvm_domain.shared_page_va);
+    hvm_clear_ioreq_pfn(d, &d->arch.hvm_domain.buffered_io_va);
 }
 
 void hvm_domain_destroy(struct domain *d)
@@ -178,13 +240,6 @@ void hvm_domain_destroy(struct domain *d
     rtc_deinit(d);
     pmtimer_deinit(d);
     hpet_deinit(d);
-
-    if ( d->arch.hvm_domain.shared_page_va )
-        unmap_domain_page_global(
-            (void *)d->arch.hvm_domain.shared_page_va);
-
-    if ( d->arch.hvm_domain.buffered_io_va )
-        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
 }
 
 static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
@@ -928,8 +983,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
         struct xen_hvm_param a;
         struct domain *d;
         struct vcpu *v;
-        unsigned long mfn;
-        void *p;
 
         if ( copy_from_guest(&a, arg, 1) )
             return -EFAULT;
@@ -956,30 +1009,19 @@ long do_hvm_op(unsigned long op, XEN_GUE
             switch ( a.index )
             {
             case HVM_PARAM_IOREQ_PFN:
-                if ( d->arch.hvm_domain.shared_page_va )
-                    goto param_fail;
-                mfn = gmfn_to_mfn(d, a.value);
-                if ( mfn == INVALID_MFN )
-                    goto param_fail;
-                p = map_domain_page_global(mfn);
-                if ( p == NULL )
-                    goto param_fail;
-                d->arch.hvm_domain.shared_page_va = (unsigned long)p;
-                /* Initialise evtchn port info if VCPUs already created. */
-                for_each_vcpu ( d, v )
-                    get_vio(d, v->vcpu_id)->vp_eport =
-                    v->arch.hvm_vcpu.xen_port;
+                rc = hvm_set_ioreq_pfn(
+                    d, &d->arch.hvm_domain.shared_page_va, a.value);
+                if ( rc == 0 )
+                {
+                    /* Initialise evtchn port info if VCPUs already created. */
+                    for_each_vcpu ( d, v )
+                        get_vio(d, v->vcpu_id)->vp_eport =
+                        v->arch.hvm_vcpu.xen_port;
+                }
                 break;
             case HVM_PARAM_BUFIOREQ_PFN:
-                if ( d->arch.hvm_domain.buffered_io_va )
-                    goto param_fail;
-                mfn = gmfn_to_mfn(d, a.value);
-                if ( mfn == INVALID_MFN )
-                    goto param_fail;
-                p = map_domain_page_global(mfn);
-                if ( p == NULL )
-                    goto param_fail;
-                d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
+                rc = hvm_set_ioreq_pfn(
+                    d, &d->arch.hvm_domain.buffered_io_va, a.value);
                 break;
             case HVM_PARAM_CALLBACK_IRQ:
                 hvm_set_callback_via(d, a.value);
diff -r 0b14423e75f8 -r ba8d4bc2435a xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Fri Apr 13 12:01:37 2007 +0100
+++ b/xen/arch/x86/x86_32/domain_page.c Fri Apr 13 12:29:06 2007 +0100
@@ -251,3 +251,24 @@ void unmap_domain_page_global(void *va)
     idx = (__va - IOREMAP_VIRT_START) >> PAGE_SHIFT;
     set_bit(idx, garbage);
 }
+
+unsigned long mfn_from_mapped_domain_page(void *va) 
+{
+    unsigned long __va = (unsigned long)va;
+    l2_pgentry_t *pl2e;
+    l1_pgentry_t *pl1e;
+    unsigned int idx;
+    struct mapcache *cache;
+
+    if ( (__va >= MAPCACHE_VIRT_START) && (__va < MAPCACHE_VIRT_END) )
+    {
+        cache = &mapcache_current_vcpu()->domain->arch.mapcache;
+        idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
+        return l1e_get_pfn(cache->l1tab[idx]);
+    }
+
+    ASSERT(__va >= IOREMAP_VIRT_START);
+    pl2e = virt_to_xen_l2e(__va);
+    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(__va);
+    return l1e_get_pfn(*pl1e);
+}
diff -r 0b14423e75f8 -r ba8d4bc2435a xen/common/domain.c
--- a/xen/common/domain.c       Fri Apr 13 12:01:37 2007 +0100
+++ b/xen/common/domain.c       Fri Apr 13 12:29:06 2007 +0100
@@ -314,7 +314,7 @@ void domain_kill(struct domain *d)
     }
 
     /* Tear down state /after/ setting the dying flag. */
-    smp_wmb();
+    smp_mb();
 
     gnttab_release_mappings(d);
     domain_relinquish_resources(d);
diff -r 0b14423e75f8 -r ba8d4bc2435a xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Fri Apr 13 12:01:37 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Fri Apr 13 12:29:06 2007 +0100
@@ -145,6 +145,7 @@ extern struct hvm_function_table hvm_fun
 extern struct hvm_function_table hvm_funcs;
 
 int hvm_domain_initialise(struct domain *d);
+void hvm_domain_relinquish_resources(struct domain *d);
 void hvm_domain_destroy(struct domain *d);
 
 int hvm_vcpu_initialise(struct vcpu *v);
diff -r 0b14423e75f8 -r ba8d4bc2435a xen/include/xen/domain_page.h
--- a/xen/include/xen/domain_page.h     Fri Apr 13 12:01:37 2007 +0100
+++ b/xen/include/xen/domain_page.h     Fri Apr 13 12:29:06 2007 +0100
@@ -33,6 +33,13 @@ void unmap_domain_page(void *va);
  */
 void *map_domain_page_global(unsigned long mfn);
 void unmap_domain_page_global(void *va);
+
+/* 
+ * Convert a VA (within a page previously mapped in the context of the
+ * currently-executing VCPU via a call to map_domain_page(), or via a
+ * previous call to map_domain_page_global()) to the mapped page frame.
+ */
+unsigned long mfn_from_mapped_domain_page(void *va);
 
 #define DMCACHE_ENTRY_VALID 1U
 #define DMCACHE_ENTRY_HELD  2U
@@ -96,11 +103,13 @@ domain_mmap_cache_destroy(struct domain_
 
 #else /* !CONFIG_DOMAIN_PAGE */
 
-#define map_domain_page(mfn)                maddr_to_virt((mfn)<<PAGE_SHIFT)
+#define map_domain_page(mfn)                mfn_to_virt(mfn)
 #define unmap_domain_page(va)               ((void)(va))
 
-#define map_domain_page_global(mfn)         maddr_to_virt((mfn)<<PAGE_SHIFT)
+#define map_domain_page_global(mfn)         mfn_to_virt(mfn)
 #define unmap_domain_page_global(va)        ((void)(va))
+
+#define mfn_from_mapped_domain_page(va)     virt_to_mfn(va)
 
 struct domain_mmap_cache { 
 };
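
Two details worth noting, both read off the hunks above rather than stated in the changeset description: the promotion of smp_wmb() to smp_mb() in domain_kill() looks like the write side of the store/load race documented in hvm_set_ioreq_pfn() -- the killer stores the dying flag and then reads the published ioreq VAs during relinquish, while the setter stores the VA (cmpxchg(), a full barrier) and then reads the dying flag, and closing that race needs a full barrier on both sides. And the two domain_pause() calls in hvm_domain_initialise(), each balanced by the domain_unpause() at the end of hvm_set_ioreq_pfn(), are what keep an HVM guest from running before both HVM_PARAM_IOREQ_PFN and HVM_PARAM_BUFIOREQ_PFN have been supplied.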

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
