Re: [Xen-devel] [PATCH 1/3] continuable destroy domain: common part

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: Re: [Xen-devel] [PATCH 1/3] continuable destroy domain: common part
From: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Date: Fri, 31 Aug 2007 23:19:59 +0900
Cc: KRYSANS@xxxxxxxxxx, xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
In-reply-to: <20070831140609.GA23197%yamahata@xxxxxxxxxxxxx>
References: <20070831140609.GA23197%yamahata@xxxxxxxxxxxxx>
User-agent: Mutt/1.4.2.1i

Sorry, I sent out the old one. Here is the correct patch.
The difference is the following hunk:

@@ -434,11 +434,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
 
             ret = -EINVAL;
             if ( d != current->domain )
-            {
-                domain_kill(d);
-                ret = 0;
-            }
-
+                ret = domain_kill(d);
         destroydomain_out:
             rcu_unlock_domain(d);
         }
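
With the corrected hunk, do_domctl() propagates domain_kill()'s return
value instead of discarding it, so a preempted destroy surfaces as
-EAGAIN in the toolstack, which simply retries (see the
xc_domain_destroy() change in the full patch below). A minimal caller
sketch -- hypothetical code, assuming only the usual <xenctrl.h>
interface:

/* Hypothetical toolstack helper: destroy a guest and report errors.
 * After this patch the EAGAIN retry loop lives inside
 * xc_domain_destroy() itself, so a caller only sees the final result. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xenctrl.h>    /* xc_domain_destroy() */

static int destroy_guest(int xc_handle, uint32_t domid)
{
    int rc = xc_domain_destroy(xc_handle, domid);

    if ( rc != 0 )
        fprintf(stderr, "destroying domain %u failed: %s\n",
                (unsigned int)domid, strerror(errno));
    return rc;
}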

thanks,


# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1188569768 -32400
# Node ID a9fc5e1662e0bf1387e63ed3b7212ffc38420155
# Parent  96f64f4c42f043e3af2db369f4b9bdb9fcef017b
The XEN_DOMCTL_destroydomain hypercall frees domain resources; in
particular, it frees all pages of the domain.
When domain memory is very large, this takes long enough to trigger
soft lockup warning messages.
To prevent the soft lockup, make the hypercall continuable.
PATCHNAME: make_xen_domctl_destroydomain_hypercall_continuable

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r 96f64f4c42f0 -r a9fc5e1662e0 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Fri Aug 31 11:21:35 2007 +0100
+++ b/tools/libxc/xc_domain.c   Fri Aug 31 23:16:08 2007 +0900
@@ -55,10 +55,14 @@ int xc_domain_destroy(int xc_handle,
 int xc_domain_destroy(int xc_handle,
                       uint32_t domid)
 {
+    int ret;
     DECLARE_DOMCTL;
     domctl.cmd = XEN_DOMCTL_destroydomain;
     domctl.domain = (domid_t)domid;
-    return do_domctl(xc_handle, &domctl);
+    do {
+        ret = do_domctl(xc_handle, &domctl);
+    } while (ret && errno == EAGAIN);
+    return ret;
 }
 
 int xc_domain_shutdown(int xc_handle,
diff -r 96f64f4c42f0 -r a9fc5e1662e0 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Fri Aug 31 11:21:35 2007 +0100
+++ b/xen/arch/ia64/xen/domain.c        Fri Aug 31 23:16:08 2007 +0900
@@ -936,7 +936,7 @@ static void relinquish_memory(struct dom
     spin_unlock_recursive(&d->page_alloc_lock);
 }
 
-void domain_relinquish_resources(struct domain *d)
+int domain_relinquish_resources(struct domain *d)
 {
     /* Relinquish guest resources for VT-i domain. */
     if (d->vcpu[0] && VMX_DOMAIN(d->vcpu[0]))
@@ -954,6 +954,8 @@ void domain_relinquish_resources(struct 
 
     /* Free page used by xen oprofile buffer */
     free_xenoprof_pages(d);
+
+    return 0;
 }
 
 unsigned long
diff -r 96f64f4c42f0 -r a9fc5e1662e0 xen/arch/powerpc/domain.c
--- a/xen/arch/powerpc/domain.c Fri Aug 31 11:21:35 2007 +0100
+++ b/xen/arch/powerpc/domain.c Fri Aug 31 23:16:08 2007 +0900
@@ -313,13 +313,13 @@ static void relinquish_memory(struct dom
     spin_unlock_recursive(&d->page_alloc_lock);
 }
 
-void domain_relinquish_resources(struct domain *d)
+int domain_relinquish_resources(struct domain *d)
 {
     relinquish_memory(d, &d->xenpage_list);
     relinquish_memory(d, &d->page_list);
     xfree(d->arch.foreign_mfns);
     xfree(d->arch.p2m);
-    return;
+    return 0;
 }
 
 void arch_dump_domain_info(struct domain *d)
diff -r 96f64f4c42f0 -r a9fc5e1662e0 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Aug 31 11:21:35 2007 +0100
+++ b/xen/arch/x86/domain.c     Fri Aug 31 23:16:08 2007 +0900
@@ -1717,7 +1717,7 @@ static void vcpu_destroy_pagetables(stru
     v->arch.cr3 = 0;
 }
 
-void domain_relinquish_resources(struct domain *d)
+int domain_relinquish_resources(struct domain *d)
 {
     struct vcpu *v;
 
@@ -1754,6 +1754,8 @@ void domain_relinquish_resources(struct 
 
     if ( is_hvm_domain(d) )
         hvm_domain_relinquish_resources(d);
+
+    return 0;
 }
 
 void arch_dump_domain_info(struct domain *d)
diff -r 96f64f4c42f0 -r a9fc5e1662e0 xen/common/domain.c
--- a/xen/common/domain.c       Fri Aug 31 11:21:35 2007 +0100
+++ b/xen/common/domain.c       Fri Aug 31 23:16:08 2007 +0900
@@ -250,7 +250,7 @@ struct domain *domain_create(
     return d;
 
  fail:
-    d->is_dying = 1;
+    d->is_dying = DOMDYING_dead;
     atomic_set(&d->refcnt, DOMAIN_DESTROYED);
     if ( init_status & INIT_arch )
         arch_domain_destroy(d);
@@ -310,26 +310,37 @@ struct domain *rcu_lock_domain_by_id(dom
 }
 
 
-void domain_kill(struct domain *d)
-{
-    domain_pause(d);
-
-    /* Already dying? Then bail. */
-    if ( test_and_set_bool(d->is_dying) )
-    {
-        domain_unpause(d);
-        return;
-    }
-
-    evtchn_destroy(d);
-    gnttab_release_mappings(d);
-    domain_relinquish_resources(d);
-    put_domain(d);
-
-    /* Kick page scrubbing after domain_relinquish_resources(). */
-    page_scrub_kick();
-
-    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+int domain_kill(struct domain *d)
+{
+    int rc = 0;
+
+    if ( d == current->domain )
+        return -EINVAL;
+
+    /* Protected by domctl_lock. */
+    switch ( d->is_dying )
+    {
+    case DOMDYING_alive:
+        domain_pause(d);
+        d->is_dying = DOMDYING_dying;
+        evtchn_destroy(d);
+        gnttab_release_mappings(d);
+    case DOMDYING_dying:
+        rc = domain_relinquish_resources(d);
+        page_scrub_kick();
+        if ( rc != 0 )
+        {
+            BUG_ON(rc != -EAGAIN);
+            break;
+        }
+        d->is_dying = DOMDYING_dead;
+        put_domain(d);
+        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+    case DOMDYING_dead:
+        break;
+    }
+
+    return rc;
 }
 
 
diff -r 96f64f4c42f0 -r a9fc5e1662e0 xen/common/domctl.c
--- a/xen/common/domctl.c       Fri Aug 31 11:21:35 2007 +0100
+++ b/xen/common/domctl.c       Fri Aug 31 23:16:08 2007 +0900
@@ -115,10 +115,10 @@ void getdomaininfo(struct domain *d, str
     info->cpu_time = cpu_time;
 
     info->flags = flags |
-        (d->is_dying                ? XEN_DOMINF_dying    : 0) |
-        (d->is_shut_down            ? XEN_DOMINF_shutdown : 0) |
-        (d->is_paused_by_controller ? XEN_DOMINF_paused   : 0) |
-        (d->debugger_attached       ? XEN_DOMINF_debugged : 0) |
+        ((d->is_dying == DOMDYING_dead) ? XEN_DOMINF_dying    : 0) |
+        (d->is_shut_down                ? XEN_DOMINF_shutdown : 0) |
+        (d->is_paused_by_controller     ? XEN_DOMINF_paused   : 0) |
+        (d->debugger_attached           ? XEN_DOMINF_debugged : 0) |
         d->shutdown_code << XEN_DOMINF_shutdownshift;
 
     if ( is_hvm_domain(d) )
@@ -434,11 +434,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
 
             ret = -EINVAL;
             if ( d != current->domain )
-            {
-                domain_kill(d);
-                ret = 0;
-            }
-
+                ret = domain_kill(d);
         destroydomain_out:
             rcu_unlock_domain(d);
         }
diff -r 96f64f4c42f0 -r a9fc5e1662e0 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Fri Aug 31 11:21:35 2007 +0100
+++ b/xen/include/asm-ia64/domain.h     Fri Aug 31 23:16:08 2007 +0900
@@ -18,7 +18,6 @@ struct tlb_track;
 struct tlb_track;
 #endif
 
-extern void domain_relinquish_resources(struct domain *);
 struct vcpu;
 extern void relinquish_vcpu_resources(struct vcpu *v);
 extern void vcpu_share_privregs_with_guest(struct vcpu *v);
diff -r 96f64f4c42f0 -r a9fc5e1662e0 xen/include/xen/domain.h
--- a/xen/include/xen/domain.h  Fri Aug 31 11:21:35 2007 +0100
+++ b/xen/include/xen/domain.h  Fri Aug 31 23:16:08 2007 +0900
@@ -45,7 +45,7 @@ int arch_set_info_guest(struct vcpu *, v
 int arch_set_info_guest(struct vcpu *, vcpu_guest_context_u);
 void arch_get_info_guest(struct vcpu *, vcpu_guest_context_u);
 
-void domain_relinquish_resources(struct domain *d);
+int domain_relinquish_resources(struct domain *d);
 
 void dump_pageframe_info(struct domain *d);
 
diff -r 96f64f4c42f0 -r a9fc5e1662e0 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Fri Aug 31 11:21:35 2007 +0100
+++ b/xen/include/xen/sched.h   Fri Aug 31 23:16:08 2007 +0900
@@ -191,7 +191,7 @@ struct domain
     /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
     bool_t           is_polling;
     /* Is this guest dying (i.e., a zombie)? */
-    bool_t           is_dying;
+    enum { DOMDYING_alive, DOMDYING_dying, DOMDYING_dead } is_dying;
     /* Domain is paused by controller software? */
     bool_t           is_paused_by_controller;
 
@@ -335,7 +335,7 @@ static inline struct domain *rcu_lock_cu
 
 struct domain *get_domain_by_id(domid_t dom);
 void domain_destroy(struct domain *d);
-void domain_kill(struct domain *d);
+int domain_kill(struct domain *d);
 void domain_shutdown(struct domain *d, u8 reason);
 void domain_resume(struct domain *d);
 void domain_pause_for_debugger(void);
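
A note on where -EAGAIN actually comes from: this common part only
changes the plumbing. All three domain_relinquish_resources()
implementations above still return 0 unconditionally; the arch-specific
patches in this series are what make page relinquishing preemptible.
A rough sketch of the expected pattern -- hypothetical code, not taken
from any patch here, assuming Xen's hypercall_preempt_check():

/* Hypothetical arch-side sketch: free pages until a continuation is
 * needed, then return -EAGAIN, which domain_kill() propagates up to
 * the retrying toolstack. */
static int relinquish_memory(struct domain *d, struct list_head *list)
{
    struct page_info *page;
    int ret = 0;

    spin_lock_recursive(&d->page_alloc_lock);
    while ( !list_empty(list) )
    {
        page = list_entry(list->next, struct page_info, list);
        /* ... drop references and free 'page' exactly as before ... */

        if ( hypercall_preempt_check() )
        {
            ret = -EAGAIN;  /* resume from here on the next hypercall */
            break;
        }
    }
    spin_unlock_recursive(&d->page_alloc_lock);
    return ret;
}

Because domain_kill() deliberately falls through from DOMDYING_alive
into DOMDYING_dying, the first XEN_DOMCTL_destroydomain call does the
one-shot teardown (event channels, grant table mappings), and each
retried call resumes relinquishing memory until it returns 0 and the
domain reaches DOMDYING_dead.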



-- 
yamahata

Attachment: 15809_a9fc5e1662e0_make_xen_domctl_destroydomain_hypercall_continuable.patch
Description: Text Data
