WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] cpupools: Make interface more consistent

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] cpupools: Make interface more consistent
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 27 Oct 2010 19:15:18 -0700
Delivery-date: Wed, 27 Oct 2010 19:15:29 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1287922190 -3600
# Node ID c6e2f69e1807fb232b16983b040489fe30241cd7
# Parent  0dc0bc411035725beb181eeb7b611eefa8ccf7d4
cpupools: Make interface more consistent

The current cpupools code interface is a bit inconsistent.  This
patch addresses that by making the sequence of interactions for each
vcpu in a pool look like this:

alloc_vdata() -- allocates and sets up vcpu data
insert_vcpu() -- the vcpu is ready to run in this pool
remove_vcpu() -- take the vcpu out of the pool
free_vdata()  -- delete allocated vcpu data

(Previously, remove_vcpu and free_vdata were combined into a "destroy
vcpu", and insert_vcpu was only called for idle vcpus.)

This also addresses a bug in credit2 which was caused by a
misunderstanding of the cpupools interface.

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Acked-by: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
---
 xen/common/sched_credit.c  |   30 +++++++++++++++---------------
 xen/common/sched_credit2.c |   29 +++++++++++++----------------
 xen/common/sched_sedf.c    |    7 -------
 xen/common/schedule.c      |   10 ++++++++--
 xen/include/xen/sched-if.h |    3 ++-
 5 files changed, 38 insertions(+), 41 deletions(-)

diff -r 0dc0bc411035 -r c6e2f69e1807 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Thu Oct 21 18:51:36 2010 +0100
+++ b/xen/common/sched_credit.c Sun Oct 24 13:09:50 2010 +0100
@@ -677,9 +677,22 @@ static void
 static void
 csched_free_vdata(const struct scheduler *ops, void *priv)
 {
+    struct csched_vcpu *svc = priv;
+
+    BUG_ON( !list_empty(&svc->runq_elem) );
+
+    xfree(svc);
+}
+
+static void
+csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
+{
     struct csched_private *prv = CSCHED_PRIV(ops);
-    struct csched_vcpu *svc = priv;
+    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_dom * const sdom = svc->sdom;
     unsigned long flags;
+
+    CSCHED_STAT_CRANK(vcpu_destroy);
 
     if ( __vcpu_on_runq(svc) )
         __runq_remove(svc);
@@ -691,21 +704,8 @@ csched_free_vdata(const struct scheduler
 
     spin_unlock_irqrestore(&(prv->lock), flags);
 
-    xfree(svc);
-}
-
-static void
-csched_vcpu_destroy(const struct scheduler *ops, struct vcpu *vc)
-{
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
-    struct csched_dom * const sdom = svc->sdom;
-
-    CSCHED_STAT_CRANK(vcpu_destroy);
-
     BUG_ON( sdom == NULL );
     BUG_ON( !list_empty(&svc->runq_elem) );
-
-    csched_free_vdata(ops, svc);
 }
 
 static void
@@ -1561,7 +1561,7 @@ const struct scheduler sched_credit_def 
     .destroy_domain = csched_dom_destroy,
 
     .insert_vcpu    = csched_vcpu_insert,
-    .destroy_vcpu   = csched_vcpu_destroy,
+    .remove_vcpu    = csched_vcpu_remove,
 
     .sleep          = csched_vcpu_sleep,
     .wake           = csched_vcpu_wake,
diff -r 0dc0bc411035 -r c6e2f69e1807 xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c        Thu Oct 21 18:51:36 2010 +0100
+++ b/xen/common/sched_credit2.c        Sun Oct 24 13:09:50 2010 +0100
@@ -592,7 +592,18 @@ csched_free_vdata(const struct scheduler
 csched_free_vdata(const struct scheduler *ops, void *priv)
 {
     struct csched_vcpu *svc = priv;
-    struct vcpu *vc = svc->vcpu;
+
+    xfree(svc);
+}
+
+static void
+csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
+{
+    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_dom * const sdom = svc->sdom;
+
+    BUG_ON( sdom == NULL );
+    BUG_ON( !list_empty(&svc->runq_elem) );
 
     if ( ! is_idle_vcpu(vc) )
     {
@@ -610,20 +621,6 @@ csched_free_vdata(const struct scheduler
 
         svc->sdom->nr_vcpus--;
     }
-
-    xfree(svc);
-}
-
-static void
-csched_vcpu_destroy(const struct scheduler *ops, struct vcpu *vc)
-{
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
-    struct csched_dom * const sdom = svc->sdom;
-
-    BUG_ON( sdom == NULL );
-    BUG_ON( !list_empty(&svc->runq_elem) );
-
-    csched_free_vdata(ops, svc);
 }
 
 static void
@@ -1199,7 +1196,7 @@ const struct scheduler sched_credit2_def
     .destroy_domain = csched_dom_destroy,
 
     .insert_vcpu    = csched_vcpu_insert,
-    .destroy_vcpu   = csched_vcpu_destroy,
+    .remove_vcpu    = csched_vcpu_remove,
 
     .sleep          = csched_vcpu_sleep,
     .wake           = csched_vcpu_wake,
diff -r 0dc0bc411035 -r c6e2f69e1807 xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c   Thu Oct 21 18:51:36 2010 +0100
+++ b/xen/common/sched_sedf.c   Sun Oct 24 13:09:50 2010 +0100
@@ -408,11 +408,6 @@ static void sedf_free_vdata(const struct
 static void sedf_free_vdata(const struct scheduler *ops, void *priv)
 {
     xfree(priv);
-}
-
-static void sedf_destroy_vcpu(const struct scheduler *ops, struct vcpu *v)
-{
-    sedf_free_vdata(ops, v->sched_priv);
 }
 
 static void *
@@ -1504,8 +1499,6 @@ const struct scheduler sched_sedf_def = 
     .init_domain    = sedf_init_domain,
     .destroy_domain = sedf_destroy_domain,
 
-    .destroy_vcpu   = sedf_destroy_vcpu,
-
     .alloc_vdata    = sedf_alloc_vdata,
     .free_vdata     = sedf_free_vdata,
     .alloc_pdata    = sedf_alloc_pdata,
diff -r 0dc0bc411035 -r c6e2f69e1807 xen/common/schedule.c
--- a/xen/common/schedule.c     Thu Oct 21 18:51:36 2010 +0100
+++ b/xen/common/schedule.c     Sun Oct 24 13:09:50 2010 +0100
@@ -219,6 +219,8 @@ int sched_init_vcpu(struct vcpu *v, unsi
     if ( v->sched_priv == NULL )
         return 1;
 
+    SCHED_OP(VCPU2OP(v), insert_vcpu, v);
+
     return 0;
 }
 
@@ -266,7 +268,8 @@ int sched_move_domain(struct domain *d, 
         migrate_timer(&v->singleshot_timer, new_p);
         migrate_timer(&v->poll_timer, new_p);
 
-        SCHED_OP(VCPU2OP(v), destroy_vcpu, v);
+        SCHED_OP(VCPU2OP(v), remove_vcpu, v);
+        SCHED_OP(VCPU2OP(v), free_vdata, v->sched_priv);
 
         cpus_setall(v->cpu_affinity);
         v->processor = new_p;
@@ -274,6 +277,8 @@ int sched_move_domain(struct domain *d, 
         evtchn_move_pirqs(v);
 
         new_p = cycle_cpu(new_p, c->cpu_valid);
+
+        SCHED_OP(VCPU2OP(v), insert_vcpu, v);
     }
     domain_update_node_affinity(d);
 
@@ -295,7 +300,8 @@ void sched_destroy_vcpu(struct vcpu *v)
     kill_timer(&v->poll_timer);
     if ( test_and_clear_bool(v->is_urgent) )
         atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
-    SCHED_OP(VCPU2OP(v), destroy_vcpu, v);
+    SCHED_OP(VCPU2OP(v), remove_vcpu, v);
+    SCHED_OP(VCPU2OP(v), free_vdata, v->sched_priv);
 }
 
 int sched_init_domain(struct domain *d)
diff -r 0dc0bc411035 -r c6e2f69e1807 xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h        Thu Oct 21 18:51:36 2010 +0100
+++ b/xen/include/xen/sched-if.h        Sun Oct 24 13:09:50 2010 +0100
@@ -102,8 +102,9 @@ struct scheduler {
     int          (*init_domain)    (const struct scheduler *, struct domain *);
     void         (*destroy_domain) (const struct scheduler *, struct domain *);
 
+    /* Activate / deactivate vcpus in a cpu pool */
     void         (*insert_vcpu)    (const struct scheduler *, struct vcpu *);
-    void         (*destroy_vcpu)   (const struct scheduler *, struct vcpu *);
+    void         (*remove_vcpu)    (const struct scheduler *, struct vcpu *);
 
     void         (*sleep)          (const struct scheduler *, struct vcpu *);
     void         (*wake)           (const struct scheduler *, struct vcpu *);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] [xen-unstable] cpupools: Make interface more consistent, Xen patchbot-unstable <=