
[Xen-devel] [PATCH v2 for Xen 4.6 3/4] libxl: enable XL to set per-VCPU parameters of a domain for the RTDS scheduler



Add libxl_vcpu_sched_params_get/set and sched_rtds_vcpu_get/set functions to
support per-VCPU settings for the RTDS scheduler.

Add a new data structure (libxl_vcpu_sched_params) to hold the per-VCPU settings.

Signed-off-by: Chong Li <chong.li@xxxxxxxxx>
Signed-off-by: Meng Xu <mengxu@xxxxxxxxxxxxx>
Signed-off-by: Sisu Xi <xisisu@xxxxxxxxx>
---
 tools/libxl/libxl.c         | 189 ++++++++++++++++++++++++++++++++++++++------
 tools/libxl/libxl.h         |  19 +++++
 tools/libxl/libxl_types.idl |  11 +++
 3 files changed, 196 insertions(+), 23 deletions(-)
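
For illustration only, not part of the patch: a minimal sketch of how a libxl
client might call the new API to set the RTDS parameters of a single VCPU. The
function name example_set_rtds_vcpu, the choice of VCPU, and the period/budget
values (in microseconds) are hypothetical, and error handling is abbreviated.

#include <stdlib.h>
#include <libxl.h>

static int example_set_rtds_vcpu(libxl_ctx *ctx, uint32_t domid)
{
    libxl_vcpu_sched_params scinfo;
    int rc;

    /* Initialise with the IDL-generated init function (added by this patch). */
    libxl_vcpu_sched_params_init(&scinfo);
    scinfo.sched = LIBXL_SCHEDULER_RTDS;

    /* Describe one VCPU: give vcpu 1 a 4000us budget every 10000us period. */
    scinfo.num_vcpus = 1;
    scinfo.vcpus = calloc(1, sizeof(*scinfo.vcpus));
    scinfo.vcpus[0].vcpuid = 1;
    scinfo.vcpus[0].period = 10000;
    scinfo.vcpus[0].budget = 4000;

    rc = libxl_vcpu_sched_params_set(ctx, domid, &scinfo);

    /* The IDL-generated dispose function frees the vcpus array. */
    libxl_vcpu_sched_params_dispose(&scinfo);
    return rc;
}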

diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index feb3aa9..169901a 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -5797,6 +5797,120 @@ static int sched_sedf_domain_set(libxl__gc *gc, uint32_t domid,
     return 0;
 }
 
+static int sched_rtds_validate_params(libxl__gc *gc, uint32_t period,
+                                 uint32_t budget, uint32_t *sdom_period,
+                                 uint32_t *sdom_budget)
+{
+    if (period != LIBXL_DOMAIN_SCHED_PARAM_PERIOD_DEFAULT) {
+        if (period < 1) {
+            LOG(ERROR, "VCPU period is not set or out of range, "
+                       "valid values are larger than 1");
+            return ERROR_INVAL;
+        }
+        *sdom_period = period;
+    }
+
+    if (budget != LIBXL_DOMAIN_SCHED_PARAM_BUDGET_DEFAULT) {
+        if (budget < 1) {
+            LOG(ERROR, "VCPU budget is not set or out of range, "
+                       "valid values are larger than 1");
+            return ERROR_INVAL;
+        }
+        *sdom_budget = budget;
+    }
+
+    if (*sdom_budget > *sdom_period) {
+        LOG(ERROR, "VCPU budget is larger than VCPU period, "
+                   "VCPU budget should be no larger than VCPU period");
+        return ERROR_INVAL;
+    }
+
+    return 0;
+}
+
+static int sched_rtds_vcpu_get(libxl__gc *gc, uint32_t domid,
+                               libxl_vcpu_sched_params *scinfo)
+{
+    uint16_t num_vcpus;
+    int rc, i;
+    xc_dominfo_t info;
+
+    rc = xc_domain_getinfo(CTX->xch, domid, 1, &info);
+    if (rc < 0) {
+        LOGE(ERROR, "getting domain info");
+        return ERROR_FAIL;
+    }
+    num_vcpus = info.max_vcpu_id + 1;
+
+    struct xen_domctl_sched_rtds_params *sdom = libxl__malloc(gc,
+                sizeof(struct xen_domctl_sched_rtds_params) * num_vcpus);
+    rc = xc_sched_rtds_vcpu_get(CTX->xch, domid, sdom, num_vcpus);
+    if (rc != 0) {
+        LOGE(ERROR, "getting vcpu sched rtds");
+        return ERROR_FAIL;
+    }
+
+    libxl_vcpu_sched_params_init(scinfo);
+
+    scinfo->sched = LIBXL_SCHEDULER_RTDS;
+    scinfo->num_vcpus = num_vcpus;
+    scinfo->vcpus = (libxl_rtds_vcpu *)
+            libxl__malloc(NOGC, sizeof(libxl_rtds_vcpu) * num_vcpus);
+    for (i = 0; i < num_vcpus; i++) {
+        scinfo->vcpus[i].period = sdom[i].period;
+        scinfo->vcpus[i].budget = sdom[i].budget;
+    }
+
+    return 0;
+}
+
+static int sched_rtds_vcpu_set(libxl__gc *gc, uint32_t domid,
+                               const libxl_vcpu_sched_params *scinfo)
+{
+    int rc;
+    int i;
+    uint16_t num_vcpus;
+    int vcpuid;
+    uint32_t budget, period;
+    xc_dominfo_t info;
+
+    rc = xc_domain_getinfo(CTX->xch, domid, 1, &info);
+    if (rc < 0) {
+        LOGE(ERROR, "getting domain info");
+        return ERROR_FAIL;
+    }
+    num_vcpus = info.max_vcpu_id + 1;
+
+    struct xen_domctl_sched_rtds_params *sdom = libxl__calloc(gc,
+            scinfo->num_vcpus, sizeof(struct xen_domctl_sched_rtds_params));
+    for (i = 0; i < scinfo->num_vcpus; i++) {
+        vcpuid = scinfo->vcpus[i].vcpuid;
+        budget = scinfo->vcpus[i].budget;
+        period = scinfo->vcpus[i].period;
+        if (vcpuid < 0 || vcpuid >= num_vcpus) {
+            LOG(ERROR, "VCPU index is out of range, "
+                       "valid values are within range from 0 to %d",
+                       num_vcpus - 1);
+            return ERROR_INVAL;
+        }
+        sdom[i].vcpuid = vcpuid;
+
+        rc = sched_rtds_validate_params(gc, period, budget,
+                &sdom[i].period, &sdom[i].budget);
+        if (rc == ERROR_INVAL)
+            return rc;
+    }
+
+    rc = xc_sched_rtds_vcpu_set(CTX->xch, domid,
+            sdom, scinfo->num_vcpus);
+    if (rc != 0) {
+        LOGE(ERROR, "setting vcpu sched rtds");
+        return ERROR_FAIL;
+    }
+
+    return rc;
+}
+
 static int sched_rtds_domain_get(libxl__gc *gc, uint32_t domid,
                                libxl_domain_sched_params *scinfo)
 {
@@ -5830,29 +5944,10 @@ static int sched_rtds_domain_set(libxl__gc *gc, uint32_t domid,
         return ERROR_FAIL;
     }
 
-    if (scinfo->period != LIBXL_DOMAIN_SCHED_PARAM_PERIOD_DEFAULT) {
-        if (scinfo->period < 1) {
-            LOG(ERROR, "VCPU period is not set or out of range, "
-                       "valid values are larger than 1");
-            return ERROR_INVAL;
-        }
-        sdom.period = scinfo->period;
-    }
-
-    if (scinfo->budget != LIBXL_DOMAIN_SCHED_PARAM_BUDGET_DEFAULT) {
-        if (scinfo->budget < 1) {
-            LOG(ERROR, "VCPU budget is not set or out of range, "
-                       "valid values are larger than 1");
-            return ERROR_INVAL;
-        }
-        sdom.budget = scinfo->budget;
-    }
-
-    if (sdom.budget > sdom.period) {
-        LOG(ERROR, "VCPU budget is larger than VCPU period, "
-                   "VCPU budget should be no larger than VCPU period");
-        return ERROR_INVAL;
-    }
+    rc = sched_rtds_validate_params(gc, scinfo->period, scinfo->budget,
+            &sdom.period, &sdom.budget);
+    if (rc == ERROR_INVAL)
+        return rc;
 
     rc = xc_sched_rtds_domain_set(CTX->xch, domid, &sdom);
     if (rc < 0) {
@@ -5899,6 +5994,30 @@ int libxl_domain_sched_params_set(libxl_ctx *ctx, uint32_t domid,
     return ret;
 }
 
+int libxl_vcpu_sched_params_set(libxl_ctx *ctx, uint32_t domid,
+                                  const libxl_vcpu_sched_params *scinfo)
+{
+    GC_INIT(ctx);
+    libxl_scheduler sched = scinfo->sched;
+    int ret;
+
+    if (sched == LIBXL_SCHEDULER_UNKNOWN)
+        sched = libxl__domain_scheduler(gc, domid);
+
+    switch (sched) {
+    case LIBXL_SCHEDULER_RTDS:
+        ret = sched_rtds_vcpu_set(gc, domid, scinfo);
+        break;
+    default:
+        LOG(ERROR, "Unknown scheduler");
+        ret = ERROR_INVAL;
+        break;
+    }
+
+    GC_FREE;
+    return ret;
+}
+
 int libxl_domain_sched_params_get(libxl_ctx *ctx, uint32_t domid,
                                   libxl_domain_sched_params *scinfo)
 {
@@ -5932,6 +6051,30 @@ int libxl_domain_sched_params_get(libxl_ctx *ctx, uint32_t domid,
     return ret;
 }
 
+int libxl_vcpu_sched_params_get(libxl_ctx *ctx, uint32_t domid,
+                                  libxl_vcpu_sched_params *scinfo)
+{
+    GC_INIT(ctx);
+    int ret;
+
+    libxl_vcpu_sched_params_init(scinfo);
+
+    scinfo->sched = libxl__domain_scheduler(gc, domid);
+
+    switch (scinfo->sched) {
+    case LIBXL_SCHEDULER_RTDS:
+        ret = sched_rtds_vcpu_get(gc, domid, scinfo);
+        break;
+    default:
+        LOG(ERROR, "Unknown scheduler");
+        ret = ERROR_INVAL;
+        break;
+    }
+
+    GC_FREE;
+    return ret;
+}
+
 static int libxl__domain_s3_resume(libxl__gc *gc, int domid)
 {
     int rc = 0;
diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
index 44bd8e2..e565814 100644
--- a/tools/libxl/libxl.h
+++ b/tools/libxl/libxl.h
@@ -192,6 +192,18 @@
  * is not present, instead of ERROR_INVAL.
  */
 #define LIBXL_HAVE_ERROR_DOMAIN_NOTFOUND 1
+
+/*
+ * libxl_vcpu_sched_params is used to get/set per-vcpu params
+ */
+#define LIBXL_HAVE_VCPU_SCHED_PARAMS 1
+
+/*
+ * libxl_rtds_vcpu is used to represent the rtds scheduling params
+ * of a vcpu
+ */
+#define LIBXL_HAVE_RTDS_VCPU 1
+
 /*
  * libxl ABI compatibility
  *
@@ -1460,10 +1472,17 @@ int libxl_sched_credit_params_set(libxl_ctx *ctx, uint32_t poolid,
 #define LIBXL_DOMAIN_SCHED_PARAM_EXTRATIME_DEFAULT -1
 #define LIBXL_DOMAIN_SCHED_PARAM_BUDGET_DEFAULT    -1
 
+/* RTDS per-VCPU parameters */
+#define LIBXL_DOMAIN_SCHED_PARAM_VCPU_INDEX_DEFAULT -1
+
 int libxl_domain_sched_params_get(libxl_ctx *ctx, uint32_t domid,
                                   libxl_domain_sched_params *params);
 int libxl_domain_sched_params_set(libxl_ctx *ctx, uint32_t domid,
                                   const libxl_domain_sched_params *params);
+int libxl_vcpu_sched_params_get(libxl_ctx *ctx, uint32_t domid,
+                                  libxl_vcpu_sched_params *params);
+int libxl_vcpu_sched_params_set(libxl_ctx *ctx, uint32_t domid,
+                                  const libxl_vcpu_sched_params *params);
 
 int libxl_send_trigger(libxl_ctx *ctx, uint32_t domid,
                        libxl_trigger trigger, uint32_t vcpuid);
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
index 117b61d..d28d274 100644
--- a/tools/libxl/libxl_types.idl
+++ b/tools/libxl/libxl_types.idl
@@ -347,6 +347,17 @@ libxl_domain_restore_params = Struct("domain_restore_params", [
     ("checkpointed_stream", integer),
     ])
 
+libxl_rtds_vcpu = Struct("rtds_vcpu",[
+    ("period",       uint32, {'init_val': 
'LIBXL_DOMAIN_SCHED_PARAM_PERIOD_DEFAULT'}),
+    ("budget",       uint32, {'init_val': 
'LIBXL_DOMAIN_SCHED_PARAM_BUDGET_DEFAULT'}),
+    ("vcpuid",        integer, {'init_val': 
'LIBXL_DOMAIN_SCHED_PARAM_VCPU_INDEX_DEFAULT'}),
+    ])
+
+libxl_vcpu_sched_params = Struct("vcpu_sched_params",[
+    ("sched",        libxl_scheduler),
+    ("vcpus",        Array(libxl_rtds_vcpu, "num_vcpus")),
+    ])
+
 libxl_domain_sched_params = Struct("domain_sched_params",[
     ("sched",        libxl_scheduler),
     ("weight",       integer, {'init_val': 
'LIBXL_DOMAIN_SCHED_PARAM_WEIGHT_DEFAULT'}),
-- 
1.9.1
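
For reviewers' reference, and not part of the patch itself: the IDL additions
above should make the libxl IDL compiler emit C types roughly like the sketch
below. Field order and the exact generated boilerplate may differ; the
authoritative definitions are in the generated _libxl_types.h.

/* Illustrative sketch of the types generated from the IDL above. */
typedef struct libxl_rtds_vcpu {
    uint32_t period;  /* LIBXL_DOMAIN_SCHED_PARAM_PERIOD_DEFAULT if unset */
    uint32_t budget;  /* LIBXL_DOMAIN_SCHED_PARAM_BUDGET_DEFAULT if unset */
    int vcpuid;       /* LIBXL_DOMAIN_SCHED_PARAM_VCPU_INDEX_DEFAULT if unset */
} libxl_rtds_vcpu;

typedef struct libxl_vcpu_sched_params {
    libxl_scheduler sched;
    /* The Array(...) entry becomes a pointer plus a num_vcpus counter. */
    libxl_rtds_vcpu *vcpus;
    int num_vcpus;
} libxl_vcpu_sched_params;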

