
[Xen-devel] [PATCH 4/5] libxl: implement legacy xm cpuid parser



To support legacy xm-based config files, add a parser for the old-style
cpuid= syntax. This is provided for backward compatibility only.
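
For illustration, an entry in the old syntax might look like this in a
config file (the example feature bit is mine, not from the patch): a
leaf number, an optional subleaf, then register assignments where each
value is a string of exactly 32 bit characters, most significant bit
first:

    cpuid = ['1:ecx=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx0']

This forces bit 0 of leaf 1's ECX (SSE3) to 0. Per the xend
convention, '0'/'1' force a bit, 'x' applies the default policy and
'k' passes the host value through.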

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>

--
Andre Przywara
AMD-Operating System Research Center (OSRC), Dresden, Germany
Tel: +49 351 448-3567-12
From 0019b6d3609a200a5cb4e0808cf3ced80084daa1 Mon Sep 17 00:00:00 2001
From: Andre Przywara <andre.przywara@xxxxxxx>
Date: Wed, 1 Sep 2010 09:22:35 +0200
Subject: [PATCH 4/5] libxl: implement legacy xm cpuid parser

To support legacy xm-based config files, add a parser for the old-style
cpuid= syntax. This syntax uses a Python list, so it can easily be
distinguished from the new one.

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
---
 tools/libxl/libxl.c |   60 +++++++++++++++++++++++++++++++++++++++++++++++++++
 tools/libxl/libxl.h |    2 +
 2 files changed, 62 insertions(+), 0 deletions(-)

diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index 04c8e33..e3136f0 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -3446,6 +3446,66 @@ int libxl_cpuid_parse_config(libxl_cpuid_policy_list *cpuid, const char* str)
     return 0;
 }
 
+/* parse a single list item from the legacy Python xm syntax, where
+ * the strings for each register were directly exposed to the user.
+ * Used for maintaining compatibility with older config files
+ */
+int libxl_cpuid_parse_config_legacy(libxl_cpuid_policy_list *cpuid,
+                                    const char* str)
+{
+    char *endptr;
+    unsigned long value;
+    uint32_t leaf, subleaf = XEN_CPUID_INPUT_UNUSED;
+    struct libxl_cpuid_policy *entry;
+
+    /* parse the leaf number */
+    value = strtoul(str, &endptr, 0);
+    if (str == endptr) {
+        return 1;
+    }
+    leaf = value;
+    /* check for an optional subleaf number */
+    if (*endptr == ',') {
+        str = endptr + 1;
+        value = strtoul(str, &endptr, 0);
+        if (str == endptr) {
+            return 2;
+        }
+        subleaf = value;
+    }
+    if (*endptr != ':') {
+        return 3;
+    }
+    /* find (or create) the policy entry, then walk the register values */
+    entry = cpuid_find_match(cpuid, leaf, subleaf);
+    for (str = endptr + 1; *str != 0;) {
+        if (str[0] != 'e' || str[1] == 0 || str[2] != 'x') {
+            return 4;
+        }
+        value = str[1] - 'a';  /* 'a'..'d' -> 0..3; others wrap to > 3 */
+        endptr = strchr(str, '=');
+        if (value > 3 || endptr == NULL) {
+            return 4;
+        }
+        str = endptr + 1;
+        endptr = strchr(str, ',');
+        if (endptr == NULL) {
+            endptr = strchr(str, 0);
+        }
+        if (endptr - str != 32) {
+            return 5;
+        }
+        entry->policy[value] = calloc(32 + 1, 1);
+        strncpy(entry->policy[value], str, 32);
+        entry->policy[value][32] = 0;
+        if (*endptr == 0) {
+            break;
+        }
+        for (str = endptr + 1; *str == ' ' || *str == '\n'; str++);
+    }
+    return 0;
+}
+
 char *libxl_tmem_list(libxl_ctx *ctx, uint32_t domid, int use_long)
 {
     int rc;
diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
index 484173d..12d27bc 100644
--- a/tools/libxl/libxl.h
+++ b/tools/libxl/libxl.h
@@ -402,6 +402,8 @@ int libxl_device_pci_list_assigned(libxl_ctx *ctx, libxl_device_pci **list, uint
 int libxl_device_pci_list_assignable(libxl_ctx *ctx, libxl_device_pci **list, int *num);
 int libxl_device_pci_parse_bdf(libxl_ctx *ctx, libxl_device_pci *pcidev, const char *str);
 int libxl_cpuid_parse_config(libxl_cpuid_policy_list *cpuid, const char* str);
+int libxl_cpuid_parse_config_legacy(libxl_cpuid_policy_list *cpuid,
+                                    const char* str);
 
 /*
  * Functions for allowing users of libxl to store private data
-- 
1.6.4
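
For reference, a minimal sketch of how a front-end might drive this
parser (not part of the patch; parse_legacy_cpuid_list and the way the
list elements reach it are made up for illustration):

#include <stdio.h>
#include "libxl.h"   /* libxl_cpuid_policy_list and the parser prototype */

/* Hypothetical helper: hand each element of a list-valued cpuid=
 * setting to the legacy parser and report which entry failed. */
static int parse_legacy_cpuid_list(libxl_cpuid_policy_list *cpuid,
                                   const char **values, int num)
{
    int i, rc;

    for (i = 0; i < num; i++) {
        rc = libxl_cpuid_parse_config_legacy(cpuid, values[i]);
        if (rc != 0) {
            /* the return value encodes the stage that failed:
             * 1/2: bad leaf/subleaf number, 3: missing ':',
             * 4: bad register assignment, 5: value not 32 chars */
            fprintf(stderr, "invalid cpuid= entry %d (error %d): %s\n",
                    i, rc, values[i]);
            return rc;
        }
    }
    return 0;
}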
