[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 3/4] qemu-xen: support qdisk with stubdoms



In the qemu used as backend provider for stubdoms, register two qdisk
backends: one for the stubdom and one for the guest.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

diff --git a/hw/xen_backend.c b/hw/xen_backend.c
index afd0bf2..537e21f 100644
--- a/hw/xen_backend.c
+++ b/hw/xen_backend.c
@@ -526,7 +526,7 @@ void xen_be_check_state(struct XenDevice *xendev)
 
 /* ------------------------------------------------------------- */
 
-static int xenstore_scan(const char *type, int dom, struct XenDevOps *ops)
+int xenstore_scan(const char *type, int dom, struct XenDevOps *ops)
 {
     struct XenDevice *xendev;
     char path[XEN_BUFSIZE], token[XEN_BUFSIZE];
diff --git a/hw/xen_backend.h b/hw/xen_backend.h
index c48c593..28e77ac 100644
--- a/hw/xen_backend.h
+++ b/hw/xen_backend.h
@@ -78,6 +78,7 @@ void xen_be_check_state(struct XenDevice *xendev);
 /* xen backend driver bits */
 int xen_be_init(void);
 int xen_be_register(const char *type, struct XenDevOps *ops);
+int xenstore_scan(const char *type, int dom, struct XenDevOps *ops);
 int xen_be_set_state(struct XenDevice *xendev, enum xenbus_state state);
 int xen_be_bind_evtchn(struct XenDevice *xendev);
 void xen_be_unbind_evtchn(struct XenDevice *xendev);
diff --git a/hw/xen_machine_pv.c b/hw/xen_machine_pv.c
index b2475ba..434a902 100644
--- a/hw/xen_machine_pv.c
+++ b/hw/xen_machine_pv.c
@@ -44,6 +44,7 @@ static void xen_init_pv(ram_addr_t ram_size, int vga_ram_size,
                        const char *direct_pci)
 {
     CPUState *env;
+    uint32_t domid_target;
 
 #ifndef CONFIG_STUBDOM
     /* Initialize tapdisk client */
@@ -70,6 +71,9 @@ static void xen_init_pv(ram_addr_t ram_size, int vga_ram_size,
     xen_be_register("vkbd", &xen_kbdmouse_ops);
     xen_be_register("vfb", &xen_framebuffer_ops);
     xen_be_register("qdisk", &xen_blkdev_ops);
+    domid_target = xenstore_read_target();
+    if (domid_target)
+        xenstore_scan("qdisk", domid_target, &xen_blkdev_ops);
 
     /* setup framebuffer */
     xen_init_display(xen_domid);
diff --git a/qemu-xen.h b/qemu-xen.h
index 7821b54..0e70dbe 100644
--- a/qemu-xen.h
+++ b/qemu-xen.h
@@ -63,6 +63,7 @@ void handle_buffered_pio(void);
 
 /* xenstore.c */
 void xenstore_init(void);
+uint32_t xenstore_read_target(void);
 void xenstore_parse_domain_config(int domid);
 int xenstore_parse_disable_pf_config(void);
 int xenstore_fd(void);
diff --git a/xenstore.c b/xenstore.c
index b7e7c28..d364a5e 100644
--- a/xenstore.c
+++ b/xenstore.c
@@ -391,6 +391,50 @@ static const char *xenstore_get_guest_uuid(void)
     return already_computed;
 }
 
+/* Look up the "target" node under this domain's xenstore path.
+ *
+ * A stubdom carries a "target" entry naming the domid of the guest it
+ * serves.  Returns that domid, or 0 when there is no target entry
+ * (i.e. we are not running as a stubdom backend) or on any
+ * xenstore/allocation error.
+ */
+uint32_t xenstore_read_target(void)
+{
+    char *domain_path = NULL, *target_path = NULL, *target_value = NULL;
+    unsigned int len;
+    uint32_t target_domid = 0;
+
+    if (xsh == NULL)
+        return 0;
+
+    domain_path = xs_get_domain_path(xsh, domid);
+    if (domain_path == NULL) {
+        fprintf(logfile, "xs_get_domain_path() error. domid %d.\n", domid);
+        goto out;
+    }
+
+    if (pasprintf(&target_path, "%s/target", domain_path) == -1) {
+        fprintf(logfile, "xenstore_read_target(): out of memory.\n");
+        goto out;
+    }
+
+    target_value = xs_read(xsh, XBT_NULL, target_path, &len);
+    if (target_value == NULL) {
+        /* Missing "target" node is the normal case for a non-stubdom. */
+        fprintf(logfile, "xs_read(): target get error. %s.\n", target_path);
+        goto out;
+    }
+
+    fprintf(logfile, "target = %s\n", target_value);
+    target_domid = strtoul(target_value, NULL, 10);
+
+ out:
+    free(domain_path);
+    free(target_path);
+    free(target_value);
+
+    return target_domid;
+}
+
 #define PT_PCI_MSITRANSLATE_DEFAULT 1
 #define PT_PCI_POWER_MANAGEMENT_DEFAULT 0
 int direct_pci_msitranslate;

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.