
[Xen-devel] [PATCH] v2 (Re: cs:21768 causes guest spend more time on boot up)



Here's another patch that also unsticks CentOS 5.5 boot for me, and
seems safer and saner (even if it turns out that the bug is somewhere
else and I'm just perturbing the inputs to some more complex system...)

Cheers,

Tim.

hvmloader: clear the xenbus event-channel when we're done with it.
Otherwise a later xenbus client that naively waits for the rising edge
could get stuck.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
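
For context: Xen records a notification on an event channel by setting the
corresponding bit in the shared-info page's evtchn_pending[] bitmap, and the
"rising edge" is that bit going from 0 to 1.  A rough sketch of the kind of
naive client the description has in mind (illustrative only, not code from
the tree; the shared_info pointer and port number stand in for whatever state
such a client already holds):

static void naive_wait_for_rising_edge(struct shared_info *si, uint32_t port)
{
    unsigned int word = port / (8 * sizeof (unsigned long));
    unsigned long mask = 1UL << (port % (8 * sizeof (unsigned long)));
    volatile unsigned long *pending = &si->evtchn_pending[word];
    unsigned long before = *pending & mask;

    /* Wait for the pending bit to change.  If hvmloader has already left
     * the bit set, a later notification from the backend sets a bit that
     * is already 1, nothing observable changes, and this spins forever. */
    while ( (*pending & mask) == before )
        ; /* spin */
}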

diff -r c4a83e3cc6b4 tools/firmware/hvmloader/util.c
--- a/tools/firmware/hvmloader/util.c   Thu Jul 15 18:18:16 2010 +0100
+++ b/tools/firmware/hvmloader/util.c   Tue Jul 20 11:34:06 2010 +0100
@@ -587,10 +587,28 @@
     return table;
 }
 
+struct shared_info *get_shared_info(void) 
+{
+    static struct shared_info *shared_info = NULL;
+    struct xen_add_to_physmap xatp;
+
+    if ( shared_info == NULL )
+    {
+        /* Map the shared-info page at the top of the 4GB space (gpfn 0xfffff). */
+        xatp.domid = DOMID_SELF;
+        xatp.space = XENMAPSPACE_shared_info;
+        xatp.idx   = 0;
+        xatp.gpfn  = 0xfffff;
+        if ( hypercall_memory_op(XENMEM_add_to_physmap, &xatp) != 0 )
+            BUG();
+        shared_info = (struct shared_info *) (xatp.gpfn << 12);
+    }
+    return shared_info;
+}
+
 uint16_t get_cpu_mhz(void)
 {
-    struct xen_add_to_physmap xatp;
-    struct shared_info *shared_info = (struct shared_info *)0xfffff000;
+    struct shared_info *shared_info = get_shared_info();
     struct vcpu_time_info *info = &shared_info->vcpu_info[0].time;
     uint64_t cpu_khz;
     uint32_t tsc_to_nsec_mul, version;
@@ -599,14 +617,6 @@
     static uint16_t cpu_mhz;
     if ( cpu_mhz != 0 )
         return cpu_mhz;
-
-    /* Map shared-info page. */
-    xatp.domid = DOMID_SELF;
-    xatp.space = XENMAPSPACE_shared_info;
-    xatp.idx   = 0;
-    xatp.gpfn  = (unsigned long)shared_info >> 12;
-    if ( hypercall_memory_op(XENMEM_add_to_physmap, &xatp) != 0 )
-        BUG();
 
     /* Get a consistent snapshot of scale factor (multiplier and shift). */
     do {
diff -r c4a83e3cc6b4 tools/firmware/hvmloader/util.h
--- a/tools/firmware/hvmloader/util.h   Thu Jul 15 18:18:16 2010 +0100
+++ b/tools/firmware/hvmloader/util.h   Tue Jul 20 11:34:06 2010 +0100
@@ -68,6 +68,9 @@
 #define pci_writeb(devfn, reg, val) (pci_write(devfn, reg, 1, (uint8_t) val))
 #define pci_writew(devfn, reg, val) (pci_write(devfn, reg, 2, (uint16_t)val))
 #define pci_writel(devfn, reg, val) (pci_write(devfn, reg, 4, (uint32_t)val))
+
+/* Get a pointer to the shared-info page */
+struct shared_info *get_shared_info(void);
 
 /* Get CPU speed in MHz. */
 uint16_t get_cpu_mhz(void);
diff -r c4a83e3cc6b4 tools/firmware/hvmloader/xenbus.c
--- a/tools/firmware/hvmloader/xenbus.c Thu Jul 15 18:18:16 2010 +0100
+++ b/tools/firmware/hvmloader/xenbus.c Tue Jul 20 11:34:06 2010 +0100
@@ -53,14 +53,20 @@
            (unsigned long) rings, (unsigned long) event);
 }
 
-/* Reset the xenbus connection so the next kernel can start again. 
- * We zero out the whole ring -- the backend can handle this, and it's 
- * not going to surprise any frontends since it's equivalent to never 
- * having used the rings. */
+/* Reset the xenbus connection so the next kernel can start again. */
 void xenbus_shutdown(void)
 {
     ASSERT(rings != NULL);
+
+    /* We zero out the whole ring -- the backend can handle this, and it's 
+     * not going to surprise any frontends since it's equivalent to never 
+     * having used the rings. */
     memset(rings, 0, sizeof *rings);
+
+    /* Clear the xenbus event-channel too */
+    get_shared_info()->evtchn_pending[event / (8 * sizeof (unsigned long))]
+        &= ~(1UL << (event % (8 * sizeof (unsigned long))));
+
     rings = NULL;
 }
 

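A note on the arithmetic in the xenbus_shutdown() hunk above: evtchn_pending[]
is a bitmap with one bit per event-channel port, so the word index and shift
are taken against the word size in bits, i.e. 8 * sizeof (unsigned long),
rather than sizeof (unsigned long) alone.  A standalone sketch of the same
calculation (hvmloader is built as a 32-bit binary, so unsigned long is 32
bits wide there):

/* Clear the pending bit for 'port' in an evtchn_pending[]-style bitmap.
 * With 32-bit longs, e.g. port 35 lands in word 1, bit 3. */
static void clear_pending_bit(volatile unsigned long *pending, uint32_t port)
{
    unsigned int bits_per_word = 8 * sizeof (unsigned long);

    pending[port / bits_per_word] &= ~(1UL << (port % bits_per_word));
}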