
[Xen-devel] [PATCH 2 of 2] kdump pv-on-hvm: reset PV devices in crash kernel


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Olaf Hering <olaf@xxxxxxxxx>
  • Date: Tue, 05 Apr 2011 12:14:29 +0200
  • Delivery-date: Tue, 05 Apr 2011 03:17:27 -0700
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1301997987 -7200
# Node ID 0bbf646804818f582e91dc88038d1c79f76ba36e
# Parent  6cc1c07f0e74cd481d1cde0f17bec2631a767891
kdump pv-on-hvm: reset PV devices in crash kernel

After triggering a crash dump in an HVM guest, the PV backend drivers
will remain in the Connected state.  When the kdump kernel starts, the
PV frontend drivers will skip such devices.  As a result, no root
device is found and the vmcore can't be saved.

With this change, all frontend devices in state XenbusStateConnected
will be reset by walking their state file through
Closing/Closed/Initialising.  This triggers a disconnect in the backend
drivers.  The frontend drivers will then find the backend drivers in
state InitWait and can connect.
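
For reference, the states involved map to these numeric values in Xen's
public io/xenbus.h interface (abridged; this is what the "%d" writes
below store in the xenstore state nodes):

    enum xenbus_state {
            XenbusStateUnknown      = 0,
            XenbusStateInitialising = 1,
            XenbusStateInitWait     = 2,  /* backend waits for frontend */
            XenbusStateInitialised  = 3,
            XenbusStateConnected    = 4,
            XenbusStateClosing      = 5,
            XenbusStateClosed       = 6,
    };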

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

---
 drivers/xen/xenbus/xenbus_comms.c |    4 +
 drivers/xen/xenbus/xenbus_probe.c |   96 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 99 insertions(+), 1 deletion(-)

diff -r 6cc1c07f0e74 -r 0bbf64680481 drivers/xen/xenbus/xenbus_comms.c
--- a/drivers/xen/xenbus/xenbus_comms.c Tue Apr 05 12:06:20 2011 +0200
+++ b/drivers/xen/xenbus/xenbus_comms.c Tue Apr 05 12:06:27 2011 +0200
@@ -234,7 +234,9 @@
                printk(KERN_WARNING "XENBUS response ring is not quiescent "
                       "(%08x:%08x): fixing up\n",
                       intf->rsp_cons, intf->rsp_prod);
-               intf->rsp_cons = intf->rsp_prod;
+               /* breaks kdump */
+               if (!reset_devices)
+                       intf->rsp_cons = intf->rsp_prod;
        }
 
        if (xenbus_irq)
diff -r 6cc1c07f0e74 -r 0bbf64680481 drivers/xen/xenbus/xenbus_probe.c
--- a/drivers/xen/xenbus/xenbus_probe.c Tue Apr 05 12:06:20 2011 +0200
+++ b/drivers/xen/xenbus/xenbus_probe.c Tue Apr 05 12:06:27 2011 +0200
@@ -856,11 +856,107 @@
 }
 EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
 
+#ifdef CONFIG_CRASH_DUMP
+static DECLARE_WAIT_QUEUE_HEAD(be_state_wq);
+static int be_state;
+
+static void xenbus_reset_state_changed(struct xenbus_watch *w, const char **v, unsigned int l)
+{
+       xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &be_state);
+       printk(KERN_INFO "XENBUS: %s %s\n", v[XS_WATCH_PATH], xenbus_strstate(be_state));
+       wake_up(&be_state_wq);
+}
+
+static int xenbus_reset_check_final(int *st)
+{
+       return *st == XenbusStateInitialising || *st == XenbusStateInitWait;
+}
+
+static void xenbus_reset_frontend_state(char *backend, char *frontend)
+{
+       struct xenbus_watch watch;
+
+       memset(&watch, 0, sizeof(watch));
+       watch.node = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/state", backend);
+       if (!watch.node)
+               return;
+
+       watch.callback = xenbus_reset_state_changed;
+       be_state = XenbusStateUnknown;
+
+       printk(KERN_INFO "XENBUS: triggering reconnect on %s\n", backend);
+       register_xenbus_watch(&watch);
+
+       xenbus_printf(XBT_NIL, frontend, "state", "%d", XenbusStateClosing);
+       wait_event_interruptible(be_state_wq, be_state == XenbusStateClosing);
+
+       xenbus_printf(XBT_NIL, frontend, "state", "%d", XenbusStateClosed);
+       wait_event_interruptible(be_state_wq, be_state == XenbusStateClosed);
+
+       xenbus_printf(XBT_NIL, frontend, "state", "%d", XenbusStateInitialising);
+       wait_event_interruptible(be_state_wq, xenbus_reset_check_final(&be_state));
+
+       unregister_xenbus_watch(&watch);
+       printk(KERN_INFO "XENBUS: reconnect done on %s\n", backend);
+       kfree(watch.node);
+}
+
+static void xenbus_reset_check_state(char *class, char *dev)
+{
+       int state, err;
+       char *backend, *frontend;
+
+       frontend = kasprintf(GFP_NOIO | __GFP_HIGH, "device/%s/%s", class, dev);
+       if (!frontend)
+               return;
+
+       err = xenbus_scanf(XBT_NIL, frontend, "state", "%i", &state);
+       /* frontend connected? */
+       if (err == 1 && state == XenbusStateConnected) {
+               backend = xenbus_read(XBT_NIL, frontend, "backend", NULL);
+               if (!backend || IS_ERR(backend))
+                       goto out;
+               err = xenbus_scanf(XBT_NIL, backend, "state", "%i", &state);
+               /* backend connected? */
+               if (err == 1 && state == XenbusStateConnected)
+                       xenbus_reset_frontend_state(backend, frontend);
+               kfree(backend);
+       }
+out:
+       kfree(frontend);
+}
+
+static void xenbus_reset_state(void)
+{
+       char **devclass, **dev;
+       int devclass_n, dev_n;
+       int i, j;
+
+       devclass = xenbus_directory(XBT_NIL, "device", "", &devclass_n);
+       if (IS_ERR(devclass))
+               return;
+
+       for (i = 0; i < devclass_n; i++) {
+               dev = xenbus_directory(XBT_NIL, "device", devclass[i], &dev_n);
+               if (IS_ERR(dev))
+                       continue;
+               for (j = 0; j < dev_n; j++)
+                       xenbus_reset_check_state(devclass[i], dev[j]);
+               kfree(dev);
+       }
+       kfree(devclass);
+}
+#endif
 
 void xenbus_probe(void *unused)
 {
        BUG_ON(!is_xenstored_ready());
 
+#ifdef CONFIG_CRASH_DUMP
+       /* reset devices in XenbusStateConnected state */
+       if (!is_initial_xendomain() && reset_devices)
+               xenbus_reset_state();
+#endif
        /* Enumerate devices in xenstore and watch for changes. */
        xenbus_probe_devices(&xenbus_frontend);
        register_xenbus_watch(&fe_watch);

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
