[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH RFC 19/39] xen/xenbus: xenbus uninit support



This allows reinitialization of xenbus, which is useful for
xen_shim_domain() support. Cleaning up xenbus state means cancelling
pending watch events, deleting all watches, closing the xenstore event
channel, and finally stopping the xenbus/xenwatch kthreads, alongside
unregistering /proc/xen.

Signed-off-by: Joao Martins <joao.m.martins@xxxxxxxxxx>
---
 drivers/xen/xenbus/xenbus.h        |  2 ++
 drivers/xen/xenbus/xenbus_client.c |  5 ++++
 drivers/xen/xenbus/xenbus_probe.c  | 51 +++++++++++++++++++++++++++++++++++---
 drivers/xen/xenbus/xenbus_xs.c     | 38 ++++++++++++++++++++++++++++
 4 files changed, 93 insertions(+), 3 deletions(-)

diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
index 092981171df1..e0e586d81d48 100644
--- a/drivers/xen/xenbus/xenbus.h
+++ b/drivers/xen/xenbus/xenbus.h
@@ -96,6 +96,7 @@ extern wait_queue_head_t xb_waitq;
 extern struct mutex xb_write_mutex;
 
 int xs_init(void);
+void xs_deinit(void);
 int xb_init_comms(void);
 void xb_deinit_comms(void);
 int xs_watch_msg(struct xs_watch_event *event);
@@ -129,6 +130,7 @@ int xenbus_read_otherend_details(struct xenbus_device 
*xendev,
                                 char *id_node, char *path_node);
 
 void xenbus_ring_ops_init(void);
+void xenbus_ring_ops_deinit(void);
 
 int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par);
 void xenbus_dev_queue_reply(struct xb_req_data *req);
diff --git a/drivers/xen/xenbus/xenbus_client.c 
b/drivers/xen/xenbus/xenbus_client.c
index e17ca8156171..ada1c9aa6525 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -935,3 +935,8 @@ void __init xenbus_ring_ops_init(void)
 #endif
                ring_ops = &ring_ops_hvm;
 }
+
+void xenbus_ring_ops_deinit(void)
+{
+       ring_ops = NULL;
+}
diff --git a/drivers/xen/xenbus/xenbus_probe.c 
b/drivers/xen/xenbus/xenbus_probe.c
index 5b471889d723..2e0ed46b05e7 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -741,6 +741,21 @@ static int __init xenstored_local_init(void)
        return err;
 }
 
+static void xenstored_local_deinit(void)
+{
+       struct evtchn_close close;
+       void *page = NULL;
+
+       page = gfn_to_virt(xen_store_gfn);
+       free_page((unsigned long)page);
+
+       close.port = xen_store_evtchn;
+
+       HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
+
+       xen_store_evtchn = 0;
+}
+
 static int xenbus_resume_cb(struct notifier_block *nb,
                            unsigned long action, void *data)
 {
@@ -765,7 +780,11 @@ static struct notifier_block xenbus_resume_nb = {
        .notifier_call = xenbus_resume_cb,
 };
 
-static int __init xenbus_init(void)
+#ifdef CONFIG_XEN_COMPAT_XENFS
+struct proc_dir_entry *xen_procfs;
+#endif
+
+int xenbus_init(void)
 {
        int err = 0;
        uint64_t v = 0;
@@ -833,13 +852,39 @@ static int __init xenbus_init(void)
         * Create xenfs mountpoint in /proc for compatibility with
         * utilities that expect to find "xenbus" under "/proc/xen".
         */
-       proc_create_mount_point("xen");
+       xen_procfs = proc_create_mount_point("xen");
 #endif
 
 out_error:
        return err;
 }
-
+EXPORT_SYMBOL_GPL(xenbus_init);
 postcore_initcall(xenbus_init);
 
+void xenbus_deinit(void)
+{
+       if (!xen_domain())
+               return;
+
+#ifdef CONFIG_XEN_COMPAT_XENFS
+       proc_remove(xen_procfs);
+       xen_procfs = NULL;
+#endif
+
+       xs_deinit();
+       xenstored_ready = 0;
+
+       switch (xen_store_domain_type) {
+       case XS_LOCAL:
+               xenstored_local_deinit();
+               xen_store_interface = NULL;
+               break;
+       default:
+               pr_warn("Xenstore state unknown\n");
+               break;
+       }
+       xenbus_ring_ops_deinit();
+}
+EXPORT_SYMBOL_GPL(xenbus_deinit);
+
 MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 49a3874ae6bb..bd6db3703972 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -866,6 +866,7 @@ static int xenwatch_thread(void *unused)
 
        for (;;) {
                wait_event_interruptible(watch_events_waitq,
+                                        kthread_should_stop() ||
                                         !list_empty(&watch_events));
 
                if (kthread_should_stop())
@@ -917,6 +918,8 @@ static struct notifier_block xs_reboot_nb = {
        .notifier_call = xs_reboot_notify,
 };
 
+static struct task_struct *xenwatch_task;
+
 int xs_init(void)
 {
        int err;
@@ -932,9 +935,44 @@ int xs_init(void)
        task = kthread_run(xenwatch_thread, NULL, "xenwatch");
        if (IS_ERR(task))
                return PTR_ERR(task);
+       xenwatch_task = task;
 
        /* shutdown watches for kexec boot */
        xs_reset_watches();
 
        return 0;
 }
+
+void cancel_watches(void)
+{
+       struct xs_watch_event *event, *tmp;
+
+       /* Cancel pending watch events. */
+       spin_lock(&watch_events_lock);
+       list_for_each_entry_safe(event, tmp, &watch_events, list) {
+               list_del(&event->list);
+               kfree(event);
+       }
+       spin_unlock(&watch_events_lock);
+}
+
+void delete_watches(void)
+{
+       struct xenbus_watch *watch, *tmp;
+
+       spin_lock(&watches_lock);
+       list_for_each_entry_safe(watch, tmp, &watches, list) {
+               list_del(&watch->list);
+       }
+       spin_unlock(&watches_lock);
+}
+
+void xs_deinit(void)
+{
+       kthread_stop(xenwatch_task);
+       xenwatch_task = NULL;
+       xb_deinit_comms();
+       unregister_reboot_notifier(&xs_reboot_nb);
+       cancel_watches();
+       delete_watches();
+}
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.