
[Xen-devel] [RFC PATCH 46/60] hyper_dmabuf: delay auto initialization of comm_env



Comm env initialization is now scheduled to run once xenstore
is initialized. This scheduling is set up in the driver's init
routine.
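
As a reference, here is a minimal sketch (not the driver code
itself; function and work-item names are illustrative) of the
deferral pattern used: a delayed work item polls the
xenstored_ready flag from the xenbus core and reschedules itself
every 500ms until xenstore is usable:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    extern int xenstored_ready;    /* set by the xenbus core */

    static void comm_env_init_delayed(struct work_struct *unused);
    static DECLARE_DELAYED_WORK(comm_env_init_work, comm_env_init_delayed);

    static void comm_env_init_delayed(struct work_struct *unused)
    {
            if (xenstored_ready == 0) {
                    /* xenstore is not up yet; re-check in 500ms */
                    schedule_delayed_work(&comm_env_init_work,
                                          msecs_to_jiffies(500));
                    return;
            }
            /* safe to touch xenstore (domid lookup, ring setup) here */
    }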

Also, add a self-rescheduling routine that monitors (every
10 seconds) for any new tx channel set up by another domain and
automatically configures the matching rx channel; see the sketch
below.

The only limitation is that it currently scans domain IDs 1 to 10
(DOMID_SCAN_START to DOMID_SCAN_END). We could increase this range
if needed.
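
A sketch of that scan loop follows (assuming the xenstore layout
used by this patch, /local/domain/<remote>/data/hyper_dmabuf/<local>,
and an illustrative local_domid variable):

    #include <linux/workqueue.h>
    #include <xen/xenbus.h>

    #define DOMID_SCAN_START       1
    #define DOMID_SCAN_END         10

    static void rx_ch_scan_delayed(struct work_struct *unused);
    static DECLARE_DELAYED_WORK(rx_ch_scan_work, rx_ch_scan_delayed);

    static void rx_ch_scan_delayed(struct work_struct *unused)
    {
            char buf[128];
            int i, port;

            for (i = DOMID_SCAN_START; i <= DOMID_SCAN_END; i++) {
                    if (i == local_domid)
                            continue;

                    snprintf(buf, sizeof(buf),
                             "/local/domain/%d/data/hyper_dmabuf/%d",
                             i, local_domid);

                    /* a "port" entry means domain i set up a tx ring for us */
                    if (xenbus_scanf(XBT_NIL, buf, "port", "%d", &port) > 0)
                            ;       /* set up the matching rx ring here */
            }

            /* scan again in 10 seconds */
            schedule_delayed_work(&rx_ch_scan_work, msecs_to_jiffies(10000));
    }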

With this patch, the comm channel setup IOCTL no longer needs to
be called on the importer side. For example, the ioctl call in
init_hyper_dmabuf can be removed from vmdisplay.
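
For reference, the importer-side call that becomes unnecessary
looks roughly like the sketch below (the device node name and the
ioctl/struct names are assumed from earlier patches in this
series):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "hyper_dmabuf_ioctl.h"  /* assumed header for the ioctl defs */

    static int setup_rx_ch(int exporter_domid)
    {
            struct ioctl_hyper_dmabuf_rx_ch_setup msg = {
                    .source_domain = exporter_domid,
            };
            int fd = open("/dev/hyper_dmabuf", O_RDWR);
            int ret;

            if (fd < 0)
                    return -1;
            ret = ioctl(fd, IOCTL_HYPER_DMABUF_RX_CH_SETUP, &msg);
            close(fd);
            return ret;
    }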

Signed-off-by: Dongwon Kim <dongwon.kim@xxxxxxxxx>
---
 drivers/xen/hyper_dmabuf/Kconfig                   |  10 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c        |  64 +++++----
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h        |   3 +
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 153 ++++++++++++++++++++-
 4 files changed, 199 insertions(+), 31 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/Kconfig b/drivers/xen/hyper_dmabuf/Kconfig
index eb1b637..5efcd44 100644
--- a/drivers/xen/hyper_dmabuf/Kconfig
+++ b/drivers/xen/hyper_dmabuf/Kconfig
@@ -29,4 +29,14 @@ config HYPER_DMABUF_EVENT_GEN
          shared DMA-BUF is available. Events in the list can be retrieved by
          read operation.
 
+config HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
+       bool "Enable automatic rx-ch add with 10-second interval"
+       default y
+       depends on HYPER_DMABUF && HYPER_DMABUF_XEN
+       help
+         If enabled, the driver reads a node in xenstore every 10 seconds
+         to check whether any tx comm channel has been configured by
+         another domain and, if so, automatically initializes the matching
+         rx comm channel for each existing tx comm channel.
+
 endmenu
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 2845224..005677d 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -67,27 +67,6 @@ int hyper_dmabuf_open(struct inode *inode, struct file *filp)
        if (filp->f_flags & O_EXCL)
                return -EBUSY;
 
-       /*
-        * Initialize backend if needed,
-        * use mutex to prevent race conditions when
-        * two userspace apps will open device at the same time
-        */
-       mutex_lock(&hyper_dmabuf_private.lock);
-
-       if (!hyper_dmabuf_private.backend_initialized) {
-       hyper_dmabuf_private.domid = hyper_dmabuf_private.backend_ops->get_vm_id();
-
-               ret = hyper_dmabuf_private.backend_ops->init_comm_env();
-               if (ret < 0) {
-                       dev_err(hyper_dmabuf_private.device,
-                               "failed to initiailize hypervisor-specific comm env\n");
-               } else {
-                       hyper_dmabuf_private.backend_initialized = true;
-               }
-       }
-
-       mutex_unlock(&hyper_dmabuf_private.lock);
-
        return ret;
 }
 
@@ -260,17 +239,22 @@ static int __init hyper_dmabuf_drv_init(void)
                return ret;
        }
 
+/* currently only supports XEN hypervisor */
+
 #ifdef CONFIG_HYPER_DMABUF_XEN
        hyper_dmabuf_private.backend_ops = &xen_backend_ops;
+#else
+       hyper_dmabuf_private.backend_ops = NULL;
+       printk(KERN_ERR "hyper_dmabuf drv currently supports XEN only.\n");
 #endif
-       /*
-        * Defer backend setup to first open call.
-        * Due to fact that some hypervisors eg. Xen, may have dependencies
-        * to userspace daemons like xenstored, in that case all xenstore
-        * calls done from kernel will block until that deamon will be
-        * started, in case where module is built in that will block entire
-        * kernel initialization.
-        */
+
+       if (hyper_dmabuf_private.backend_ops == NULL) {
+               printk(KERN_ERR "Hyper_dmabuf: failed to be loaded - no backend found\n");
+               return -1;
+       }
+
+       mutex_lock(&hyper_dmabuf_private.lock);
+
        hyper_dmabuf_private.backend_initialized = false;
 
        dev_info(hyper_dmabuf_private.device,
@@ -301,6 +285,22 @@ static int __init hyper_dmabuf_drv_init(void)
        init_waitqueue_head(&hyper_dmabuf_private.event_wait);
 
        hyper_dmabuf_private.curr_num_event = 0;
+       hyper_dmabuf_private.exited = false;
+
+       hyper_dmabuf_private.domid = hyper_dmabuf_private.backend_ops->get_vm_id();
+
+       ret = hyper_dmabuf_private.backend_ops->init_comm_env();
+       if (ret < 0) {
+               dev_dbg(hyper_dmabuf_private.device,
+                       "failed to initialize comm-env but it will re-attempt.\n");
+       } else {
+               hyper_dmabuf_private.backend_initialized = true;
+       }
+
+       mutex_unlock(&hyper_dmabuf_private.lock);
+
+       dev_info(hyper_dmabuf_private.device,
+               "Finishing up initialization of hyper_dmabuf drv\n");
 
        /* interrupt for comm should be registered here: */
        return ret;
@@ -312,6 +312,8 @@ static void hyper_dmabuf_drv_exit(void)
        hyper_dmabuf_unregister_sysfs(hyper_dmabuf_private.device);
 #endif
 
+       mutex_lock(&hyper_dmabuf_private.lock);
+
        /* hash tables for export/import entries and ring_infos */
        hyper_dmabuf_table_destroy();
 
@@ -325,6 +327,10 @@ static void hyper_dmabuf_drv_exit(void)
        if (hyper_dmabuf_private.id_queue)
                destroy_reusable_list();
 
+       hyper_dmabuf_private.exited = true;
+
+       mutex_unlock(&hyper_dmabuf_private.lock);
+
        dev_info(hyper_dmabuf_private.device,
                 "hyper_dmabuf driver: Exiting\n");
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index 08e8ed7..a4acdd9f 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -64,6 +64,9 @@ struct hyper_dmabuf_private {
        struct mutex event_read_lock;
 
        int curr_num_event;
+
+       /* indicate whether the driver is unloaded */
+       bool exited;
 };
 
 struct list_reusable_id {
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
index 370a07d..920ecf4 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
@@ -47,6 +47,14 @@ struct hyper_dmabuf_req req_pending = {0};
 
 extern struct hyper_dmabuf_private hyper_dmabuf_private;
 
+extern int xenstored_ready;
+
+static void xen_get_domid_delayed(struct work_struct *unused);
+static void xen_init_comm_env_delayed(struct work_struct *unused);
+
+static DECLARE_DELAYED_WORK(get_vm_id_work, xen_get_domid_delayed);
+static DECLARE_DELAYED_WORK(xen_init_comm_env_work, xen_init_comm_env_delayed);
+
 /* Creates entry in xen store that will keep details of all
  * exporter rings created by this domain
  */
@@ -54,7 +62,7 @@ static int xen_comm_setup_data_dir(void)
 {
        char buf[255];
 
-       sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", hyper_dmabuf_xen_get_domid());
+       sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", hyper_dmabuf_private.domid);
        return xenbus_mkdir(XBT_NIL, buf, "");
 }
 
@@ -68,7 +76,7 @@ static int xen_comm_destroy_data_dir(void)
 {
        char buf[255];
 
-       sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", hyper_dmabuf_xen_get_domid());
+       sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", hyper_dmabuf_private.domid);
        return xenbus_rm(XBT_NIL, buf, "");
 }
 
@@ -131,16 +139,58 @@ static int xen_comm_get_ring_details(int domid, int rdomid, int *grefid, int *po
        return (ret <= 0 ? 1 : 0);
 }
 
+void xen_get_domid_delayed(struct work_struct *unused)
+{
+       struct xenbus_transaction xbt;
+       int domid, ret;
+
+       /* scheduling another if driver is still running
+        * and xenstore has not been initialized */
+       if (hyper_dmabuf_private.exited == false &&
+           likely(xenstored_ready == 0)) {
+               dev_dbg(hyper_dmabuf_private.device,
+                       "Xenstore is not quite ready yet. Will retry it in 500ms\n");
+               schedule_delayed_work(&get_vm_id_work, msecs_to_jiffies(500));
+       } else {
+               xenbus_transaction_start(&xbt);
+
+               ret = xenbus_scanf(xbt, "domid", "", "%d", &domid);
+
+               if (ret <= 0)
+                       domid = -1;
+
+               xenbus_transaction_end(xbt, 0);
+
+               /* try again since -1 is an invalid id for domain
+                * (but only if driver is still running) */
+               if (hyper_dmabuf_private.exited == false && unlikely(domid == -1)) {
+                       dev_dbg(hyper_dmabuf_private.device,
+                               "domid==-1 is invalid. Will retry it in 500ms\n");
+                       schedule_delayed_work(&get_vm_id_work, msecs_to_jiffies(500));
+               } else {
+                       dev_info(hyper_dmabuf_private.device,
+                               "Successfully retrieved domid from Xenstore:%d\n", domid);
+                       hyper_dmabuf_private.domid = domid;
+               }
+       }
+}
+
 int hyper_dmabuf_xen_get_domid(void)
 {
        struct xenbus_transaction xbt;
        int domid;
 
+       if (unlikely(xenstored_ready == 0)) {
+               xen_get_domid_delayed(NULL);
+               return -1;
+       }
+
         xenbus_transaction_start(&xbt);
 
         if (!xenbus_scanf(xbt, "domid","", "%d", &domid)) {
                domid = -1;
         }
+
         xenbus_transaction_end(xbt, 0);
 
        return domid;
@@ -193,6 +243,8 @@ static void remote_dom_exporter_watch_cb(struct xenbus_watch *watch,
         * it means that remote domain has setup it for us and we should connect
         * to it.
         */
+
+
        ret = xen_comm_get_ring_details(hyper_dmabuf_xen_get_domid(), rdom,
                                        &grefid, &port);
 
@@ -389,6 +441,7 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
                return 0;
        }
 
+
        ret = xen_comm_get_ring_details(hyper_dmabuf_xen_get_domid(), domid,
                                        &rx_gref, &rx_port);
 
@@ -519,12 +572,108 @@ void hyper_dmabuf_xen_cleanup_rx_rbuf(int domid)
        FRONT_RING_INIT(&(tx_ring_info->ring_front), tx_ring_info->ring_front.sring, PAGE_SIZE);
 }
 
+#ifdef CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
+
+static void xen_rx_ch_add_delayed(struct work_struct *unused);
+
+static DECLARE_DELAYED_WORK(xen_rx_ch_auto_add_work, xen_rx_ch_add_delayed);
+
+#define DOMID_SCAN_START       1       /*  domid = 1 */
+#define DOMID_SCAN_END         10      /* domid = 10 */
+
+static void xen_rx_ch_add_delayed(struct work_struct *unused)
+{
+       int ret;
+       char buf[128];
+       int i, dummy;
+
+       dev_dbg(hyper_dmabuf_private.device,
+               "Scanning for new tx channel coming from another domain\n");
+
+       /* check other domains and schedule another work if driver
+        * is still running and backend is valid
+        */
+       if (hyper_dmabuf_private.exited == false &&
+           hyper_dmabuf_private.backend_initialized == true) {
+               for (i = DOMID_SCAN_START; i <= DOMID_SCAN_END; i++) {
+                       if (i == hyper_dmabuf_private.domid)
+                               continue;
+
+                       sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d", i,
+                               hyper_dmabuf_private.domid);
+
+                       ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", &dummy);
+
+                       if (ret > 0) {
+                               if (xen_comm_find_rx_ring(i) != NULL)
+                                       continue;
+
+                               ret = hyper_dmabuf_xen_init_rx_rbuf(i);
+
+                               if (!ret)
+                                       dev_info(hyper_dmabuf_private.device,
+                                                "Finishing up setting up rx channel for domain %d\n", i);
+                       }
+               }
+
+               /* check every 10 seconds */
+               schedule_delayed_work(&xen_rx_ch_auto_add_work, msecs_to_jiffies(10000));
+       }
+}
+
+#endif /* CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD */
+
+void xen_init_comm_env_delayed(struct work_struct *unused)
+{
+       int ret;
+
+       /* scheduling another work if driver is still running
+        * and xenstore hasn't been initialized or dom_id hasn't
+        * been correctly retrieved. */
+       if (hyper_dmabuf_private.exited == false &&
+           likely(xenstored_ready == 0 ||
+           hyper_dmabuf_private.domid == -1)) {
+               dev_dbg(hyper_dmabuf_private.device,
+                       "Xenstore is not ready yet. Will retry in 500ms\n");
+               schedule_delayed_work(&xen_init_comm_env_work, msecs_to_jiffies(500));
+       } else {
+               ret = xen_comm_setup_data_dir();
+               if (ret < 0) {
+                       dev_err(hyper_dmabuf_private.device,
+                               "Failed to create data dir in Xenstore\n");
+               } else {
+                       dev_info(hyper_dmabuf_private.device,
+                               "Successfully finished comm env initialization\n");
+                       hyper_dmabuf_private.backend_initialized = true;
+
+#ifdef CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
+                       xen_rx_ch_add_delayed(NULL);
+#endif /* CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD */
+               }
+       }
+}
+
 int hyper_dmabuf_xen_init_comm_env(void)
 {
        int ret;
 
        xen_comm_ring_table_init();
+
+       if (unlikely(xenstored_ready == 0 || hyper_dmabuf_private.domid == -1)) {
+               xen_init_comm_env_delayed(NULL);
+               return -1;
+       }
+
        ret = xen_comm_setup_data_dir();
+       if (ret < 0) {
+               dev_err(hyper_dmabuf_private.device,
+                       "Failed to create data dir in Xenstore\n");
+       } else {
+               dev_info(hyper_dmabuf_private.device,
+                       "Successfully finished comm env initialization\n");
+
+               hyper_dmabuf_private.backend_initialized = true;
+       }
 
        return ret;
 }
-- 
2.7.4

