
[Xen-devel] [RFC PATCH 26/60] hyper_dmabuf: add mutexes to prevent several race conditions



From: Mateusz Polrola <mateuszx.potrola@xxxxxxxxx>

Add a mutex to the export_fd ioctl to prevent the pages of the same
buffer from being mapped twice, a race that can occur when two
consumers try to map the same buffer on the importer VM at the same
time.
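
For illustration, this is the classic check-then-act race. The
following minimal userspace sketch (plain pthreads and hypothetical
names, not the driver's actual code) shows how holding one mutex
across the check and the mapping keeps the pages from being mapped
twice:

    #include <pthread.h>
    #include <stdio.h>

    /* Hypothetical stand-in for one shared buffer entry. */
    struct buf_info {
            pthread_mutex_t lock;
            int num_importers;
            int pages_mapped;       /* set once the pages are mapped */
    };

    static struct buf_info buf = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

    /* export_fd reduced to its racy core: without the lock, two
     * threads can both observe pages_mapped == 0 and both map the
     * same pages.
     */
    static void *export_fd(void *arg)
    {
            pthread_mutex_lock(&buf.lock);
            buf.num_importers++;
            if (!buf.pages_mapped) {
                    buf.pages_mapped = 1;   /* map exactly once */
                    printf("thread %ld mapped the pages\n", (long)arg);
            }
            pthread_mutex_unlock(&buf.lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t1, t2;

            pthread_create(&t1, NULL, export_fd, (void *)1L);
            pthread_create(&t2, NULL, export_fd, (void *)2L);
            pthread_join(t1, NULL);
            pthread_join(t2, NULL);
            printf("importers: %d\n", buf.num_importers);
            return 0;
    }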

Also take a mutex before sending a request over the Xen communication
channel, so that req_pending cannot be overwritten by another caller
while a response is still outstanding.
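
In the same spirit, a sketch of the req_pending protection (again a
simplified userspace model with hypothetical names, not the driver's
code): the channel mutex is held across the whole request/response
exchange, so a second caller cannot overwrite the single pending slot
while the first caller is still waiting for its reply.

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Hypothetical stand-in for the single req_pending slot: one
     * shared place where the response to the in-flight request is
     * parked.
     */
    static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;
    static long req_pending_id;

    static void *send_req(void *arg)
    {
            long id = (long)arg;

            pthread_mutex_lock(&chan_lock);
            req_pending_id = id;    /* publish our request */
            usleep(1000);           /* stand-in for waiting on the reply */
            /* Slot is still ours: no other sender could overwrite it. */
            printf("req %ld consumed response for req %ld\n",
                   id, req_pending_id);
            pthread_mutex_unlock(&chan_lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t1, t2;

            pthread_create(&t1, NULL, send_req, (void *)1L);
            pthread_create(&t2, NULL, send_req, (void *)2L);
            pthread_join(t1, NULL);
            pthread_join(t2, NULL);
            return 0;
    }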

Signed-off-by: Dongwon Kim <dongwon.kim@xxxxxxxxx>
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c          |  2 ++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h          |  1 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c        |  6 ++++++
 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c | 10 ++++++++++
 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h |  1 +
 5 files changed, 20 insertions(+)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 569b95e..584d55d 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -56,6 +56,8 @@ static int __init hyper_dmabuf_drv_init(void)
 
        printk( KERN_NOTICE "hyper_dmabuf_starting: Initialization started" );
 
+       mutex_init(&hyper_dmabuf_private.lock);
+
        ret = register_device();
        if (ret < 0) {
                return -EINVAL;
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index 0b1441e..8445416 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -76,6 +76,7 @@ struct hyper_dmabuf_private {
 
        /* backend ops - hypervisor specific */
        struct hyper_dmabuf_backend_ops *backend_ops;
+       struct mutex lock;
 };
 
 #endif /* __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 018de8c..8851a9c 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -260,6 +260,8 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
        if (sgt_info == NULL || !sgt_info->valid) /* can't find sgt from the table */
                return -1;
 
+       mutex_lock(&hyper_dmabuf_private.lock);
+
        sgt_info->num_importers++;
 
        /* send notification for export_fd to exporter */
@@ -274,6 +276,7 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
                kfree(req);
                dev_err(hyper_dmabuf_private.device, "Failed to create sgt or 
notify exporter\n");
                sgt_info->num_importers--;
+               mutex_unlock(&hyper_dmabuf_private.lock);
                return -EINVAL;
        }
        kfree(req);
@@ -282,6 +285,7 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
                dev_err(hyper_dmabuf_private.device,
                        "Buffer invalid\n");
                sgt_info->num_importers--;
+               mutex_unlock(&hyper_dmabuf_private.lock);
                return -1;
        } else {
                dev_dbg(hyper_dmabuf_private.device, "Can import buffer\n");
@@ -303,6 +307,7 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
 
                if (!data_pages) {
                        sgt_info->num_importers--;
+                       mutex_unlock(&hyper_dmabuf_private.lock);
                        return -EINVAL;
                }
 
@@ -318,6 +323,7 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
                ret = export_fd_attr->fd;
        }
 
+       mutex_unlock(&hyper_dmabuf_private.lock);
        dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
        return 0;
 }
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
index a8cce26..9d67b47 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
@@ -278,6 +278,8 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
        ring_info->irq = ret;
        ring_info->port = alloc_unbound.port;
 
+       mutex_init(&ring_info->lock);
+
        dev_dbg(hyper_dmabuf_private.device,
                "%s: allocated eventchannel gref %d  port: %d  irq: %d\n",
                __func__,
@@ -512,6 +514,9 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
                return -EINVAL;
        }
 
+
+       mutex_lock(&ring_info->lock);
+
        ring = &ring_info->ring_front;
 
        if (RING_FULL(ring))
@@ -519,6 +524,7 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
 
        new_req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
        if (!new_req) {
+               mutex_unlock(&ring_info->lock);
                dev_err(hyper_dmabuf_private.device,
                        "NULL REQUEST\n");
                return -EIO;
@@ -548,13 +554,17 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
                }
 
                if (timeout < 0) {
+                       mutex_unlock(&ring_info->lock);
                        dev_err(hyper_dmabuf_private.device, "request 
timed-out\n");
                        return -EBUSY;
                }
 
+               mutex_unlock(&ring_info->lock);
                return req_pending.status;
        }
 
+       mutex_unlock(&ring_info->lock);
+
        return 0;
 }
 
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
index 9c93165..0533e4d 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
@@ -39,6 +39,7 @@ struct xen_comm_tx_ring_info {
         int gref_ring;
         int irq;
         int port;
+       struct mutex lock;
        struct xenbus_watch watch;
 };
 
-- 
2.7.4

