[Xen-devel] [PATCH] gntdev: switch back to rwlocks

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH] gntdev: switch back to rwlocks
From: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Date: Fri, 9 Jul 2010 15:32:43 +0100
Cc: Jeremy Fitzhardinge <jeremy@xxxxxxxx>
Delivery-date: Fri, 09 Jul 2010 07:32:55 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Alpine 2.00 (DEB 1167 2008-08-23)
Hi Jeremy,
this patch switches gntdev back from spinlocks to rwlocks, solving the
locking issue that was preventing stubdoms from working.
In particular, the problem is that gntdev_mmap calls apply_to_page_range
while holding the lock. apply_to_page_range causes the mmu notifiers to
be called, so mn_invl_range_start is invoked and tries to acquire the
same lock again, which deadlocks with a spinlock. With an rwlock,
gntdev_mmap and the notifier callbacks take the lock for reading, so the
nested acquisition is safe; the paths that add or remove maps take it
for writing.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

---
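
For reference, here is a minimal userspace sketch (not part of the patch)
of the locking pattern the change relies on: both the outer mmap path and
the inner mmu-notifier callback only need the lock for reading, so the
nested acquisition succeeds with an rwlock where a spinlock self-deadlocks.
The fake_mmap()/fake_notifier() names are invented stand-ins for
gntdev_mmap() and mn_invl_range_start(), and a POSIX rwlock stands in for
the kernel rwlock_t.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for priv->rwlock, which protects the maps list. */
static pthread_rwlock_t maps_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for mn_invl_range_start(): runs while fake_mmap() already
 * holds the lock and only needs to read the maps list. */
static void fake_notifier(void)
{
	pthread_rwlock_rdlock(&maps_lock);   /* nested read lock: succeeds */
	printf("notifier walked the maps list\n");
	pthread_rwlock_unlock(&maps_lock);
}

/* Stand-in for gntdev_mmap() -> apply_to_page_range() -> mmu notifier.
 * With the old spinlock this call chain acquired the same lock twice
 * and deadlocked; as a reader the inner acquisition is fine. */
static void fake_mmap(void)
{
	pthread_rwlock_rdlock(&maps_lock);   /* outer read lock */
	fake_notifier();
	pthread_rwlock_unlock(&maps_lock);
}

int main(void)
{
	fake_mmap();
	return 0;
}

Build with "gcc -pthread". The paths that modify the maps list
(gntdev_release and the map/unmap ioctls) correspondingly take the lock
as writers in the patch below.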

diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index a33e443..0a9a68c 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -51,7 +51,7 @@ struct gntdev_priv {
        struct list_head maps;
        uint32_t used;
        uint32_t limit;
-       spinlock_t lock;
+       rwlock_t rwlock;
        struct mm_struct *mm;
        struct mmu_notifier mn;
 };
@@ -289,7 +289,7 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
        unsigned long mstart, mend;
        int err;
 
-       spin_lock(&priv->lock);
+       read_lock(&priv->rwlock);
        list_for_each_entry(map, &priv->maps, next) {
                if (!map->vma)
                        continue;
@@ -311,7 +311,7 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
                                        (mend - mstart) >> PAGE_SHIFT);
                WARN_ON(err);
        }
-       spin_unlock(&priv->lock);
+       read_unlock(&priv->rwlock);
 }
 
 static void mn_invl_page(struct mmu_notifier *mn,
@@ -328,7 +328,7 @@ static void mn_release(struct mmu_notifier *mn,
        struct grant_map *map;
        int err;
 
-       spin_lock(&priv->lock);
+       read_lock(&priv->rwlock);
        list_for_each_entry(map, &priv->maps, next) {
                if (!map->vma)
                        continue;
@@ -339,7 +339,7 @@ static void mn_release(struct mmu_notifier *mn,
                err = unmap_grant_pages(map, 0, map->count);
                WARN_ON(err);
        }
-       spin_unlock(&priv->lock);
+       read_unlock(&priv->rwlock);
 }
 
 struct mmu_notifier_ops gntdev_mmu_ops = {
@@ -359,7 +359,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
                return -ENOMEM;
 
        INIT_LIST_HEAD(&priv->maps);
-       spin_lock_init(&priv->lock);
+       rwlock_init(&priv->rwlock);
        priv->limit = limit;
 
        priv->mm = get_task_mm(current);
@@ -387,7 +387,7 @@ static int gntdev_release(struct inode *inode, struct file *flip)
        if (debug)
                printk("%s: priv %p\n", __FUNCTION__, priv);
 
-       spin_lock(&priv->lock);
+       write_lock(&priv->rwlock);
        while (!list_empty(&priv->maps)) {
                map = list_entry(priv->maps.next, struct grant_map, next);
                err = gntdev_del_map(map);
@@ -395,7 +395,7 @@ static int gntdev_release(struct inode *inode, struct file *flip)
                        gntdev_free_map(map);
 
        }
-       spin_unlock(&priv->lock);
+       write_unlock(&priv->rwlock);
 
        mmu_notifier_unregister(&priv->mn, priv->mm);
        kfree(priv);
@@ -429,15 +429,15 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
                return err;
        }
 
-       spin_lock(&priv->lock);
+       write_lock(&priv->rwlock);
        gntdev_add_map(priv, map);
        op.index = map->index << PAGE_SHIFT;
-       spin_unlock(&priv->lock);
+       write_unlock(&priv->rwlock);
 
        if (copy_to_user(u, &op, sizeof(op)) != 0) {
-               spin_lock(&priv->lock);
+               write_lock(&priv->rwlock);
                gntdev_del_map(map);
-               spin_unlock(&priv->lock);
+               write_unlock(&priv->rwlock);
                gntdev_free_map(map);
                return err;
        }
@@ -457,11 +457,11 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
                printk("%s: priv %p, del %d+%d\n", __FUNCTION__, priv,
                       (int)op.index, (int)op.count);
 
-       spin_lock(&priv->lock);
+       write_lock(&priv->rwlock);
        map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
        if (map)
                err = gntdev_del_map(map);
-       spin_unlock(&priv->lock);
+       write_unlock(&priv->rwlock);
        if (!err)
                gntdev_free_map(map);
        return err;
@@ -479,16 +479,16 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
                printk("%s: priv %p, offset for vaddr %lx\n", __FUNCTION__, priv,
                       (unsigned long)op.vaddr);
 
-       spin_lock(&priv->lock);
+       read_lock(&priv->rwlock);
        map = gntdev_find_map_vaddr(priv, op.vaddr);
        if (map == NULL ||
            map->vma->vm_start != op.vaddr) {
-               spin_unlock(&priv->lock);
+               read_unlock(&priv->rwlock);
                return -EINVAL;
        }
        op.offset = map->index << PAGE_SHIFT;
        op.count = map->count;
-       spin_unlock(&priv->lock);
+       read_unlock(&priv->rwlock);
 
        if (copy_to_user(u, &op, sizeof(op)) != 0)
                return -EFAULT;
@@ -507,9 +507,9 @@ static long gntdev_ioctl_set_max_grants(struct gntdev_priv *priv,
        if (op.count > limit)
                return -EINVAL;
 
-       spin_lock(&priv->lock);
+       write_lock(&priv->rwlock);
        priv->limit = op.count;
-       spin_unlock(&priv->lock);
+       write_unlock(&priv->rwlock);
        return 0;
 }
 
@@ -557,7 +557,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
                printk("%s: map %d+%d at %lx (pgoff %lx)\n", __FUNCTION__,
                       index, count, vma->vm_start, vma->vm_pgoff);
 
-       spin_lock(&priv->lock);
+       read_lock(&priv->rwlock);
        map = gntdev_find_map_index(priv, index, count);
        if (!map)
                goto unlock_out;
@@ -599,7 +599,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
        map->is_mapped = 1;
 
 unlock_out:
-       spin_unlock(&priv->lock);
+       read_unlock(&priv->rwlock);
        return err;
 }
 
