[PATCH 1/2] xen/gntdev.c: Mark pages as dirty



There is a bug in the original code: when gntdev_get_page() is called
with writeable=true, the page needs to be marked dirty before being
put.

To address this, add a bool writeable to struct gntdev_copy_batch, set
it in gntdev_grant_copy_seg() (and drop the writeable argument to
gntdev_get_page()), and then, based on batch->writeable, call
set_page_dirty_lock() before putting the pages.

Fixes: a4cdb556cae0 ("xen/gntdev: add ioctl for grant copy")
Suggested-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Signed-off-by: Souptick Joarder <jrdr.linux@xxxxxxxxx>
Cc: John Hubbard <jhubbard@xxxxxxxxxx>
Cc: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Cc: Juergen Gross <jgross@xxxxxxxx>
Cc: David Vrabel <david.vrabel@xxxxxxxxxx>
---
 drivers/xen/gntdev.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 64a9025a..5e1411b 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -720,17 +720,18 @@ struct gntdev_copy_batch {
        s16 __user *status[GNTDEV_COPY_BATCH];
        unsigned int nr_ops;
        unsigned int nr_pages;
+       bool writeable;
 };
 
 static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
-                          bool writeable, unsigned long *gfn)
+                               unsigned long *gfn)
 {
        unsigned long addr = (unsigned long)virt;
        struct page *page;
        unsigned long xen_pfn;
        int ret;
 
-       ret = get_user_pages_fast(addr, 1, writeable ? FOLL_WRITE : 0, &page);
+       ret = get_user_pages_fast(addr, 1, batch->writeable ? FOLL_WRITE : 0, &page);
        if (ret < 0)
                return ret;
 
@@ -746,9 +747,13 @@ static void gntdev_put_pages(struct gntdev_copy_batch *batch)
 {
        unsigned int i;
 
-       for (i = 0; i < batch->nr_pages; i++)
+       for (i = 0; i < batch->nr_pages; i++) {
+               if (batch->writeable && !PageDirty(batch->pages[i]))
+                       set_page_dirty_lock(batch->pages[i]);
                put_page(batch->pages[i]);
+       }
        batch->nr_pages = 0;
+       batch->writeable = false;
 }
 
 static int gntdev_copy(struct gntdev_copy_batch *batch)
@@ -837,8 +842,9 @@ static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
                        virt = seg->source.virt + copied;
                        off = (unsigned long)virt & ~XEN_PAGE_MASK;
                        len = min(len, (size_t)XEN_PAGE_SIZE - off);
+                       batch->writeable = false;
 
-                       ret = gntdev_get_page(batch, virt, false, &gfn);
+                       ret = gntdev_get_page(batch, virt, &gfn);
                        if (ret < 0)
                                return ret;
 
@@ -856,8 +862,9 @@ static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
                        virt = seg->dest.virt + copied;
                        off = (unsigned long)virt & ~XEN_PAGE_MASK;
                        len = min(len, (size_t)XEN_PAGE_SIZE - off);
+                       batch->writeable = true;
 
-                       ret = gntdev_get_page(batch, virt, true, &gfn);
+                       ret = gntdev_get_page(batch, virt, &gfn);
                        if (ret < 0)
                                return ret;
 
-- 
1.9.1