
[RFC PATCH v2] xen/privcmd: Convert get_user_pages*() to pin_user_pages*()



In 2019, the pin_user_pages*() API was introduced, and callers of
get_user_pages*() are now being converted to it where appropriate.
See [1] and [2] for more information. This conversion is case 5 as
described in [1].

As discussed, pages need to be marked dirty before being unpinned.
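
For reference, a minimal sketch of the pin/dirty/unpin cycle this
patch adopts (the function below is illustrative only and is not
part of the patch):

        #include <linux/mm.h>

        /*
         * Pin user pages for a write, let the hypercall fill them,
         * then mark them dirty and unpin them in a single call.
         */
        static int demo_pin_cycle(unsigned long uaddr, int nr,
                                  struct page **pages)
        {
                int pinned;

                pinned = pin_user_pages_fast(uaddr, nr, FOLL_WRITE,
                                             pages);
                if (pinned < 0)
                        return pinned;  /* nothing pinned on error */

                /* ... pages are written to here ... */

                /* make_dirty == true: set_page_dirty_lock() before
                 * dropping the pin reference */
                unpin_user_pages_dirty_lock(pages, pinned, true);
                return 0;
        }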

Previously, if lock_pages() ended up partially pinning pages, it
returned an -ERRNO, which forced unlock_pages() to walk every entry
of pages[] up to *nr_pages* to check which ones were valid. This can
be avoided by having lock_pages() return the number of pages actually
pinned and report the -ERRNO separately through an output parameter.
With this fix, unlock_pages() no longer needs to validate pages[i]
up to *nr_pages* in the error case.
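
In other words, the caller's error path becomes the following
(mirroring the privcmd_ioctl_dm_op() hunk below; the common "out:"
label is assumed to hand the stored count to unlock_pages()):

        int errno = 0;
        long rc;

        rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &errno);
        if (errno < 0) {
                nr_pages = rc;  /* rc == pages actually pinned */
                rc = errno;     /* propagate the real error code */
                goto out;       /* out: unpins exactly nr_pages */
        }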

[1] Documentation/core-api/pin_user_pages.rst

[2] "Explicit pinning of user-space pages":
        https://lwn.net/Articles/807108/

Signed-off-by: Souptick Joarder <jrdr.linux@xxxxxxxxx>
Cc: John Hubbard <jhubbard@xxxxxxxxxx>
Cc: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
 drivers/xen/privcmd.c | 33 +++++++++++++++++++--------------
 1 file changed, 19 insertions(+), 14 deletions(-)

diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index a250d11..eea90cd 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -580,25 +580,30 @@ static long privcmd_ioctl_mmap_batch(
 
 static int lock_pages(
        struct privcmd_dm_op_buf kbufs[], unsigned int num,
-       struct page *pages[], unsigned int nr_pages)
+       struct page *pages[], unsigned int nr_pages, int *errno)
 {
        unsigned int i;
+       int pinned = 0, rc = 0;
 
        for (i = 0; i < num; i++) {
                unsigned int requested;
-               int pinned;
 
+               rc += pinned;
                requested = DIV_ROUND_UP(
                        offset_in_page(kbufs[i].uptr) + kbufs[i].size,
                        PAGE_SIZE);
-               if (requested > nr_pages)
-                       return -ENOSPC;
+               if (requested > nr_pages) {
+                       *errno = -ENOSPC;
+                       return rc;
+               }
 
-               pinned = get_user_pages_fast(
+               pinned = pin_user_pages_fast(
                        (unsigned long) kbufs[i].uptr,
                        requested, FOLL_WRITE, pages);
-               if (pinned < 0)
-                       return pinned;
+               if (pinned < 0) {
+                       *errno = pinned;
+                       return rc;
+               }
 
                nr_pages -= pinned;
                pages += pinned;
@@ -613,11 +618,7 @@ static void unlock_pages(struct page *pages[], unsigned int nr_pages)
 
        if (!pages)
                return;
-
-       for (i = 0; i < nr_pages; i++) {
-               if (pages[i])
-                       put_page(pages[i]);
-       }
+       unpin_user_pages_dirty_lock(pages, nr_pages, 1);
 }
 
 static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
@@ -630,6 +631,7 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
        struct xen_dm_op_buf *xbufs = NULL;
        unsigned int i;
        long rc;
+       int errno = 0;
 
        if (copy_from_user(&kdata, udata, sizeof(kdata)))
                return -EFAULT;
@@ -683,9 +685,12 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
                goto out;
        }
 
-       rc = lock_pages(kbufs, kdata.num, pages, nr_pages);
-       if (rc)
+       rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &errno);
+       if (errno < 0) {
+               nr_pages = rc;
+               rc = errno;
                goto out;
+       }
 
        for (i = 0; i < kdata.num; i++) {
                set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
-- 
1.9.1