
[Xen-devel] [PATCH 1/3] tools/libxencall: use hypercall buffer device if available



Instead of using anonymous memory for hypercall buffers, which then
has to be locked into memory, use the hypercall buffer device of the
Linux privcmd driver if available.

This has the advantage of needing just a single mmap() to allocate
the buffer, and page migration or compaction can't make the buffer
inaccessible to the hypervisor.
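
For context, a minimal sketch (not part of the patch itself) of what
the new allocation path boils down to; buf_dev_alloc is a hypothetical
helper, buf_fd stands for the descriptor kept in the xencall handle
below, and PAGE_SIZE is the constant already used in linux.c:

    #include <sys/mman.h>

    /* Illustrative only: map npages of hypercall buffer memory from the
     * privcmd buffer device with a single mmap() call. */
    static void *buf_dev_alloc(int buf_fd, size_t npages)
    {
        void *p = mmap(NULL, npages * PAGE_SIZE, PROT_READ | PROT_WRITE,
                       MAP_SHARED, buf_fd, 0);

        /* The device backs the VMA with kernel-allocated pages, so no
         * mlock()/madvise() handling is needed in user space. */
        return p == MAP_FAILED ? NULL : p;
    }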

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 tools/libs/call/core.c    |  8 +++++++-
 tools/libs/call/linux.c   | 29 +++++++++++++++++++++++++++--
 tools/libs/call/private.h |  1 +
 3 files changed, 35 insertions(+), 3 deletions(-)

diff --git a/tools/libs/call/core.c b/tools/libs/call/core.c
index f3a34009da..c85ee1936d 100644
--- a/tools/libs/call/core.c
+++ b/tools/libs/call/core.c
@@ -19,7 +19,12 @@
 
 static int all_restrict_cb(Xentoolcore__Active_Handle *ah, domid_t domid) {
     xencall_handle *xcall = CONTAINER_OF(ah, *xcall, tc_ah);
-    return xentoolcore__restrict_by_dup2_null(xcall->fd);
+    int rc;
+
+    rc = xentoolcore__restrict_by_dup2_null(xcall->buf_fd);
+    if ( !rc )
+        rc = xentoolcore__restrict_by_dup2_null(xcall->fd);
+    return rc;
 }
 
 xencall_handle *xencall_open(xentoollog_logger *logger, unsigned open_flags)
@@ -30,6 +35,7 @@ xencall_handle *xencall_open(xentoollog_logger *logger, unsigned open_flags)
     if (!xcall) return NULL;
 
     xcall->fd = -1;
+    xcall->buf_fd = -1;
     xcall->tc_ah.restrict_callback = all_restrict_cb;
     xentoolcore__register_active_handle(&xcall->tc_ah);
 
diff --git a/tools/libs/call/linux.c b/tools/libs/call/linux.c
index 3f1b691fe7..fc1125972d 100644
--- a/tools/libs/call/linux.c
+++ b/tools/libs/call/linux.c
@@ -56,12 +56,27 @@ int osdep_xencall_open(xencall_handle *xcall)
     }
 
     xcall->fd = fd;
+
+    /*
+     * Try the same for the hypercall buffer device.
+     */
+    fd = open("/dev/xen/privcmd-buf", O_RDWR|O_CLOEXEC);
+    if ( fd == -1 && ( errno == ENOENT || errno == ENXIO || errno == ENODEV ) )
+    {
+        /* Fallback to /proc/xen/privcmd-buf */
+        fd = open("/proc/xen/privcmd-buf", O_RDWR|O_CLOEXEC);
+    }
+    xcall->buf_fd = fd;
+
     return 0;
 }
 
 int osdep_xencall_close(xencall_handle *xcall)
 {
     int fd = xcall->fd;
+
+    if ( xcall->buf_fd >= 0 )
+        close(xcall->buf_fd);
     if (fd == -1)
         return 0;
     return close(fd);
@@ -78,6 +93,14 @@ void *osdep_alloc_pages(xencall_handle *xcall, size_t npages)
     void *p;
     int rc, i, saved_errno;
 
+    if ( xcall->buf_fd >= 0 )
+    {
+        p = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, xcall->buf_fd, 0);
+        if ( p == MAP_FAILED )
+            PERROR("alloc_pages: mmap failed");
+        return p;
+    }
+
     /* Address returned by mmap is page aligned. */
     p = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_LOCKED, -1, 0);
     if ( p == MAP_FAILED )
@@ -119,8 +142,10 @@ out:
 void osdep_free_pages(xencall_handle *xcall, void *ptr, size_t npages)
 {
     int saved_errno = errno;
-    /* Recover the VMA flags. Maybe it's not necessary */
-    madvise(ptr, npages * PAGE_SIZE, MADV_DOFORK);
+
+    if ( xcall->buf_fd < 0 )
+        /* Recover the VMA flags. Maybe it's not necessary */
+        madvise(ptr, npages * PAGE_SIZE, MADV_DOFORK);
 
     munmap(ptr, npages * PAGE_SIZE);
     /* We MUST propagate the hypercall errno, not unmap call's. */
diff --git a/tools/libs/call/private.h b/tools/libs/call/private.h
index 533f0c4a8b..06d159cfb8 100644
--- a/tools/libs/call/private.h
+++ b/tools/libs/call/private.h
@@ -21,6 +21,7 @@ struct xencall_handle {
     xentoollog_logger *logger, *logger_tofree;
     unsigned flags;
     int fd;
+    int buf_fd;
     Xentoolcore__Active_Handle tc_ah;
 
     /*
-- 
2.13.7
