
[Xen-devel] [PATCH QEMU-XEN v3 3/8] xen: Switch uses of xc_map_foreign_range into xc_map_foreign_bulk



In Xen 4.7 we are refactoring parts of libxenctrl into a number of
separate libraries which will provide backward and forward API and ABI
compatibility.

One such library will be libxenforeignmemory which provides access to
privileged foreign mappings and which will provide an interface
equivalent to xc_map_foreign_bulk.

In preparation for this, switch all uses of xc_map_foreign_range to
xc_map_foreign_bulk. Since the size was always XC_PAGE_SIZE, the
necessary adjustments are trivial (sketched after the list below):

  * Pass &mfn (an array of length 1) instead of mfn
  * Add a local err variable to receive the errors and pass &err
    (again, an array of length 1)
  * Adjust any existing logging to include err too
  * Pass nr_pages=1 instead of size=XC_PAGE_SIZE
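
Concretely, each conversion then has the following shape (a sketch only,
not a literal hunk; "dom" and "pfn" stand for the caller's existing domid
and xen_pfn_t frame number, and "page" for the resulting mapping):

    int err;
    void *page;

    /* Before: map one page of size XC_PAGE_SIZE at pfn. */
    page = xc_map_foreign_range(xen_xc, dom, XC_PAGE_SIZE,
                                PROT_READ|PROT_WRITE, pfn);

    /* After: pass 1-element pfn/err arrays and nr_pages=1, no size. */
    page = xc_map_foreign_bulk(xen_xc, dom, PROT_READ|PROT_WRITE,
                               &pfn, &err, 1);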

There is one wrinkle in xen_console.c:con_initialise() where
con->ring_ref is an int but can in some code paths (when !xendev->dev)
be treated as an mfn. I think this is an existing latent truncation
hazard on platforms where xen_pfn_t is 64-bit and int is 32-bit (e.g.
amd64, both arm* variants). I'm unsure under what circumstances
xendev->dev can be NULL or if anything elsewhere ensures the value
fits into an int. For now I just use a temporary xen_pfn_t to, in
effect, upcast the pointer from int* to xen_pfn_t*.
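
In other words the xen_console.c hunk below simply does:

    xen_pfn_t mfn = con->ring_ref;   /* widen the int to a xen_pfn_t */
    con->sring = xc_map_foreign_bulk(xen_xc, con->xendev.dom,
                                     PROT_READ|PROT_WRITE,
                                     &mfn, &err, 1);

rather than passing (xen_pfn_t *)&con->ring_ref, which would read past
the end of the int on the 64-bit xen_pfn_t platforms mentioned above.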

Build tested with 4.0 and 4.5.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 hw/char/xen_console.c |  9 +++++----
 hw/display/xenfb.c    |  6 +++---
 xen-hvm.c             | 29 +++++++++++++++--------------
 3 files changed, 23 insertions(+), 21 deletions(-)

diff --git a/hw/char/xen_console.c b/hw/char/xen_console.c
index c5a023c..6ef477a 100644
--- a/hw/char/xen_console.c
+++ b/hw/char/xen_console.c
@@ -219,6 +219,7 @@ static int con_initialise(struct XenDevice *xendev)
 {
     struct XenConsole *con = container_of(xendev, struct XenConsole, xendev);
     int limit;
+    int err;
 
     if (xenstore_read_int(con->console, "ring-ref", &con->ring_ref) == -1)
        return -1;
@@ -228,10 +229,10 @@ static int con_initialise(struct XenDevice *xendev)
        con->buffer.max_capacity = limit;
 
     if (!xendev->dev) {
-        con->sring = xc_map_foreign_range(xen_xc, con->xendev.dom,
-                                          XC_PAGE_SIZE,
-                                          PROT_READ|PROT_WRITE,
-                                          con->ring_ref);
+        xen_pfn_t mfn = con->ring_ref;
+        con->sring = xc_map_foreign_bulk(xen_xc, con->xendev.dom,
+                                        PROT_READ|PROT_WRITE,
+                                        &mfn, &err, 1);
     } else {
         con->sring = xengnttab_map_grant_ref(xendev->gnttabdev, con->xendev.dom,
                                              con->ring_ref,
diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c
index 8a61e95..e3fdf33 100644
--- a/hw/display/xenfb.c
+++ b/hw/display/xenfb.c
@@ -94,6 +94,7 @@ struct XenFB {
 static int common_bind(struct common *c)
 {
     uint64_t mfn;
+    int err;
 
     if (xenstore_read_fe_uint64(&c->xendev, "page-ref", &mfn) == -1)
        return -1;
@@ -102,9 +103,8 @@ static int common_bind(struct common *c)
     if (xenstore_read_fe_int(&c->xendev, "event-channel", &c->xendev.remote_port) == -1)
        return -1;
 
-    c->page = xc_map_foreign_range(xen_xc, c->xendev.dom,
-                                  XC_PAGE_SIZE,
-                                  PROT_READ | PROT_WRITE, mfn);
+    c->page = xc_map_foreign_bulk(xen_xc, c->xendev.dom,
+                                 PROT_READ | PROT_WRITE, &mfn, &err, 1);
     if (c->page == NULL)
        return -1;
 
diff --git a/xen-hvm.c b/xen-hvm.c
index 54cbd72..3a56099 100644
--- a/xen-hvm.c
+++ b/xen-hvm.c
@@ -1166,7 +1166,7 @@ static void xen_wakeup_notifier(Notifier *notifier, void *data)
 int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
                  MemoryRegion **ram_memory)
 {
-    int i, rc;
+    int i, rc, map_err;
     xen_pfn_t ioreq_pfn;
     xen_pfn_t bufioreq_pfn;
     evtchn_port_t bufioreq_evtchn;
@@ -1213,33 +1213,34 @@ int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
     DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
     DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);
 
-    state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
-                                              PROT_READ|PROT_WRITE, ioreq_pfn);
+    state->shared_page = xc_map_foreign_bulk(xen_xc, xen_domid,
+                                            PROT_READ|PROT_WRITE,
+                                            &ioreq_pfn, &map_err, 1);
     if (state->shared_page == NULL) {
-        hw_error("map shared IO page returned error %d handle=" XC_INTERFACE_FMT,
-                 errno, xen_xc);
+        hw_error("map shared IO page returned error %d (%d) handle=" XC_INTERFACE_FMT,
+                 errno, map_err, xen_xc);
     }
 
     rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
     if (!rc) {
         DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
         state->shared_vmport_page =
-            xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
-                                 PROT_READ|PROT_WRITE, ioreq_pfn);
+               xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE,
+                                   &ioreq_pfn, &map_err, 1);
         if (state->shared_vmport_page == NULL) {
-            hw_error("map shared vmport IO page returned error %d handle="
-                     XC_INTERFACE_FMT, errno, xen_xc);
+            hw_error("map shared vmport IO page returned error %d (%d) handle="
+                     XC_INTERFACE_FMT, errno, map_err, xen_xc);
         }
     } else if (rc != -ENOSYS) {
         hw_error("get vmport regs pfn returned error %d, rc=%d", errno, rc);
     }
 
-    state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid,
-                                                   XC_PAGE_SIZE,
-                                                   PROT_READ|PROT_WRITE,
-                                                   bufioreq_pfn);
+    state->buffered_io_page = xc_map_foreign_bulk(xen_xc, xen_domid,
+                                                  PROT_READ|PROT_WRITE,
+                                                  &bufioreq_pfn,
+                                                  &map_err, 1);
     if (state->buffered_io_page == NULL) {
-        hw_error("map buffered IO page returned error %d", errno);
+        hw_error("map buffered IO page returned error %d (%d)", errno, map_err);
     }
 
     /* Note: cpus is empty at this point in init */
-- 
2.1.4

