[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [RFC PATCH 24/60] hyper_dmabuf: waits for resp only if WAIT_AFTER_SYNC_REQ == 1



hyper_dmabuf's sync_request (previously hyper_dmabuf_sync_request_
and_wait) now does not wait for the response from exporter if
WAIT_AFTER_SYNC_REQ==0. This is to prevent performance degradation
due to the communication latency while doing indirect hyper DMABUF
synchronization.

This patch also includes some minor changes as follows:

1. hyper_dmabuf_free_sgt is removed. Now we call sg_free_table and
   kfree directly from all the places where this function was executed.
   This was done for conciseness.

2. changed hyper_dmabuf_get_domid to hyper_dmabuf_xen_get_domid for
   consistency in func names in the backend.

3. some minor clean-ups

Signed-off-by: Dongwon Kim <dongwon.kim@xxxxxxxxx>
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h       |  2 -
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c        | 91 +++++++++++-----------
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c        |  2 -
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c    |  2 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 14 ++--
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h   |  2 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c    |  2 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c    | 21 +++--
 8 files changed, 69 insertions(+), 67 deletions(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
index ee1886c..d5125f2 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
@@ -23,5 +23,3 @@
  */
 
 /* configuration */
-
-#define CURRENT_TARGET XEN
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index a017070..d7a35fc 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -131,7 +131,7 @@ struct hyper_dmabuf_pages_info *hyper_dmabuf_ext_pgs(struct 
sg_table *sgt)
 
 /* create sg_table with given pages and other parameters */
 struct sg_table* hyper_dmabuf_create_sgt(struct page **pages,
-                               int frst_ofst, int last_len, int nents)
+                                        int frst_ofst, int last_len, int nents)
 {
        struct sg_table *sgt;
        struct scatterlist *sgl;
@@ -144,7 +144,11 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page 
**pages,
 
        ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
        if (ret) {
-               hyper_dmabuf_free_sgt(sgt);
+               if (sgt) {
+                       sg_free_table(sgt);
+                       kfree(sgt);
+               }
+
                return NULL;
        }
 
@@ -165,15 +169,6 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page 
**pages,
        return sgt;
 }
 
-/* free sg_table */
-void hyper_dmabuf_free_sgt(struct sg_table* sgt)
-{
-       if (sgt) {
-               sg_free_table(sgt);
-               kfree(sgt);
-       }
-}
-
 int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int 
force)
 {
        struct sgt_list *sgtl;
@@ -264,7 +259,9 @@ int hyper_dmabuf_cleanup_sgt_info(struct 
hyper_dmabuf_sgt_info *sgt_info, int fo
        return 0;
 }
 
-inline int hyper_dmabuf_sync_request_and_wait(int id, int dmabuf_ops)
+#define WAIT_AFTER_SYNC_REQ 1
+
+inline int hyper_dmabuf_sync_request(int id, int dmabuf_ops)
 {
        struct hyper_dmabuf_req *req;
        struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
@@ -279,7 +276,7 @@ inline int hyper_dmabuf_sync_request_and_wait(int id, int 
dmabuf_ops)
        hyper_dmabuf_create_request(req, HYPER_DMABUF_OPS_TO_SOURCE, 
&operands[0]);
 
        /* send request and wait for a response */
-       ret = ops->send_req(HYPER_DMABUF_DOM_ID(id), req, true);
+       ret = ops->send_req(HYPER_DMABUF_DOM_ID(id), req, WAIT_AFTER_SYNC_REQ);
 
        kfree(req);
 
@@ -297,8 +294,8 @@ static int hyper_dmabuf_ops_attach(struct dma_buf* dmabuf, 
struct device* dev,
 
        sgt_info = (struct hyper_dmabuf_imported_sgt_info 
*)attach->dmabuf->priv;
 
-       ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-                                                HYPER_DMABUF_OPS_ATTACH);
+       ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+                                       HYPER_DMABUF_OPS_ATTACH);
 
        if (ret < 0) {
                dev_err(hyper_dmabuf_private.device,
@@ -319,8 +316,8 @@ static void hyper_dmabuf_ops_detach(struct dma_buf* dmabuf, 
struct dma_buf_attac
 
        sgt_info = (struct hyper_dmabuf_imported_sgt_info 
*)attach->dmabuf->priv;
 
-       ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-                                                HYPER_DMABUF_OPS_DETACH);
+       ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+                                       HYPER_DMABUF_OPS_DETACH);
 
        if (ret < 0) {
                dev_err(hyper_dmabuf_private.device,
@@ -354,8 +351,8 @@ static struct sg_table* hyper_dmabuf_ops_map(struct 
dma_buf_attachment *attachme
                 goto err_free_sg;
         }
 
-       ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-                                               HYPER_DMABUF_OPS_MAP);
+       ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+                                       HYPER_DMABUF_OPS_MAP);
 
        kfree(page_info->pages);
        kfree(page_info);
@@ -390,8 +387,8 @@ static void hyper_dmabuf_ops_unmap(struct 
dma_buf_attachment *attachment,
        sg_free_table(sg);
        kfree(sg);
 
-       ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-                                               HYPER_DMABUF_OPS_UNMAP);
+       ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+                                       HYPER_DMABUF_OPS_UNMAP);
 
        if (ret < 0) {
                dev_err(hyper_dmabuf_private.device,
@@ -419,19 +416,23 @@ static void hyper_dmabuf_ops_release(struct dma_buf 
*dma_buf)
 
        if (sgt_info->num_importers == 0) {
                ops->unmap_shared_pages(&sgt_info->refs_info, sgt_info->nents);
-               hyper_dmabuf_free_sgt(sgt_info->sgt);
-               sgt_info->sgt = NULL;
+
+               if (sgt_info->sgt) {
+                       sg_free_table(sgt_info->sgt);
+                       kfree(sgt_info->sgt);
+                       sgt_info->sgt = NULL;
+               }
        }
 
        final_release = sgt_info && !sgt_info->valid &&
                        !sgt_info->num_importers;
 
        if (final_release) {
-               ret = 
hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-                                                       
HYPER_DMABUF_OPS_RELEASE_FINAL);
+               ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+                                               HYPER_DMABUF_OPS_RELEASE_FINAL);
        } else {
-               ret = 
hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-                                                       
HYPER_DMABUF_OPS_RELEASE);
+               ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+                                               HYPER_DMABUF_OPS_RELEASE);
        }
 
        if (ret < 0) {
@@ -459,8 +460,8 @@ static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf 
*dmabuf, enum dma_da
 
        sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
 
-       ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-                                               
HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS);
+       ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+                                       HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS);
        if (ret < 0) {
                dev_err(hyper_dmabuf_private.device,
                        "hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
@@ -479,8 +480,8 @@ static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf 
*dmabuf, enum dma_data
 
        sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
 
-       ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-                                               
HYPER_DMABUF_OPS_END_CPU_ACCESS);
+       ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+                                       HYPER_DMABUF_OPS_END_CPU_ACCESS);
        if (ret < 0) {
                dev_err(hyper_dmabuf_private.device,
                        "hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
@@ -499,8 +500,8 @@ static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf 
*dmabuf, unsigned long
 
        sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
 
-       ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-                                               HYPER_DMABUF_OPS_KMAP_ATOMIC);
+       ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+                                       HYPER_DMABUF_OPS_KMAP_ATOMIC);
        if (ret < 0) {
                dev_err(hyper_dmabuf_private.device,
                        "hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
@@ -519,8 +520,8 @@ static void hyper_dmabuf_ops_kunmap_atomic(struct dma_buf 
*dmabuf, unsigned long
 
        sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
 
-       ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-                                               HYPER_DMABUF_OPS_KUNMAP_ATOMIC);
+       ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+                                       HYPER_DMABUF_OPS_KUNMAP_ATOMIC);
        if (ret < 0) {
                dev_err(hyper_dmabuf_private.device,
                        "hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
@@ -537,8 +538,8 @@ static void *hyper_dmabuf_ops_kmap(struct dma_buf *dmabuf, 
unsigned long pgnum)
 
        sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
 
-       ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-                                               HYPER_DMABUF_OPS_KMAP);
+       ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+                                       HYPER_DMABUF_OPS_KMAP);
        if (ret < 0) {
                dev_err(hyper_dmabuf_private.device,
                        "hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
@@ -557,8 +558,8 @@ static void hyper_dmabuf_ops_kunmap(struct dma_buf *dmabuf, 
unsigned long pgnum,
 
        sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
 
-       ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-                                               HYPER_DMABUF_OPS_KUNMAP);
+       ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+                                       HYPER_DMABUF_OPS_KUNMAP);
        if (ret < 0) {
                dev_err(hyper_dmabuf_private.device,
                        "hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
@@ -575,8 +576,8 @@ static int hyper_dmabuf_ops_mmap(struct dma_buf *dmabuf, 
struct vm_area_struct *
 
        sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
 
-       ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-                                               HYPER_DMABUF_OPS_MMAP);
+       ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+                                       HYPER_DMABUF_OPS_MMAP);
        if (ret < 0) {
                dev_err(hyper_dmabuf_private.device,
                        "hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
@@ -595,8 +596,8 @@ static void *hyper_dmabuf_ops_vmap(struct dma_buf *dmabuf)
 
        sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
 
-       ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-                                               HYPER_DMABUF_OPS_VMAP);
+       ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+                                       HYPER_DMABUF_OPS_VMAP);
        if (ret < 0) {
                dev_err(hyper_dmabuf_private.device,
                        "hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
@@ -615,8 +616,8 @@ static void hyper_dmabuf_ops_vunmap(struct dma_buf *dmabuf, 
void *vaddr)
 
        sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
 
-       ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
-                                               HYPER_DMABUF_OPS_VUNMAP);
+       ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+                                       HYPER_DMABUF_OPS_VUNMAP);
        if (ret < 0) {
                dev_err(hyper_dmabuf_private.device,
                        "hyper_dmabuf::%s Error:send dmabuf sync request 
failed\n", __func__);
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
index b9bd6d8..c99176ac 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
@@ -39,8 +39,6 @@
 #include "hyper_dmabuf_remote_sync.h"
 #include "hyper_dmabuf_list.h"
 
-#define FORCED_UNEXPORTING 0
-
 extern struct hyper_dmabuf_private hyper_dmabuf_private;
 
 struct cmd_process {
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
index 4c28f11..f93c936 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
@@ -53,7 +53,7 @@ extern struct hyper_dmabuf_private hyper_dmabuf_private;
  * later when unmapping operations are invoked to free those.
  *
  * The very first element on the bottom of each stack holds
- * are what is created when initial exporting is issued so it
+ * is what is created when initial exporting is issued so it
  * should not be modified or released by this fuction.
  */
 int hyper_dmabuf_remote_sync(int id, int ops)
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
index 14336c9..ba6b126 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
@@ -53,7 +53,7 @@ static int xen_comm_setup_data_dir(void)
 {
        char buf[255];
 
-       sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", 
hyper_dmabuf_get_domid());
+       sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", 
hyper_dmabuf_xen_get_domid());
        return xenbus_mkdir(XBT_NIL, buf, "");
 }
 
@@ -67,7 +67,7 @@ static int xen_comm_destroy_data_dir(void)
 {
        char buf[255];
 
-       sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", 
hyper_dmabuf_get_domid());
+       sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", 
hyper_dmabuf_xen_get_domid());
        return xenbus_rm(XBT_NIL, buf, "");
 }
 
@@ -130,7 +130,7 @@ static int xen_comm_get_ring_details(int domid, int rdomid, 
int *grefid, int *po
        return (ret <= 0 ? 1 : 0);
 }
 
-int hyper_dmabuf_get_domid(void)
+int hyper_dmabuf_xen_get_domid(void)
 {
        struct xenbus_transaction xbt;
        int domid;
@@ -192,7 +192,7 @@ static void remote_dom_exporter_watch_cb(struct 
xenbus_watch *watch,
         * it means that remote domain has setup it for us and we should connect
         * to it.
         */
-       ret = xen_comm_get_ring_details(hyper_dmabuf_get_domid(), rdom,
+       ret = xen_comm_get_ring_details(hyper_dmabuf_xen_get_domid(), rdom,
                                        &grefid, &port);
 
        if (ring_info && ret != 0) {
@@ -287,7 +287,7 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
 
        ret = xen_comm_add_tx_ring(ring_info);
 
-       ret = xen_comm_expose_ring_details(hyper_dmabuf_get_domid(), domid,
+       ret = xen_comm_expose_ring_details(hyper_dmabuf_xen_get_domid(), domid,
                                           ring_info->gref_ring, 
ring_info->port);
 
        /*
@@ -299,7 +299,7 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
        ring_info->watch.node = (const char*) kmalloc(sizeof(char) * 255, 
GFP_KERNEL);
        sprintf((char*)ring_info->watch.node,
                "/local/domain/%d/data/hyper_dmabuf/%d/port",
-               domid, hyper_dmabuf_get_domid());
+               domid, hyper_dmabuf_xen_get_domid());
 
        register_xenbus_watch(&ring_info->watch);
 
@@ -368,7 +368,7 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
                return 0;
        }
 
-       ret = xen_comm_get_ring_details(hyper_dmabuf_get_domid(), domid,
+       ret = xen_comm_get_ring_details(hyper_dmabuf_xen_get_domid(), domid,
                                        &rx_gref, &rx_port);
 
        if (ret) {
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
index 298af08..9c93165 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
@@ -50,7 +50,7 @@ struct xen_comm_rx_ring_info {
        struct gnttab_unmap_grant_ref unmap_op;
 };
 
-int hyper_dmabuf_get_domid(void);
+int hyper_dmabuf_xen_get_domid(void);
 
 int hyper_dmabuf_xen_init_comm_env(void);
 
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
index 6afb520..aa4c2f5 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
@@ -37,7 +37,7 @@
 #include "hyper_dmabuf_xen_shm.h"
 
 struct hyper_dmabuf_backend_ops xen_backend_ops = {
-       .get_vm_id = hyper_dmabuf_get_domid,
+       .get_vm_id = hyper_dmabuf_xen_get_domid,
        .share_pages = hyper_dmabuf_xen_share_pages,
        .unshare_pages = hyper_dmabuf_xen_unshare_pages,
        .map_shared_pages = (void *)hyper_dmabuf_xen_map_shared_pages,
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
index 122aac1..b158c11 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
@@ -108,8 +108,8 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int 
domid, int nents,
        /* Share 2nd level addressing pages in readonly mode*/
        for (i=0; i< n_lvl2_grefs; i++) {
                lvl3_table[i] = gnttab_grant_foreign_access(domid,
-                                                          
virt_to_mfn((unsigned long)lvl2_table+i*PAGE_SIZE ),
-                                                          1);
+                                                           
virt_to_mfn((unsigned long)lvl2_table+i*PAGE_SIZE ),
+                                                           1);
        }
 
        /* Share lvl3_table in readonly mode*/
@@ -240,10 +240,12 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int 
lvl3_gref, int domid, int n
 
        lvl3_table = (grant_ref_t *)pfn_to_kaddr(page_to_pfn(lvl3_table_page));
 
-       gnttab_set_map_op(&lvl3_map_ops, (unsigned long)lvl3_table, 
GNTMAP_host_map | GNTMAP_readonly,
+       gnttab_set_map_op(&lvl3_map_ops, (unsigned long)lvl3_table,
+                         GNTMAP_host_map | GNTMAP_readonly,
                          (grant_ref_t)lvl3_gref, domid);
 
-       gnttab_set_unmap_op(&lvl3_unmap_ops, (unsigned long)lvl3_table, 
GNTMAP_host_map | GNTMAP_readonly, -1);
+       gnttab_set_unmap_op(&lvl3_unmap_ops, (unsigned long)lvl3_table,
+                           GNTMAP_host_map | GNTMAP_readonly, -1);
 
        if (gnttab_map_refs(&lvl3_map_ops, NULL, &lvl3_table_page, 1)) {
                dev_err(hyper_dmabuf_private.device, "HYPERVISOR map grant ref 
failed");
@@ -285,8 +287,9 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int 
lvl3_gref, int domid, int n
        /* Checks if pages were mapped correctly */
        for (i = 0; i < n_lvl2_grefs; i++) {
                if (lvl2_map_ops[i].status) {
-                       dev_err(hyper_dmabuf_private.device, "HYPERVISOR map 
grant ref failed status = %d",
-                              lvl2_map_ops[i].status);
+                       dev_err(hyper_dmabuf_private.device,
+                               "HYPERVISOR map grant ref failed status = %d",
+                               lvl2_map_ops[i].status);
                        return NULL;
                } else {
                        lvl2_unmap_ops[i].handle = lvl2_map_ops[i].handle;
@@ -344,7 +347,8 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int 
lvl3_gref, int domid, int n
 
        for (i = 0; i < nents; i++) {
                if (data_map_ops[i].status) {
-                       dev_err(hyper_dmabuf_private.device, "HYPERVISOR map 
grant ref failed status = %d\n",
+                       dev_err(hyper_dmabuf_private.device,
+                               "HYPERVISOR map grant ref failed status = %d\n",
                                data_map_ops[i].status);
                        return NULL;
                } else {
@@ -376,7 +380,8 @@ int hyper_dmabuf_xen_unmap_shared_pages(void **refs_info, 
int nents) {
 
        if (sh_pages_info->unmap_ops == NULL ||
            sh_pages_info->data_pages == NULL) {
-               dev_warn(hyper_dmabuf_private.device, "Imported pages already 
cleaned up or buffer was not imported yet\n");
+               dev_warn(hyper_dmabuf_private.device,
+                        "Imported pages already cleaned up or buffer was not 
imported yet\n");
                return 0;
        }
 
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.