
[Xen-devel] [PATCH 1/1] qemu-qdisk: indirect descriptors



Introduction of indirect descriptors for qdisk.

Changes in the xen_blkif.h file:
 - struct blkif_x86_**_request now contains a union of
   'struct blkif_x86_**_request_direct' (the previous struct blkif_x86_**_request)
   and 'struct blkif_x86_**_request_indirect'
 - new helper functions that rewrite 'struct blkif_x86_**_request_**' into
   'struct blkif_request_local', named that way so as not to interfere with
   'blkif_request' from "tools/include/xen/io/blkif.h"
 - a set of macros to maintain the indirect descriptors (see the sketch
   after this list)
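
A minimal sketch of the page arithmetic behind those macros (illustrative
only; it assumes 4 KiB pages and an 8-byte 'struct blkif_request_segment',
which is what the values used in this patch work out to):

    #include <stdio.h>

    /* Illustrative only: mirrors the macros added to xen_blkif.h below. */
    #define XEN_PAGE_SIZE            4096
    #define SEG_DESC_SIZE            8   /* assumed sizeof(struct blkif_request_segment) */
    #define SEGS_PER_INDIRECT_FRAME  (XEN_PAGE_SIZE / SEG_DESC_SIZE)
    #define DIV_ROUND_UP(n, d)       (((n) + (d) - 1) / (d))
    #define INDIRECT_PAGES(segs)     DIV_ROUND_UP(segs, SEGS_PER_INDIRECT_FRAME)

    int main(void)
    {
        /* With MAX_INDIRECT_SEGMENTS == 32 a single indirect page suffices. */
        printf("segments per indirect page: %d\n", SEGS_PER_INDIRECT_FRAME); /* 512 */
        printf("indirect pages for 32 segments: %d\n", INDIRECT_PAGES(32));  /* 1 */
        return 0;
    }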

Changes in the xen_disk.c file:
 - a new boolean feature_indirect member
 - a new helper function ioreq_get_operation_and_nr_segments
 - a new ioreq_parse_indirect function called when 'BLKIF_OP_INDIRECT'
   occurs. The function grant-maps the pages holding the indirect
   descriptors and copies the segments into a local
   seg[MAX_INDIRECT_SEGMENTS] table placed in the ioreq (a sketch of the
   copy loop follows below).

   After that the ioreq_parse function proceeds without changes. For a
   direct request the segments are memcpy'd into the same seg table.
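
A hedged sketch of that copy loop (not part of the patch; the function name
is made up, grant mapping and error handling are omitted, and it reuses
SEGS_PER_INDIRECT_FRAME, XC_PAGE_SIZE and 'struct blkif_request_segment'
from the surrounding code; 'pages' stands for the area returned by
xc_gnttab_map_domain_grant_refs() in the real implementation):

    static void copy_indirect_segments(void *pages, uint16_t nr_segments,
                                       struct blkif_request_segment *seg)
    {
        struct blkif_request_segment *segments = NULL;
        int i, j;

        for (j = 0; j < nr_segments; j++) {
            i = j % SEGS_PER_INDIRECT_FRAME;      /* index within one frame */
            if (i == 0) {                         /* entered a new frame    */
                segments = pages + (j / SEGS_PER_INDIRECT_FRAME) * XC_PAGE_SIZE;
            }
            seg[j] = segments[i];                 /* gref, first_sect, last_sect */
        }
    }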

Signed-off-by: Paulina Szubarczyk <paulinaszubarczyk@xxxxxxxxx>
---
 hw/block/xen_blkif.h         | 151 ++++++++++++++++++++++++++++++----
 hw/block/xen_disk.c          | 187 ++++++++++++++++++++++++++++++++++---------
 include/hw/xen/xen_backend.h |   2 +
 3 files changed, 285 insertions(+), 55 deletions(-)

diff --git a/hw/block/xen_blkif.h b/hw/block/xen_blkif.h
index c68487cb..04dce2f 100644
--- a/hw/block/xen_blkif.h
+++ b/hw/block/xen_blkif.h
@@ -18,40 +18,97 @@ struct blkif_common_response {
 
 /* i386 protocol version */
 #pragma pack(push, 4)
-struct blkif_x86_32_request {
-       uint8_t        operation;    /* BLKIF_OP_???                         */
+struct blkif_x86_32_request_direct {
        uint8_t        nr_segments;  /* number of segments                   */
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint64_t       id;           /* private guest value, echoed in resp  */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-};
+} __attribute__((__packed__));
+
+struct blkif_x86_32_request_indirect {
+       uint8_t        operation;
+       uint16_t       nr_segments;
+       uint64_t       id;
+       blkif_sector_t sector_number;
+       blkif_vdev_t   handle;
+       uint16_t       _pad2;
+       grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+       uint64_t       _pad3;         /* make it 64 byte aligned */
+} __attribute__((__packed__));
+
+struct blkif_x86_32_request {
+       uint8_t            operation;
+       union  {
+               struct blkif_x86_32_request_direct direct;
+               struct blkif_x86_32_request_indirect indirect;
+       } u;
+} __attribute__((__packed__));
+
 struct blkif_x86_32_response {
        uint64_t        id;              /* copied from request */
        uint8_t         operation;       /* copied from request */
        int16_t         status;          /* BLKIF_RSP_???       */
-};
+} __attribute__((__packed__));
+
 typedef struct blkif_x86_32_request blkif_x86_32_request_t;
+typedef struct blkif_x86_32_request_direct blkif_x86_32_request_direct_t;
+typedef struct blkif_x86_32_request_indirect blkif_x86_32_request_indirect_t;
 typedef struct blkif_x86_32_response blkif_x86_32_response_t;
 #pragma pack(pop)
 
 /* x86_64 protocol version */
-struct blkif_x86_64_request {
-       uint8_t        operation;    /* BLKIF_OP_???                         */
+struct blkif_x86_64_request_direct {
        uint8_t        nr_segments;  /* number of segments                   */
        blkif_vdev_t   handle;       /* only for read/write requests         */
-       uint64_t       __attribute__((__aligned__(8))) id;
+       uint32_t       _pad1;        /* offsetof(blkif_request,u.rw.id) == 8 */
+       uint64_t       id;           /* private guest value, echoed in resp  */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-};
+} __attribute__((__packed__));
+
+struct blkif_x86_64_request_indirect {
+       uint8_t        operation;
+       uint16_t       nr_segments;
+       uint32_t       _pad1;  
+       uint64_t       id;
+       blkif_sector_t sector_number;
+       blkif_vdev_t   handle;
+       uint16_t       _pad2;
+       grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+       uint32_t      _pad3;         /* make it 64 byte aligned */
+} __attribute__((__packed__));
+
+struct blkif_x86_64_request {
+       uint8_t        operation;    /* BLKIF_OP_???                         */
+       union {
+               struct blkif_x86_64_request_direct direct;
+               struct blkif_x86_64_request_indirect indirect;
+       } u;
+} __attribute__((__packed__));
+
 struct blkif_x86_64_response {
-       uint64_t       __attribute__((__aligned__(8))) id;
+       uint64_t        id;              /* copied from request */
        uint8_t         operation;       /* copied from request */
        int16_t         status;          /* BLKIF_RSP_???       */
 };
+
 typedef struct blkif_x86_64_request blkif_x86_64_request_t;
+typedef struct blkif_x86_64_request_direct blkif_x86_64_request_direct_t;
+typedef struct blkif_x86_64_request_indirect blkif_x86_64_request_indirect_t;
 typedef struct blkif_x86_64_response blkif_x86_64_response_t;
 
+struct blkif_request_local {
+       uint8_t            operation;
+       union {
+               struct blkif_request direct;
+               struct blkif_request_indirect indirect;
+       } u;
+} __attribute__((__packed__));
+typedef struct blkif_request blkif_request_direct_t;
+typedef struct blkif_request_indirect blkif_request_indirect_t;
+typedef struct blkif_request_local blkif_request_local_t;
+
 DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response);
 DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response);
 DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response);
@@ -70,16 +127,27 @@ enum blkif_protocol {
        BLKIF_PROTOCOL_X86_64 = 3,
 };
 
-static inline void blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src)
+#define XEN_PAGE_SIZE 4096
+#define XEN_PAGES_PER_SEGMENT 1
+#define XEN_PAGES_PER_INDIRECT_FRAME \
+      (XEN_PAGE_SIZE/sizeof(struct blkif_request_segment))
+#define SEGS_PER_INDIRECT_FRAME \
+      (XEN_PAGES_PER_INDIRECT_FRAME / XEN_PAGES_PER_SEGMENT)
+#define MAX_INDIRECT_PAGES \
+      ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
+#define INDIRECT_PAGES(_segs) DIV_ROUND_UP(_segs, XEN_PAGES_PER_INDIRECT_FRAME)
+
+static inline void blkif_get_x86_32_req_direct(blkif_request_direct_t *dst,
+                                               blkif_x86_32_request_direct_t *src,
+                                               uint8_t operation)
 {
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
-       dst->operation = src->operation;
        dst->nr_segments = src->nr_segments;
        dst->handle = src->handle;
        dst->id = src->id;
        dst->sector_number = src->sector_number;
-       if (src->operation == BLKIF_OP_DISCARD) {
+       if (operation == BLKIF_OP_DISCARD) {
                struct blkif_request_discard *s = (void *)src;
                struct blkif_request_discard *d = (void *)dst;
                d->nr_sectors = s->nr_sectors;
@@ -93,16 +161,43 @@ static inline void blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_reque
                dst->seg[i] = src->seg[i];
 }
 
-static inline void blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src)
+static inline void blkif_get_x86_32_req_indirect(blkif_request_indirect_t *dst,
+                                                 blkif_x86_32_request_indirect_t *src)
 {
-       int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+       int i, n;
+
+       dst->operation = src->operation;
+       dst->nr_segments = src->nr_segments;
+       dst->handle = src->handle;
+       dst->id = src->id;
+       dst->sector_number = src->sector_number;
+       n = INDIRECT_PAGES(dst->nr_segments);
+       for (i = 0; i < n; i++)
+               dst->indirect_grefs[i] = src->indirect_grefs[i];
+}
 
+static inline void blkif_get_x86_32_req_local(blkif_request_local_t *dst,
+                                              blkif_x86_32_request_t *src)
+{
        dst->operation = src->operation;
+       if (dst->operation == BLKIF_OP_INDIRECT) {
+               blkif_get_x86_32_req_indirect(&dst->u.indirect, &src->u.indirect);
+       } else {
+               blkif_get_x86_32_req_direct(&dst->u.direct, &src->u.direct, dst->operation);
+       }
+}
+
+static inline void blkif_get_x86_64_req_direct(blkif_request_direct_t *dst,
+                                               blkif_x86_64_request_direct_t *src,
+                                               uint8_t operation)
+{
+       int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
        dst->nr_segments = src->nr_segments;
        dst->handle = src->handle;
        dst->id = src->id;
        dst->sector_number = src->sector_number;
-       if (src->operation == BLKIF_OP_DISCARD) {
+       if (operation == BLKIF_OP_DISCARD) {
                struct blkif_request_discard *s = (void *)src;
                struct blkif_request_discard *d = (void *)dst;
                d->nr_sectors = s->nr_sectors;
@@ -116,4 +211,30 @@ static inline void blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_reque
                dst->seg[i] = src->seg[i];
 }
 
+static inline void blkif_get_x86_64_req_indirect(blkif_request_indirect_t *dst,
+                                                 blkif_x86_64_request_indirect_t *src)
+{
+       int i, n;
+
+       dst->operation = src->operation;
+       dst->nr_segments = src->nr_segments;
+       dst->handle = src->handle;
+       dst->id = src->id;
+       dst->sector_number = src->sector_number;
+       n = INDIRECT_PAGES(dst->nr_segments);
+       for (i = 0; i < n; i++)
+               dst->indirect_grefs[i] = src->indirect_grefs[i];
+}
+
+static inline void blkif_get_x86_64_req_local(blkif_request_local_t *dst,
+                                              blkif_x86_64_request_t *src)
+{
+       dst->operation = src->operation;
+       if (dst->operation == BLKIF_OP_INDIRECT) {
+               blkif_get_x86_64_req_indirect(&dst->u.indirect, &src->u.indirect);
+       } else {
+               blkif_get_x86_64_req_direct(&dst->u.direct, &src->u.direct, dst->operation);
+       }
+}
+
 #endif /* __XEN_BLKIF_H__ */
diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 37e14d1..e497cde 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -52,7 +52,6 @@ static int max_requests = 32;
 /* ------------------------------------------------------------- */
 
 #define BLOCK_SIZE  512
-#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
 
 struct PersistentGrant {
     void *page;
@@ -69,8 +68,8 @@ struct PersistentRegion {
 typedef struct PersistentRegion PersistentRegion;
 
 struct ioreq {
-    blkif_request_t     req;
-    int16_t             status;
+    blkif_request_local_t   req;
+    int16_t                 status;
 
     /* parsed request */
     off_t               start;
@@ -80,19 +79,22 @@ struct ioreq {
     uint8_t             mapped;
 
     /* grant mapping */
-    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    uint32_t            domids[MAX_INDIRECT_SEGMENTS];
+    uint32_t            refs[MAX_INDIRECT_SEGMENTS];
     int                 prot;
-    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    void                *page[MAX_INDIRECT_SEGMENTS];
     void                *pages;
     int                 num_unmap;
 
+    /* indirect request */
+    struct blkif_request_segment seg[MAX_INDIRECT_SEGMENTS];
+
     /* aio status */
     int                 aio_inflight;
     int                 aio_errors;
 
     struct XenBlkDev    *blkdev;
-    QLIST_ENTRY(ioreq)   list;
+    QLIST_ENTRY(ioreq)  list;
     BlockAcctCookie     acct;
 };
 
@@ -131,6 +133,9 @@ struct XenBlkDev {
     unsigned int        persistent_gnt_count;
     unsigned int        max_grants;
 
+    /* Indirect descriptors */
+    gboolean            feature_indirect;
+
     /* qemu block driver */
     DriveInfo           *dinfo;
     BlockBackend        *blk;
@@ -216,7 +221,11 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
         ioreq = g_malloc0(sizeof(*ioreq));
         ioreq->blkdev = blkdev;
         blkdev->requests_total++;
-        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+        if (blkdev->feature_indirect) {
+            qemu_iovec_init(&ioreq->v, MAX_INDIRECT_SEGMENTS);
+        } else {
+            qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+        }
     } else {
         /* get one from freelist */
         ioreq = QLIST_FIRST(&blkdev->freelist);
@@ -254,6 +263,57 @@ static void ioreq_release(struct ioreq *ioreq, bool finish)
     }
 }
 
+static void ioreq_get_operation_and_nr_segments(struct ioreq *ioreq, 
+                                                uint8_t *operation, 
+                                                uint16_t *nseg)
+{
+    if (ioreq->req.operation == BLKIF_OP_INDIRECT) {
+        *operation = ioreq->req.u.indirect.operation;
+        *nseg = ioreq->req.u.indirect.nr_segments;
+    } else {
+        *operation = ioreq->req.operation;
+        *nseg = ioreq->req.u.direct.nr_segments;
+    }
+}
+
+static int ioreq_parse_indirect(struct XenBlkDev *blkdev,
+                                blkif_request_indirect_t *req, uint32_t domid,
+                                struct blkif_request_segment *seg) 
+{
+    void *pages;
+    struct blkif_request_segment *segments = NULL;
+    int i, j, nr_indirect_grefs;
+
+    nr_indirect_grefs = INDIRECT_PAGES(req->nr_segments);
+
+    pages = xc_gnttab_map_domain_grant_refs(blkdev->xendev.gnttabdev, 
+                                            nr_indirect_grefs, domid, 
+                                            req->indirect_grefs,
+                                            PROT_READ);
+    
+    if (pages == NULL) {
+        xen_be_printf(&blkdev->xendev, 0, "can't map indirect grant refs %s\n",
+                      strerror(errno));
+        return -1;
+    }
+
+    for (i = 0, j = 0; j < req->nr_segments; j++) {
+        i = j % SEGS_PER_INDIRECT_FRAME;
+        if (i == 0) {
+            segments = pages + (j / SEGS_PER_INDIRECT_FRAME) * XC_PAGE_SIZE;
+        }
+        seg[j].gref = segments[i].gref;
+        seg[j].first_sect = segments[i].first_sect;
+        seg[j].last_sect = segments[i].last_sect;
+    }
+
+    if (xc_gnttab_munmap(blkdev->xendev.gnttabdev, pages, nr_indirect_grefs)) {
+        xen_be_printf(&blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
+                      strerror(errno));
+    }
+
+    return 0;
+}
+
 /*
  * translate request into iovec + start offset
  * do sanity checks along the way
@@ -261,21 +321,21 @@ static void ioreq_release(struct ioreq *ioreq, bool finish)
 static int ioreq_parse(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
+    uint8_t operation;
+    uint16_t nseg;
     uintptr_t mem;
     size_t len;
-    int i;
+    int i, r;
 
-    xen_be_printf(&blkdev->xendev, 3,
-                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 
"\n",
-                  ioreq->req.operation, ioreq->req.nr_segments,
-                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
-    switch (ioreq->req.operation) {
+    ioreq_get_operation_and_nr_segments(ioreq, &operation, &nseg);
+
+    switch (operation) {
     case BLKIF_OP_READ:
         ioreq->prot = PROT_WRITE; /* to memory */
         break;
     case BLKIF_OP_FLUSH_DISKCACHE:
         ioreq->presync = 1;
-        if (!ioreq->req.nr_segments) {
+        if (!nseg) {
             return 0;
         }
         /* fall through */
@@ -286,35 +346,53 @@ static int ioreq_parse(struct ioreq *ioreq)
         return 0;
     default:
         xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
-                      ioreq->req.operation);
+                      operation);
         goto err;
     };
 
-    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
+    if (operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
         xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
         goto err;
     }
 
-    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
-    for (i = 0; i < ioreq->req.nr_segments; i++) {
-        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+    if (ioreq->req.operation == BLKIF_OP_INDIRECT) {
+        if (nseg > MAX_INDIRECT_SEGMENTS) {
             xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
             goto err;
         }
-        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
+        r = ioreq_parse_indirect(ioreq->blkdev, &ioreq->req.u.indirect, 
+                                 blkdev->xendev.dom, ioreq->seg);
+        if (r != 0) {
+            xen_be_printf(&blkdev->xendev, 0, 
+                                  "error: failed to map indirect segments\n");
+            goto err;
+        }
+        ioreq->start = ioreq->req.u.indirect.sector_number * blkdev->file_blk;
+    } else {
+        if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
+            goto err;
+        }
+        memcpy(ioreq->seg, ioreq->req.u.direct.seg,
+               sizeof(struct blkif_request_segment) * nseg);
+        ioreq->start = ioreq->req.u.direct.sector_number * blkdev->file_blk;
+    }
+
+    for (i = 0; i < nseg; i++) {
+        if (ioreq->seg[i].first_sect > ioreq->seg[i].last_sect) {
             xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
             goto err;
         }
-        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
+        if (ioreq->seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
             xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
             goto err;
         }
 
         ioreq->domids[i] = blkdev->xendev.dom;
-        ioreq->refs[i]   = ioreq->req.seg[i].gref;
+        ioreq->refs[i]   = ioreq->seg[i].gref;
 
-        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
-        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
+        mem = ioreq->seg[i].first_sect * blkdev->file_blk;
+        len = (ioreq->seg[i].last_sect - ioreq->seg[i].first_sect + 1) * blkdev->file_blk;
         qemu_iovec_add(&ioreq->v, (void*)mem, len);
     }
     if (ioreq->start + ioreq->v.size > blkdev->file_size) {
@@ -365,9 +443,9 @@ static void ioreq_unmap(struct ioreq *ioreq)
 static int ioreq_map(struct ioreq *ioreq)
 {
     XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
-    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    uint32_t domids[MAX_INDIRECT_SEGMENTS];
+    uint32_t refs[MAX_INDIRECT_SEGMENTS];
+    void *page[MAX_INDIRECT_SEGMENTS];
     int i, j, new_maps = 0;
     PersistentGrant *grant;
     PersistentRegion *region;
@@ -505,10 +583,14 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
 static void qemu_aio_complete(void *opaque, int ret)
 {
     struct ioreq *ioreq = opaque;
+    uint8_t operation;
+    uint16_t nseg;
+
+    ioreq_get_operation_and_nr_segments(ioreq, &operation, &nseg);
 
     if (ret != 0) {
         xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
-                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
+                      operation == BLKIF_OP_READ ? "read" : "write");
         ioreq->aio_errors++;
     }
 
@@ -531,10 +613,10 @@ static void qemu_aio_complete(void *opaque, int ret)
     ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
     ioreq_unmap(ioreq);
     ioreq_finish(ioreq);
-    switch (ioreq->req.operation) {
+    switch (operation) {
     case BLKIF_OP_WRITE:
     case BLKIF_OP_FLUSH_DISKCACHE:
-        if (!ioreq->req.nr_segments) {
+        if (!nseg) {
             break;
         }
     case BLKIF_OP_READ:
@@ -550,8 +632,12 @@ static void qemu_aio_complete(void *opaque, int ret)
 static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
+    uint8_t operation;
+    uint16_t nseg;
 
-    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
+    ioreq_get_operation_and_nr_segments(ioreq, &operation, &nseg);
+
+    if (nseg && ioreq_map(ioreq) == -1) {
         goto err_no_map;
     }
 
@@ -561,7 +647,7 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
         return 0;
     }
 
-    switch (ioreq->req.operation) {
+    switch (operation) {
     case BLKIF_OP_READ:
         block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                          ioreq->v.size, BLOCK_ACCT_READ);
@@ -572,7 +658,7 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
         break;
     case BLKIF_OP_WRITE:
     case BLKIF_OP_FLUSH_DISKCACHE:
-        if (!ioreq->req.nr_segments) {
+        if (!nseg) {
             break;
         }
 
@@ -617,8 +703,13 @@ static int blk_send_response_one(struct ioreq *ioreq)
     blkif_response_t  resp;
     void              *dst;
 
-    resp.id        = ioreq->req.id;
-    resp.operation = ioreq->req.operation;
+    if (ioreq->req.operation == BLKIF_OP_INDIRECT) {
+        resp.id        = ioreq->req.u.indirect.id;
+        resp.operation = ioreq->req.u.indirect.operation;
+    } else {
+        resp.id        = ioreq->req.u.direct.id;
+        resp.operation = ioreq->req.operation;
+    }
     resp.status    = ioreq->status;
 
     /* Place on the response ring for the relevant domain. */
@@ -683,11 +774,11 @@ static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_I
                sizeof(ioreq->req));
         break;
     case BLKIF_PROTOCOL_X86_32:
-        blkif_get_x86_32_req(&ioreq->req,
+        blkif_get_x86_32_req_local(&ioreq->req,
                              RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
         break;
     case BLKIF_PROTOCOL_X86_64:
-        blkif_get_x86_64_req(&ioreq->req,
+        blkif_get_x86_64_req_local(&ioreq->req,
                              RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
         break;
     }
@@ -756,6 +847,7 @@ static void blk_bh(void *opaque)
 static void blk_alloc(struct XenDevice *xendev)
 {
     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
+    int max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
     QLIST_INIT(&blkdev->inflight);
     QLIST_INIT(&blkdev->finished);
@@ -764,8 +856,15 @@ static void blk_alloc(struct XenDevice *xendev)
     if (xen_mode != XEN_EMULATE) {
         batch_maps = 1;
     }
+    blkdev->feature_indirect = true;
+
+    if (blkdev->feature_indirect) {
+        max_segments = MAX_INDIRECT_SEGMENTS;
+    }
+
     if (xc_gnttab_set_max_grants(xendev->gnttabdev,
-            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
+            MAX_GRANTS(max_requests, max_segments)) < 0) {
         xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                       strerror(errno));
     }
@@ -855,6 +954,10 @@ static int blk_init(struct XenDevice *xendev)
     xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
     xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
     xenstore_write_be_int(&blkdev->xendev, "info", info);
+    if (blkdev->feature_indirect) {
+        xenstore_write_be_int(&blkdev->xendev, "feature-max-indirect-segments",
+                              MAX_INDIRECT_SEGMENTS);
+    }
 
     blk_parse_discard(blkdev);
 
@@ -1008,7 +1111,11 @@ static int blk_connect(struct XenDevice *xendev)
 
     if (blkdev->feature_persistent) {
         /* Init persistent grants */
-        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
+        if (blkdev->feature_indirect) {
+            blkdev->max_grants = max_requests * MAX_INDIRECT_SEGMENTS;
+        } else {
+            blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
+        }
         blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                              NULL, NULL,
                                              batch_maps ?
diff --git a/include/hw/xen/xen_backend.h b/include/hw/xen/xen_backend.h
index 3b4125e..6836f98 100644
--- a/include/hw/xen/xen_backend.h
+++ b/include/hw/xen/xen_backend.h
@@ -15,6 +15,8 @@ struct XenDevice;
 #define DEVOPS_FLAG_NEED_GNTDEV   1
 /* don't expect frontend doing correct state transitions (aka console quirk) */
 #define DEVOPS_FLAG_IGNORE_STATE  2
+/* maximum number of segments in an indirect block request */
+#define MAX_INDIRECT_SEGMENTS 32
 
 struct XenDevOps {
     size_t    size;
-- 
1.9.1

