
[Xen-devel] [PATCH qemu-xen-traditional] Fix after blkif.h update



Xen changeset 24875:a59c1dcfe968 made an incompatible change to the interface
headers: the BLKIF_MAX_SEGMENTS_PER_REQUEST constant used here is now
BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK. That change needs to be reflected in
qemu-xen-traditional.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 block-vbd.c    |    2 +-
 hw/e1000.c     |    3 +++
 hw/ide.c       |    2 +-
 hw/scsi-disk.c |    2 +-
 hw/xen_blkif.h |    8 ++++----
 hw/xen_disk.c  |   12 ++++++------
 6 files changed, 16 insertions(+), 13 deletions(-)
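
For anyone building this tree against pre-24875 interface headers, a small
local compatibility shim along these lines could paper over the rename. This
is only a sketch, assuming the changeset renamed the constant without changing
its value; the shim is hypothetical and is not part of the patch below:

    /* Hypothetical compat shim, not part of this patch: when building
     * against a blkif.h from before 24875:a59c1dcfe968, map the new
     * segment-count name back to the old one so the renamed uses below
     * still compile.  Assumes the rename did not change the value. */
    #include <xen/io/blkif.h>

    #ifndef BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK
    #define BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK  BLKIF_MAX_SEGMENTS_PER_REQUEST
    #endif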

diff --git a/block-vbd.c b/block-vbd.c
index 71f1731..2a80f09 100644
--- a/block-vbd.c
+++ b/block-vbd.c
@@ -33,7 +33,7 @@
 
 #include <xen/io/blkif.h>
 #define IDE_DMA_BUF_SECTORS \
-       (((BLKIF_MAX_SEGMENTS_PER_REQUEST - 1 ) * TARGET_PAGE_SIZE) / 512)
+       (((BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK - 1 ) * TARGET_PAGE_SIZE) / 512)
 #define IDE_DMA_BUF_BYTES (IDE_DMA_BUF_SECTORS * 512)
 #define SECTOR_SIZE 512
 
diff --git a/hw/e1000.c b/hw/e1000.c
index bb3689e..97104ed 100644
--- a/hw/e1000.c
+++ b/hw/e1000.c
@@ -444,6 +444,8 @@ process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
             bytes = split_size;
             if (tp->size + bytes > msh)
                 bytes = msh - tp->size;
+
+            bytes = MIN(sizeof(tp->data) - tp->size, bytes);
             cpu_physical_memory_read(addr, tp->data + tp->size, bytes);
             if ((sz = tp->size + bytes) >= hdr && tp->size < hdr)
                 memmove(tp->header, tp->data, hdr);
@@ -459,6 +461,7 @@ process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
         // context descriptor TSE is not set, while data descriptor TSE is set
         DBGOUT(TXERR, "TCP segmentaion Error\n");
     } else {
+        split_size = MIN(sizeof(tp->data) - tp->size, split_size);
         cpu_physical_memory_read(addr, tp->data + tp->size, split_size);
         tp->size += split_size;
     }
diff --git a/hw/ide.c b/hw/ide.c
index 791666b..c3d3d60 100644
--- a/hw/ide.c
+++ b/hw/ide.c
@@ -209,7 +209,7 @@
 #ifdef CONFIG_STUBDOM
 #include <xen/io/blkif.h>
 #define IDE_DMA_BUF_SECTORS \
-       (((BLKIF_MAX_SEGMENTS_PER_REQUEST - 1 ) * TARGET_PAGE_SIZE) / 512)
+       (((BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK - 1 ) * TARGET_PAGE_SIZE) / 512)
 #else
 #define IDE_DMA_BUF_SECTORS 256
 #endif
diff --git a/hw/scsi-disk.c b/hw/scsi-disk.c
index 520009e..99c2cdf 100644
--- a/hw/scsi-disk.c
+++ b/hw/scsi-disk.c
@@ -42,7 +42,7 @@ do { fprintf(stderr, "scsi-disk: " fmt , ##args); } while (0)
 
 #ifdef CONFIG_STUBDOM
 #include <xen/io/blkif.h>
-#define SCSI_DMA_BUF_SIZE    ((BLKIF_MAX_SEGMENTS_PER_REQUEST - 1) * TARGET_PAGE_SIZE)
+#define SCSI_DMA_BUF_SIZE    ((BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK - 1) * TARGET_PAGE_SIZE)
 #else
 #define SCSI_DMA_BUF_SIZE    131072
 #endif
diff --git a/hw/xen_blkif.h b/hw/xen_blkif.h
index ca3a65b..485a166 100644
--- a/hw/xen_blkif.h
+++ b/hw/xen_blkif.h
@@ -24,7 +24,7 @@ struct blkif_x86_32_request {
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint64_t       id;           /* private guest value, echoed in resp  */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
-       struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+       struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK];
 };
 struct blkif_x86_32_response {
        uint64_t        id;              /* copied from request */
@@ -42,7 +42,7 @@ struct blkif_x86_64_request {
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint64_t       __attribute__((__aligned__(8))) id;
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
-       struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+       struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK];
 };
 struct blkif_x86_64_response {
        uint64_t       __attribute__((__aligned__(8))) id;
@@ -72,7 +72,7 @@ enum blkif_protocol {
 
 static inline void blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src)
 {
-       int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+       int i, n = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
 
        dst->operation = src->operation;
        dst->nr_segments = src->nr_segments;
@@ -87,7 +87,7 @@ static inline void blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_reque
 
 static inline void blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src)
 {
-       int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+       int i, n = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
 
        dst->operation = src->operation;
        dst->nr_segments = src->nr_segments;
diff --git a/hw/xen_disk.c b/hw/xen_disk.c
index 6aebb77..092def8 100644
--- a/hw/xen_disk.c
+++ b/hw/xen_disk.c
@@ -55,7 +55,7 @@ static int use_aio      = 0;
 /* ------------------------------------------------------------- */
 
 #define BLOCK_SIZE  512
-#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
+#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK + 2)
 
 struct ioreq {
     blkif_request_t     req;
@@ -68,10 +68,10 @@ struct ioreq {
     int                 postsync;
 
     /* grant mapping */
-    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK];
+    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK];
     int                 prot;
-    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    void                *page[BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK];
     void                *pages;
 
     /* aio status */
@@ -127,7 +127,7 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
        ioreq = qemu_mallocz(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
-        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK);
     } else {
        /* get one from freelist */
        ioreq = LIST_FIRST(&blkdev->freelist);
@@ -207,7 +207,7 @@ static int ioreq_parse(struct ioreq *ioreq)
 
     ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
     for (i = 0; i < ioreq->req.nr_segments; i++) {
-       if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+       if (i == BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
-- 
1.7.2.5