To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH] qemu and qemu-xen: support empty write barriers in xen_disk
From: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Date: Wed, 24 Nov 2010 13:08:03 +0000
Cc: qemu-devel@xxxxxxxxxx
Delivery-date: Wed, 24 Nov 2010 05:08:36 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Alpine 2.00 (DEB 1167 2008-08-23)

Hi all,
this patch can be applied to both qemu-xen and upstream qemu; it adds
support for empty write barriers (BLKIF_OP_WRITE_BARRIER requests that
carry no data segments) to xen_disk.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
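
A note on terminology (illustrative only, not part of the patch): an "empty"
write barrier is simply a BLKIF_OP_WRITE_BARRIER request whose nr_segments
field is zero, i.e. a barrier that carries no data and only asks that
previously completed writes be flushed to stable storage. Assuming the
standard blkif_request_t layout from the public blkif.h header (the include
path differs between trees), a backend could recognise one with a check
along these lines:

#include <stdbool.h>
#include <xen/io/blkif.h>    /* adjust the include path for your tree */

/* Illustrative helper, not part of the patch: true if the request is a
 * write barrier that carries no data segments. */
static bool blkif_req_is_empty_barrier(const blkif_request_t *req)
{
    return req->operation == BLKIF_OP_WRITE_BARRIER &&
           req->nr_segments == 0;
}

xen_disk already has the request in ioreq->req, so the hunks below just
test ioreq->req.nr_segments directly.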

diff --git a/hw/xen_disk.c b/hw/xen_disk.c
index 38b5fbf..94af001 100644
--- a/hw/xen_disk.c
+++ b/hw/xen_disk.c
@@ -182,6 +182,10 @@ static int ioreq_parse(struct ioreq *ioreq)
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
     case BLKIF_OP_WRITE_BARRIER:
+        if (!ioreq->req.nr_segments) {
+            ioreq->presync = 1;
+            return 0;
+        }
        if (!syncwrite)
            ioreq->presync = ioreq->postsync = 1;
        /* fall through */
@@ -306,7 +310,7 @@ static int ioreq_runio_qemu_sync(struct ioreq *ioreq)
     int i, rc, len = 0;
     off_t pos;
 
-    if (ioreq_map(ioreq) == -1)
+    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1)
        goto err;
     if (ioreq->presync)
        bdrv_flush(blkdev->bs);
@@ -330,6 +334,8 @@ static int ioreq_runio_qemu_sync(struct ioreq *ioreq)
        break;
     case BLKIF_OP_WRITE:
     case BLKIF_OP_WRITE_BARRIER:
+        if (!ioreq->req.nr_segments)
+            break;
        pos = ioreq->start;
        for (i = 0; i < ioreq->v.niov; i++) {
            rc = bdrv_write(blkdev->bs, pos / BLOCK_SIZE,
@@ -387,7 +393,7 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
 
-    if (ioreq_map(ioreq) == -1)
+    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1)
        goto err;
 
     ioreq->aio_inflight++;
@@ -404,6 +410,8 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
     case BLKIF_OP_WRITE:
     case BLKIF_OP_WRITE_BARRIER:
         ioreq->aio_inflight++;
+        if (!ioreq->req.nr_segments)
+            break;
         bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                         &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                         qemu_aio_complete, ioreq);
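
Purely as an illustration, not a literal excerpt from the patched file: once
ioreq_parse() takes the early-return path for an empty barrier, only presync
is set, nothing is mapped and nothing is written, so in the synchronous path
the whole request reduces to roughly the sketch below (struct XenBlkDev,
struct ioreq and bdrv_flush() are the same names used in the hunks above):

/* Sketch only: what the sync path boils down to for an empty barrier. */
static int empty_barrier_sync(struct XenBlkDev *blkdev, struct ioreq *ioreq)
{
    if (ioreq->presync)
        bdrv_flush(blkdev->bs);   /* flush the backing image */
    return 0;                     /* success */
}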

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel