[Xen-devel] [PATCH 6 of 12] backport 37b7842c2fb405c270efdce714425c17af3c78cd

Backport 37b7842c2fb405c270efdce714425c17af3c78cd from qemu upstream:

Move block dma helpers aiocb to store dma state (Avi Kivity)

With this change the DMA state struct (previously DMABlockState, now
DMAAIOCB) embeds the BlockDriverAIOCB as its first member and is allocated
from the aio pool, instead of holding a pointer to a separately allocated
aiocb; dma_aio_cancel recovers the state with container_of and only
forwards the cancel while an inner bdrv_aio_readv/writev request is in
flight.

---

diff --git a/dma-helpers.c b/dma-helpers.c
index a6b129d..a76c618 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -40,6 +40,7 @@ void qemu_sglist_destroy(QEMUSGList *qsg)
 }
 
 typedef struct {
+    BlockDriverAIOCB common;
     BlockDriverState *bs;
     BlockDriverAIOCB *acb;
     QEMUSGList *sg;
@@ -49,13 +50,13 @@ typedef struct {
     target_phys_addr_t sg_cur_byte;
     QEMUIOVector iov;
     QEMUBH *bh;
-} DMABlockState;
+} DMAAIOCB;
 
 static void dma_bdrv_cb(void *opaque, int ret);
 
 static void reschedule_dma(void *opaque)
 {
-    DMABlockState *dbs = (DMABlockState *)opaque;
+    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
 
     qemu_bh_delete(dbs->bh);
     dbs->bh = NULL;
@@ -64,7 +65,7 @@ static void reschedule_dma(void *opaque)
 
 static void continue_after_map_failure(void *opaque)
 {
-    DMABlockState *dbs = (DMABlockState *)opaque;
+    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
 
     dbs->bh = qemu_bh_new(reschedule_dma, dbs);
     qemu_bh_schedule(dbs->bh);
@@ -72,11 +73,12 @@ static void continue_after_map_failure(void *opaque)
 
 static void dma_bdrv_cb(void *opaque, int ret)
 {
-    DMABlockState *dbs = (DMABlockState *)opaque;
+    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
     target_phys_addr_t cur_addr, cur_len;
     void *mem;
     int i;
 
+    dbs->acb = NULL;
     dbs->sector_num += dbs->iov.size / 512;
     for (i = 0; i < dbs->iov.niov; ++i) {
         cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
@@ -86,10 +88,9 @@ static void dma_bdrv_cb(void *opaque, int ret)
     qemu_iovec_reset(&dbs->iov);
 
     if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
-        dbs->acb->cb(dbs->acb->opaque, ret);
+        dbs->common.cb(dbs->common.opaque, ret);
         qemu_iovec_destroy(&dbs->iov);
-        qemu_aio_release(dbs->acb);
-        qemu_free(dbs);
+        qemu_aio_release(dbs);
         return;
     }
 
@@ -113,11 +114,11 @@ static void dma_bdrv_cb(void *opaque, int ret)
     }
 
     if (dbs->is_write) {
-        bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
-                        dbs->iov.size / 512, dma_bdrv_cb, dbs);
+        dbs->acb = bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
+                                   dbs->iov.size / 512, dma_bdrv_cb, dbs);
     } else {
-        bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
-                       dbs->iov.size / 512, dma_bdrv_cb, dbs);
+        dbs->acb = bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
+                                  dbs->iov.size / 512, dma_bdrv_cb, dbs);
     }
 }
 
@@ -126,10 +127,10 @@ static BlockDriverAIOCB *dma_bdrv_io(
     BlockDriverCompletionFunc *cb, void *opaque,
     int is_write)
 {
-    DMABlockState *dbs = qemu_malloc(sizeof(*dbs));
+    DMAAIOCB *dbs = qemu_aio_get_pool(&dma_aio_pool, bs, cb, opaque);
 
+    dbs->acb = NULL;
     dbs->bs = bs;
-    dbs->acb = qemu_aio_get_pool(&dma_aio_pool, bs, cb, opaque);
     dbs->sg = sg;
     dbs->sector_num = sector_num;
     dbs->sg_cur_index = 0;
@@ -151,7 +152,7 @@ static BlockDriverAIOCB *dma_bdrv_io(
     }
 #endif
 
-    return dbs->acb;
+    return &dbs->common;
 }
 
 
@@ -171,12 +172,14 @@ BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
 
 static void dma_aio_cancel(BlockDriverAIOCB *acb)
 {
-    DMABlockState *dbs = (DMABlockState *)acb->opaque;
+    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
 
-    bdrv_aio_cancel(dbs->acb);
+    if (dbs->acb) {
+        bdrv_aio_cancel(dbs->acb);
+    }
 }
 
 void dma_helper_init(void)
 {
-    aio_pool_init(&dma_aio_pool, sizeof(BlockDriverAIOCB), dma_aio_cancel);
+    aio_pool_init(&dma_aio_pool, sizeof(DMAAIOCB), dma_aio_cancel);
 }
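
For context on the idiom this backport introduces: the per-request DMA
state now embeds the generic BlockDriverAIOCB as its first member
(dbs->common), the whole struct is allocated from the aio pool, and
dma_aio_cancel recovers the enclosing state from the generic handle with
container_of. Below is a minimal, self-contained C sketch of that
pattern; the types and names here (AIOCB, DMAState, dma_cancel) are
invented for illustration and are not the qemu API.

/*
 * Hypothetical sketch, not the qemu API: a generic completion block is
 * embedded as the first member of the larger per-request state, so the
 * caller holds only the generic handle and the implementation recovers
 * the full state with container_of.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

typedef struct AIOCB {                  /* stands in for BlockDriverAIOCB */
    void (*cancel)(struct AIOCB *acb);
} AIOCB;

typedef struct {
    AIOCB common;                       /* must be first, like dbs->common */
    long sector_num;                    /* example of embedded DMA state   */
} DMAState;

static void dma_cancel(AIOCB *acb)
{
    /* Recover the enclosing DMA state from the generic handle. */
    DMAState *dbs = container_of(acb, DMAState, common);
    printf("cancelling request at sector %ld\n", dbs->sector_num);
}

int main(void)
{
    /* In the patch, qemu_aio_get_pool() hands out the whole DMAAIOCB. */
    DMAState *dbs = malloc(sizeof(*dbs));
    dbs->common.cancel = dma_cancel;
    dbs->sector_num = 42;

    AIOCB *handle = &dbs->common;       /* what dma_bdrv_io now returns */
    handle->cancel(handle);             /* dispatches via container_of  */

    free(dbs);
    return 0;
}

Note also why dma_bdrv_cb clears dbs->acb on entry and stores the return
value of bdrv_aio_readv/writev back into it: dma_aio_cancel checks that
pointer, so a cancel is forwarded only while an inner request is actually
in flight.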
