
[Xen-devel] [PATCH 2 of 12] backport 6bbff9a0b495918309074ac60375be5f9dc868b3



Backport commit 6bbff9a0b495918309074ac60375be5f9dc868b3 from upstream qemu:

Refactor aio callback allocation to use an aiocb pool (Avi Kivity)

---
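[Note for reviewers, not part of the patch: a sketch of how a block
driver would use the pool interface introduced here. The MyAIOCB /
my_aio_* names are hypothetical; the sketch assumes only the
aio_pool_init() / qemu_aio_get_pool() / qemu_aio_release() signatures
added by this change.]

    #include "block_int.h"      /* AIOPool, BlockDriverAIOCB, prototypes */

    /* Illustration only: a driver-private AIOCB type allocated from its
       own AIOPool instead of from the BlockDriver. */
    typedef struct MyAIOCB {
        BlockDriverAIOCB common;    /* must be first: the pool keeps
                                       pool, bs, cb, opaque and the
                                       free-list link in here */
        int my_private_state;
    } MyAIOCB;

    static void my_aio_cancel(BlockDriverAIOCB *acb)
    {
        /* driver-specific cancellation of the in-flight request */
    }

    static AIOPool my_aio_pool;

    static void my_driver_init(void)
    {
        /* one pool per AIOCB type, no longer tied to struct BlockDriver */
        aio_pool_init(&my_aio_pool, sizeof(MyAIOCB), my_aio_cancel);
    }

    static BlockDriverAIOCB *my_aio_readv(BlockDriverState *bs,
                                          BlockDriverCompletionFunc *cb,
                                          void *opaque)
    {
        /* allocate (or reuse) an AIOCB from the private pool */
        MyAIOCB *acb = qemu_aio_get_pool(&my_aio_pool, bs, cb, opaque);

        acb->my_private_state = 0;
        /* ... submit the request; on completion call
           acb->common.cb(acb->common.opaque, ret) and then
           qemu_aio_release(acb) to return the AIOCB to the pool ... */
        return &acb->common;
    }

The visible effect of the refactoring is in bdrv_aio_cancel(): it now
dispatches through acb->pool->cancel rather than through bs->drv, so an
AIOCB type no longer has to belong to a BlockDriver.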

diff -r 383475bf6dd6 block.c
--- a/block.c   Fri Oct 02 13:35:57 2009 +0100
+++ b/block.c   Fri Oct 02 13:38:10 2009 +0100
@@ -145,6 +145,7 @@
     }
     if (!bdrv->bdrv_aio_flush)
         bdrv->bdrv_aio_flush = bdrv_aio_flush_em;
+    aio_pool_init(&bdrv->aio_pool, bdrv->aiocb_size, bdrv->bdrv_aio_cancel);
     bdrv->next = first_drv;
     first_drv = bdrv;
 }
@@ -1479,14 +1480,12 @@
 
 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
 {
-    BlockDriver *drv = acb->bs->drv;
-
     if (acb->cb == bdrv_aio_rw_vector_cb) {
         VectorTranslationState *s = acb->opaque;
         acb = s->aiocb;
     }
 
-    drv->bdrv_aio_cancel(acb);
+    acb->pool->cancel(acb);
 }
 
 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs, 
@@ -1633,18 +1632,25 @@
 #endif
 }
 
-void *qemu_aio_get(BlockDriverState *bs, BlockDriverCompletionFunc *cb,
-                   void *opaque)
+void aio_pool_init(AIOPool *pool, int aiocb_size,
+                   void (*cancel)(BlockDriverAIOCB *acb))
 {
-    BlockDriver *drv;
+    pool->aiocb_size = aiocb_size;
+    pool->cancel = cancel;
+    pool->free_aiocb = NULL;
+}
+
+void *qemu_aio_get_pool(AIOPool *pool, BlockDriverState *bs,
+                        BlockDriverCompletionFunc *cb, void *opaque)
+{
     BlockDriverAIOCB *acb;
 
-    drv = bs->drv;
-    if (drv->free_aiocb) {
-        acb = drv->free_aiocb;
-        drv->free_aiocb = acb->next;
+    if (pool->free_aiocb) {
+        acb = pool->free_aiocb;
+        pool->free_aiocb = acb->next;
     } else {
-        acb = qemu_mallocz(drv->aiocb_size);
+        acb = qemu_mallocz(pool->aiocb_size);
+        acb->pool = pool;
     }
     acb->bs = bs;
     acb->cb = cb;
@@ -1652,12 +1658,18 @@
     return acb;
 }
 
+void *qemu_aio_get(BlockDriverState *bs, BlockDriverCompletionFunc *cb,
+                   void *opaque)
+{
+    return qemu_aio_get_pool(&bs->drv->aio_pool, bs, cb, opaque);
+}
+
 void qemu_aio_release(void *p)
 {
-    BlockDriverAIOCB *acb = p;
-    BlockDriver *drv = acb->bs->drv;
-    acb->next = drv->free_aiocb;
-    drv->free_aiocb = acb;
+    BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;
+    AIOPool *pool = acb->pool;
+    acb->next = pool->free_aiocb;
+    pool->free_aiocb = acb;
 }
 
 /**************************************************************/
diff -r 383475bf6dd6 block_int.h
--- a/block_int.h       Fri Oct 02 13:35:57 2009 +0100
+++ b/block_int.h       Fri Oct 02 13:38:10 2009 +0100
@@ -31,6 +31,12 @@
 #define BLOCK_FLAG_COMPAT6     4
 
 #define BLOCK_DRIVER_FLAG_EXTENDABLE  0x0001u
+
+typedef struct AIOPool {
+    void (*cancel)(BlockDriverAIOCB *acb);
+    int aiocb_size;
+    BlockDriverAIOCB *free_aiocb;
+} AIOPool;
 
 struct BlockDriver {
     const char *format_name;
@@ -95,7 +101,7 @@
     int (*bdrv_ioctl)(BlockDriverState *bs, unsigned long int req, void *buf);
 
     unsigned bdrv_flags;
-    BlockDriverAIOCB *free_aiocb;
+    AIOPool aio_pool;
     struct BlockDriver *next;
 };
 
@@ -146,6 +152,7 @@
 };
 
 struct BlockDriverAIOCB {
+    AIOPool *pool;
     BlockDriverState *bs;
     BlockDriverCompletionFunc *cb;
     void *opaque;
@@ -154,8 +161,13 @@
 
 void get_tmp_filename(char *filename, int size);
 
+void aio_pool_init(AIOPool *pool, int aiocb_size,
+                   void (*cancel)(BlockDriverAIOCB *acb));
+
 void *qemu_aio_get(BlockDriverState *bs, BlockDriverCompletionFunc *cb,
                    void *opaque);
+void *qemu_aio_get_pool(AIOPool *pool, BlockDriverState *bs,
+                        BlockDriverCompletionFunc *cb, void *opaque);
 void qemu_aio_release(void *p);
 
 extern BlockDriverState *bdrv_first;

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

