
[Xen-devel] [RFC Patch v3 04/18] blktap2: dynamically allocate aio_requests to avoid -EBUSY error



From: Lai Jiangshan <laijs@xxxxxxxxxxxxxx>

In the normal case, there are at most TAPDISK_DATA_REQUESTS requests
in flight at the same time. But in Remus mode, write requests are
forwarded from the master side and cached in block-remus. All cached
requests are forwarded to the aio driver when the primary VM and the
backup VM are synchronized. In this case, the number of requests may
exceed TAPDISK_DATA_REQUESTS, so the aio driver cannot handle them
all at once, which causes tapdisk2 to exit.

We don't know in advance how many requests will need to be handled,
so allocate the aio_requests dynamically to avoid this error.

Signed-off-by: Lai Jiangshan <laijs@xxxxxxxxxxxxxx>
Signed-off-by: Jiang Yunhong <yunhong.jiang@xxxxxxxxx>
Signed-off-by: Wen Congyang <wency@xxxxxxxxxxxxxx>
Cc: Shriram Rajagopalan <rshriram@xxxxxxxxx>
---
 tools/blktap2/drivers/block-aio.c | 36 +++++++++++++++++++++++++++++++++---
 1 file changed, 33 insertions(+), 3 deletions(-)

diff --git a/tools/blktap2/drivers/block-aio.c b/tools/blktap2/drivers/block-aio.c
index f398da2..10ab20b 100644
--- a/tools/blktap2/drivers/block-aio.c
+++ b/tools/blktap2/drivers/block-aio.c
@@ -55,9 +55,10 @@ struct tdaio_state {
        int                  fd;
        td_driver_t         *driver;
 
+       int                  aio_max_count;
        int                  aio_free_count;    
        struct aio_request   aio_requests[MAX_AIO_REQS];
-       struct aio_request  *aio_free_list[MAX_AIO_REQS];
+       struct aio_request   **aio_free_list;
 };
 
 /*Get Image size, secsize*/
@@ -122,6 +123,11 @@ int tdaio_open(td_driver_t *driver, const char *name, td_flag_t flags)
 
        memset(prv, 0, sizeof(struct tdaio_state));
 
+       prv->aio_free_list = malloc(MAX_AIO_REQS * sizeof(*prv->aio_free_list));
+       if (!prv->aio_free_list)
+               return -ENOMEM;
+
+       prv->aio_max_count = MAX_AIO_REQS;
        prv->aio_free_count = MAX_AIO_REQS;
        for (i = 0; i < MAX_AIO_REQS; i++)
                prv->aio_free_list[i] = &prv->aio_requests[i];
@@ -159,6 +165,28 @@ done:
        return ret;     
 }
 
+static int tdaio_refill(struct tdaio_state *prv)
+{
+       struct aio_request **new, *new_req;
+       int i, max = prv->aio_max_count + MAX_AIO_REQS;
+
+       new = realloc(prv->aio_free_list, max * sizeof(*prv->aio_free_list));
+       if (!new)
+               return -1;
+       prv->aio_free_list = new;
+
+       new_req = calloc(MAX_AIO_REQS, sizeof(*new_req));
+       if (!new_req)
+               return -1;
+
+       prv->aio_max_count = max;
+       prv->aio_free_count = MAX_AIO_REQS;
+       for (i = 0; i < MAX_AIO_REQS; i++)
+               prv->aio_free_list[i] = &new_req[i];
+
+       return 0;
+}
+
 void tdaio_complete(void *arg, struct tiocb *tiocb, int err)
 {
        struct aio_request *aio = (struct aio_request *)arg;
@@ -207,8 +235,10 @@ void tdaio_queue_write(td_driver_t *driver, td_request_t treq)
        size    = treq.secs * driver->info.sector_size;
        offset  = treq.sec  * (uint64_t)driver->info.sector_size;
 
-       if (prv->aio_free_count == 0)
-               goto fail;
+       if (prv->aio_free_count == 0) {
+               if (tdaio_refill(prv) < 0)
+                       goto fail;
+       }
 
        aio        = prv->aio_free_list[--prv->aio_free_count];
        aio->treq  = treq;
-- 
1.9.3
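
For readers who want to see the allocation pattern in isolation: the
patch turns the fixed-size free list into a stack of request pointers
that grows by another MAX_AIO_REQS chunk whenever it runs dry, instead
of failing the request. The standalone sketch below mirrors that
scheme; the names (req_pool, pool_get, pool_put, CHUNK) are
illustrative only and do not exist in blktap2, and, like the patch, it
does not bother freeing the extra chunks.

/*
 * Standalone sketch of the grow-on-demand free list used above.
 * None of these names exist in blktap2; CHUNK stands in for
 * MAX_AIO_REQS and struct req for struct aio_request.
 */
#include <stdio.h>
#include <stdlib.h>

#define CHUNK 16

struct req {
        int id;
};

struct req_pool {
        int          max_count;     /* total slots, cf. aio_max_count   */
        int          free_count;    /* unused slots, cf. aio_free_count */
        struct req **free_list;     /* stack of free request pointers   */
};

/* Add another CHUNK of requests and enlarge the pointer stack,
 * mirroring tdaio_refill().  Returns 0 on success, -1 on failure. */
static int pool_refill(struct req_pool *p)
{
        struct req **new_list, *new_reqs;
        int i, max = p->max_count + CHUNK;

        new_list = realloc(p->free_list, max * sizeof(*p->free_list));
        if (!new_list)
                return -1;
        p->free_list = new_list;

        new_reqs = calloc(CHUNK, sizeof(*new_reqs));
        if (!new_reqs)
                return -1;

        p->max_count  = max;
        p->free_count = CHUNK;
        for (i = 0; i < CHUNK; i++)
                p->free_list[i] = &new_reqs[i];

        return 0;
}

/* Pop a free request; grow the pool instead of failing with -EBUSY. */
static struct req *pool_get(struct req_pool *p)
{
        if (p->free_count == 0 && pool_refill(p) < 0)
                return NULL;
        return p->free_list[--p->free_count];
}

/* Push a finished request back, as tdaio_complete() does. */
static void pool_put(struct req_pool *p, struct req *r)
{
        p->free_list[p->free_count++] = r;
}

int main(void)
{
        struct req_pool pool = { 0, 0, NULL };
        struct req *held[2 * CHUNK];
        int i;

        /* Hold more requests than one chunk provides; the pool grows
         * on demand where the old fixed-size array would have failed. */
        for (i = 0; i < 2 * CHUNK; i++) {
                held[i] = pool_get(&pool);
                if (!held[i])
                        return 1;
                held[i]->id = i;
        }
        for (i = 0; i < 2 * CHUNK; i++)
                pool_put(&pool, held[i]);

        printf("pool grew to %d slots, %d free\n",
               pool.max_count, pool.free_count);
        return 0;
}

Run as is, this prints "pool grew to 32 slots, 32 free": the second
chunk is allocated while the first sixteen requests are all
outstanding, which is exactly the situation block-remus creates when
it flushes its cached writes.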

