
To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] bimodal blkback: Support multiple ring protocols.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 25 Jan 2007 08:55:10 -0800
Delivery-date: Thu, 25 Jan 2007 08:56:33 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1169635097 0
# Node ID c9ac0bace498d1c25f07df95b88d8f8e89168514
# Parent  349e95826a8fa2b2485f04b6eb2bece27c970add
bimodal blkback: Support multiple ring protocols.

This is needed for 32-on-64 support.  Right now there are three
protocols: native, x86_32 and x86_64.  The set can be extended if needed.

Interface changes (io/blkif.h):
 * Define the x86_32 and x86_64 structs in addition to the native
   version.
 * Add helper functions to convert those requests to the native
   format (an illustrative layout check follows this list).
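
For illustration only (not part of this patch): the standalone program
below uses made-up stand-in structs that mimic just the start of the
request layouts, to show why a field-by-field conversion is needed.
With the 4-byte packing of the i386 ABI the 64-bit 'id' field lands at
offset 4, while the native/x86_64 alignment puts it at offset 8, so the
request structs cannot simply be cast between protocols.

/*
 * Standalone demo; all names are made up and only sketch the layouts.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#pragma pack(push, 4)                   /* i386 ABI: members packed to 4 */
struct demo_req_x86_32 {
        uint8_t  operation;
        uint8_t  nr_segments;
        uint16_t handle;
        uint64_t id;                    /* lands at offset 4 */
};
#pragma pack(pop)

struct demo_req_x86_64 {
        uint8_t  operation;
        uint8_t  nr_segments;
        uint16_t handle;
        uint64_t __attribute__((__aligned__(8))) id;   /* lands at offset 8 */
};

int main(void)
{
        printf("x86_32 layout: sizeof=%zu, offsetof(id)=%zu\n",
               sizeof(struct demo_req_x86_32),
               offsetof(struct demo_req_x86_32, id));
        printf("x86_64 layout: sizeof=%zu, offsetof(id)=%zu\n",
               sizeof(struct demo_req_x86_64),
               offsetof(struct demo_req_x86_64, id));
        return 0;
}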

Backend changes:
 * Look at the frontend's "protocol" node and switch ring handling
   accordingly.  If the protocol node isn't present, the native
   protocol is assumed (a sketch of the frontend side of this
   handshake follows the list).
 * As the request struct is copied before being processed anyway (for
   security reasons), it is converted to native at that point, so most
   backend code doesn't need to know which protocol the frontend
   speaks.
 * In the case of blktap this is completely transparent to userspace:
   the kernel/userspace ring is always native, no matter which
   protocol the frontend speaks.
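
A minimal sketch (not part of this patch) of the frontend half of the
handshake described above.  The helper name and surrounding setup are
assumed; only xenbus_printf() and the XEN_IO_PROTO_ABI_* names come
from the tree.  In a real frontend this write would sit in the same
xenbus transaction as ring-ref and event-channel.

#include <xen/xenbus.h>
#include <xen/interface/io/protocols.h>

/* Hedged sketch: how a frontend might advertise its ring protocol. */
static int advertise_ring_protocol(struct xenbus_transaction xbt,
                                   struct xenbus_device *dev)
{
        /* The backend reads "<otherend>/protocol"; if the node is absent
         * it falls back to the native ABI, so a same-width guest may omit
         * this write entirely. */
        return xenbus_printf(xbt, dev->nodename, "protocol",
                             "%s", XEN_IO_PROTO_ABI_NATIVE);
}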

Signed-off-by: Gerd Hoffmann <kraxel@xxxxxxx>
---
 linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c   |   75 ++++++++++----
 linux-2.6-xen-sparse/drivers/xen/blkback/common.h    |    6 -
 linux-2.6-xen-sparse/drivers/xen/blkback/interface.c |   32 +++++-
 linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c    |   19 +++
 linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c     |   78 ++++++++++-----
 linux-2.6-xen-sparse/drivers/xen/blktap/common.h     |    6 -
 linux-2.6-xen-sparse/drivers/xen/blktap/interface.c  |   32 +++++-
 linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c     |   19 +++
 linux-2.6-xen-sparse/include/xen/blkif.h             |   97 +++++++++++++++++++
 xen/include/public/io/blkif.h                        |   14 +-
 10 files changed, 310 insertions(+), 68 deletions(-)

diff -r 349e95826a8f -r c9ac0bace498 linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c        Wed Jan 24 10:22:06 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c        Wed Jan 24 10:38:17 2007 +0000
@@ -298,17 +298,20 @@ irqreturn_t blkif_be_int(int irq, void *
 
 static int do_block_io_op(blkif_t *blkif)
 {
-       blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+       blkif_back_rings_t *blk_rings = &blkif->blk_rings;
        blkif_request_t req;
        pending_req_t *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;
 
-       rc = blk_ring->req_cons;
-       rp = blk_ring->sring->req_prod;
+       rc = blk_rings->common.req_cons;
+       rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */
 
-       while ((rc != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, rc)) {
+       while ((rc != rp)) {
+
+               if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
+                       break;
 
                pending_req = alloc_req();
                if (NULL == pending_req) {
@@ -317,8 +320,20 @@ static int do_block_io_op(blkif_t *blkif
                        break;
                }
 
-               memcpy(&req, RING_GET_REQUEST(blk_ring, rc), sizeof(req));
-               blk_ring->req_cons = ++rc; /* before make_response() */
+               switch (blkif->blk_protocol) {
+               case BLKIF_PROTOCOL_NATIVE:
+                       memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
+                       break;
+               case BLKIF_PROTOCOL_X86_32:
+                       blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
+                       break;
+               case BLKIF_PROTOCOL_X86_64:
+                       blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
+                       break;
+               default:
+                       BUG();
+               }
+               blk_rings->common.req_cons = ++rc; /* before make_response() */
 
                switch (req.operation) {
                case BLKIF_OP_READ:
@@ -498,34 +513,48 @@ static void make_response(blkif_t *blkif
 static void make_response(blkif_t *blkif, unsigned long id, 
                          unsigned short op, int st)
 {
-       blkif_response_t *resp;
+       blkif_response_t  resp;
        unsigned long     flags;
-       blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+       blkif_back_rings_t *blk_rings = &blkif->blk_rings;
        int more_to_do = 0;
        int notify;
 
+       resp.id        = id;
+       resp.operation = op;
+       resp.status    = st;
+
        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-
-       /* Place on the response ring for the relevant domain. */ 
-       resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
-       resp->id        = id;
-       resp->operation = op;
-       resp->status    = st;
-       blk_ring->rsp_prod_pvt++;
-       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);
-
-       if (blk_ring->rsp_prod_pvt == blk_ring->req_cons) {
+       /* Place on the response ring for the relevant domain. */
+       switch (blkif->blk_protocol) {
+       case BLKIF_PROTOCOL_NATIVE:
+               memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
+                      &resp, sizeof(resp));
+               break;
+       case BLKIF_PROTOCOL_X86_32:
+               memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
+                      &resp, sizeof(resp));
+               break;
+       case BLKIF_PROTOCOL_X86_64:
+               memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
+                      &resp, sizeof(resp));
+               break;
+       default:
+               BUG();
+       }
+       blk_rings->common.rsp_prod_pvt++;
+       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
+       if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
                /*
                 * Tail check for pending requests. Allows frontend to avoid
                 * notifications if requests are already in flight (lower
                 * overheads and promotes batching).
                 */
-               RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);
-
-       } else if (RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
+               RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
+
+       } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
                more_to_do = 1;
-
-       }
+       }
+
        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
 
        if (more_to_do)
diff -r 349e95826a8f -r c9ac0bace498 linux-2.6-xen-sparse/drivers/xen/blkback/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/common.h Wed Jan 24 10:22:06 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/common.h Wed Jan 24 10:38:17 2007 +0000
@@ -40,8 +40,7 @@
 #include <asm/pgalloc.h>
 #include <xen/evtchn.h>
 #include <asm/hypervisor.h>
-#include <xen/interface/io/blkif.h>
-#include <xen/interface/io/ring.h>
+#include <xen/blkif.h>
 #include <xen/gnttab.h>
 #include <xen/driver_util.h>
 #include <xen/xenbus.h>
@@ -67,7 +66,8 @@ typedef struct blkif_st {
        /* Physical parameters of the comms window. */
        unsigned int      irq;
        /* Comms information. */
-       blkif_back_ring_t blk_ring;
+       enum blkif_protocol blk_protocol;
+       blkif_back_rings_t blk_rings;
        struct vm_struct *blk_ring_area;
        /* The VBD attached to this interface. */
        struct vbd        vbd;
diff -r 349e95826a8f -r c9ac0bace498 linux-2.6-xen-sparse/drivers/xen/blkback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c      Wed Jan 24 10:22:06 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c      Wed Jan 24 10:38:17 2007 +0000
@@ -95,7 +95,6 @@ static void unmap_frontend_page(blkif_t 
 
 int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
 {
-       blkif_sring_t *sring;
        int err;
 
        /* Already connected through? */
@@ -111,8 +110,31 @@ int blkif_map(blkif_t *blkif, unsigned l
                return err;
        }
 
-       sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-       BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
+       switch (blkif->blk_protocol) {
+       case BLKIF_PROTOCOL_NATIVE:
+       {
+               blkif_sring_t *sring;
+               sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
+               BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
+               break;
+       }
+       case BLKIF_PROTOCOL_X86_32:
+       {
+               blkif_x86_32_sring_t *sring_x86_32;
+               sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
+               BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
+               break;
+       }
+       case BLKIF_PROTOCOL_X86_64:
+       {
+               blkif_x86_64_sring_t *sring_x86_64;
+               sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
+               BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
+               break;
+       }
+       default:
+               BUG();
+       }
 
        err = bind_interdomain_evtchn_to_irqhandler(
                blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend", blkif);
@@ -143,10 +165,10 @@ void blkif_disconnect(blkif_t *blkif)
                blkif->irq = 0;
        }
 
-       if (blkif->blk_ring.sring) {
+       if (blkif->blk_rings.common.sring) {
                unmap_frontend_page(blkif);
                free_vm_area(blkif->blk_ring_area);
-               blkif->blk_ring.sring = NULL;
+               blkif->blk_rings.common.sring = NULL;
        }
 }
 
diff -r 349e95826a8f -r c9ac0bace498 linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c Wed Jan 24 10:22:06 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c Wed Jan 24 10:38:17 2007 +0000
@@ -459,6 +459,7 @@ static int connect_ring(struct backend_i
        struct xenbus_device *dev = be->dev;
        unsigned long ring_ref;
        unsigned int evtchn;
+       char protocol[64] = "";
        int err;
 
        DPRINTK("%s", dev->otherend);
@@ -471,6 +472,24 @@ static int connect_ring(struct backend_i
                                 dev->otherend);
                return err;
        }
+
+       be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+       err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
+                           "%63s", protocol, NULL);
+       if (err)
+               strcpy(protocol, "unspecified, assuming native");
+       else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
+               be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+       else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
+               be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
+       else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
+               be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
+       else {
+               xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
+               return -1;
+       }
+       printk("blkback: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
+              ring_ref, evtchn, be->blkif->blk_protocol, protocol);
 
        /* Map the shared frame, irq etc. */
        err = blkif_map(be->blkif, ring_ref, evtchn);
diff -r 349e95826a8f -r c9ac0bace498 linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c
--- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c  Wed Jan 24 10:22:06 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c  Wed Jan 24 10:38:17 2007 +0000
@@ -1091,15 +1091,15 @@ static int print_dbug = 1;
 static int print_dbug = 1;
 static int do_block_io_op(blkif_t *blkif)
 {
-       blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+       blkif_back_rings_t *blk_rings = &blkif->blk_rings;
        blkif_request_t req;
        pending_req_t *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;
        tap_blkif_t *info;
 
-       rc = blk_ring->req_cons;
-       rp = blk_ring->sring->req_prod;
+       rc = blk_rings->common.req_cons;
+       rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */
 
        /*Check blkif has corresponding UE ring*/
@@ -1130,8 +1130,8 @@ static int do_block_io_op(blkif_t *blkif
                        more_to_do = 1;
                        break;
                }
-               
-               if (RING_REQUEST_CONS_OVERFLOW(blk_ring, rc)) {
+
+               if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) {
                        WPRINTK("RING_REQUEST_CONS_OVERFLOW!"
                               " More to do\n");
                        more_to_do = 1;
@@ -1145,8 +1145,21 @@ static int do_block_io_op(blkif_t *blkif
                        break;
                }
 
-               memcpy(&req, RING_GET_REQUEST(blk_ring, rc), sizeof(req));
-               blk_ring->req_cons = ++rc; /* before make_response() */ 
+               switch (blkif->blk_protocol) {
+               case BLKIF_PROTOCOL_NATIVE:
+                       memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
+                              sizeof(req));
+                       break;
+               case BLKIF_PROTOCOL_X86_32:
+                       blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
+                       break;
+               case BLKIF_PROTOCOL_X86_64:
+                       blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
+                       break;
+               default:
+                       BUG();
+               }
+               blk_rings->common.req_cons = ++rc; /* before make_response() */
 
                switch (req.operation) {
                case BLKIF_OP_READ:
@@ -1222,7 +1235,7 @@ static void dispatch_rw_block_io(blkif_t
                WPRINTK("blktap: fe_ring is full, can't add "
                        "IO Request will be dropped. %d %d\n",
                        RING_SIZE(&info->ufe_ring),
-                       RING_SIZE(&blkif->blk_ring));
+                       RING_SIZE(&blkif->blk_rings.common));
                goto fail_response;
        }
 
@@ -1410,32 +1423,51 @@ static void make_response(blkif_t *blkif
 static void make_response(blkif_t *blkif, unsigned long id, 
                           unsigned short op, int st)
 {
-       blkif_response_t *resp;
+       blkif_response_t  resp;
        unsigned long     flags;
-       blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+       blkif_back_rings_t *blk_rings = &blkif->blk_rings;
        int more_to_do = 0;
        int notify;
 
+       resp.id        = id;
+       resp.operation = op;
+       resp.status    = st;
+
        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-       /* Place on the response ring for the relevant domain. */ 
-       resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
-       resp->id        = id;
-       resp->operation = op;
-       resp->status    = st;
-       blk_ring->rsp_prod_pvt++;
-       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);
-
-       if (blk_ring->rsp_prod_pvt == blk_ring->req_cons) {
+       /* Place on the response ring for the relevant domain. */
+       switch (blkif->blk_protocol) {
+       case BLKIF_PROTOCOL_NATIVE:
+               memcpy(RING_GET_RESPONSE(&blk_rings->native,
+                                        blk_rings->native.rsp_prod_pvt),
+                      &resp, sizeof(resp));
+               break;
+       case BLKIF_PROTOCOL_X86_32:
+               memcpy(RING_GET_RESPONSE(&blk_rings->x86_32,
+                                        blk_rings->x86_32.rsp_prod_pvt),
+                      &resp, sizeof(resp));
+               break;
+       case BLKIF_PROTOCOL_X86_64:
+               memcpy(RING_GET_RESPONSE(&blk_rings->x86_64,
+                                        blk_rings->x86_64.rsp_prod_pvt),
+                      &resp, sizeof(resp));
+               break;
+       default:
+               BUG();
+       }
+       blk_rings->common.rsp_prod_pvt++;
+       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
+
+       if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
                /*
                 * Tail check for pending requests. Allows frontend to avoid
                 * notifications if requests are already in flight (lower
                 * overheads and promotes batching).
                 */
-               RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);
-       } else if (RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
+               RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
+       } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
                more_to_do = 1;
-
-       }       
+       }
+
        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
        if (more_to_do)
                blkif_notify_work(blkif);
diff -r 349e95826a8f -r c9ac0bace498 linux-2.6-xen-sparse/drivers/xen/blktap/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/blktap/common.h  Wed Jan 24 10:22:06 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/blktap/common.h  Wed Jan 24 10:38:17 2007 +0000
@@ -39,8 +39,7 @@
 #include <asm/pgalloc.h>
 #include <xen/evtchn.h>
 #include <asm/hypervisor.h>
-#include <xen/interface/io/blkif.h>
-#include <xen/interface/io/ring.h>
+#include <xen/blkif.h>
 #include <xen/gnttab.h>
 #include <xen/driver_util.h>
 
@@ -58,7 +57,8 @@ typedef struct blkif_st {
        /* Physical parameters of the comms window. */
        unsigned int      irq;
        /* Comms information. */
-       blkif_back_ring_t blk_ring;
+       enum blkif_protocol blk_protocol;
+       blkif_back_rings_t blk_rings;
        struct vm_struct *blk_ring_area;
        /* Back pointer to the backend_info. */
        struct backend_info *be;
diff -r 349e95826a8f -r c9ac0bace498 linux-2.6-xen-sparse/drivers/xen/blktap/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c       Wed Jan 24 10:22:06 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c       Wed Jan 24 10:38:17 2007 +0000
@@ -96,7 +96,6 @@ int tap_blkif_map(blkif_t *blkif, unsign
 int tap_blkif_map(blkif_t *blkif, unsigned long shared_page, 
                  unsigned int evtchn)
 {
-       blkif_sring_t *sring;
        int err;
 
        /* Already connected through? */
@@ -112,8 +111,31 @@ int tap_blkif_map(blkif_t *blkif, unsign
                return err;
        }
 
-       sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-       BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
+       switch (blkif->blk_protocol) {
+       case BLKIF_PROTOCOL_NATIVE:
+       {
+               blkif_sring_t *sring;
+               sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
+               BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
+               break;
+       }
+       case BLKIF_PROTOCOL_X86_32:
+       {
+               blkif_x86_32_sring_t *sring_x86_32;
+               sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
+               BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
+               break;
+       }
+       case BLKIF_PROTOCOL_X86_64:
+       {
+               blkif_x86_64_sring_t *sring_x86_64;
+               sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
+               BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
+               break;
+       }
+       default:
+               BUG();
+       }
 
        err = bind_interdomain_evtchn_to_irqhandler(
                blkif->domid, evtchn, tap_blkif_be_int,
@@ -134,10 +156,10 @@ void tap_blkif_unmap(blkif_t *blkif)
                unbind_from_irqhandler(blkif->irq, blkif);
                blkif->irq = 0;
        }
-       if (blkif->blk_ring.sring) {
+       if (blkif->blk_rings.common.sring) {
                unmap_frontend_page(blkif);
                free_vm_area(blkif->blk_ring_area);
-               blkif->blk_ring.sring = NULL;
+               blkif->blk_rings.common.sring = NULL;
        }
 }
 
diff -r 349e95826a8f -r c9ac0bace498 linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c
--- a/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c  Wed Jan 24 10:22:06 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c  Wed Jan 24 10:38:17 2007 +0000
@@ -340,6 +340,7 @@ static int connect_ring(struct backend_i
        struct xenbus_device *dev = be->dev;
        unsigned long ring_ref;
        unsigned int evtchn;
+       char protocol[64];
        int err;
 
        DPRINTK("%s\n", dev->otherend);
@@ -352,6 +353,24 @@ static int connect_ring(struct backend_i
                                 dev->otherend);
                return err;
        }
+
+       be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+       err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
+                           "%63s", protocol, NULL);
+       if (err)
+               strcpy(protocol, "unspecified, assuming native");
+       else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
+               be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+       else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
+               be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
+       else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
+               be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
+       else {
+               xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
+               return -1;
+       }
+       printk("blktap: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
+              ring_ref, evtchn, be->blkif->blk_protocol, protocol);
 
        /* Map the shared frame, irq etc. */
        err = tap_blkif_map(be->blkif, ring_ref, evtchn);
diff -r 349e95826a8f -r c9ac0bace498 linux-2.6-xen-sparse/include/xen/blkif.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/include/xen/blkif.h  Wed Jan 24 10:38:17 2007 +0000
@@ -0,0 +1,97 @@
+#ifndef __XEN_BLKIF_H__
+#define __XEN_BLKIF_H__
+
+#include <xen/interface/io/ring.h>
+#include <xen/interface/io/blkif.h>
+#include <xen/interface/io/protocols.h>
+
+/* Not a real protocol.  Used to generate ring structs which contain
+ * the elements common to all protocols only.  This way we get a
+ * compiler-checkable way to use common struct elements, so we can
+ * avoid using switch(protocol) in a number of places.  */
+struct blkif_common_request {
+       char dummy;
+};
+struct blkif_common_response {
+       char dummy;
+};
+
+/* i386 protocol version */
+#pragma pack(push, 4)
+struct blkif_x86_32_request {
+       uint8_t        operation;    /* BLKIF_OP_???                         */
+       uint8_t        nr_segments;  /* number of segments                   */
+       blkif_vdev_t   handle;       /* only for read/write requests         */
+       uint64_t       id;           /* private guest value, echoed in resp  */
+       blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
+       struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+struct blkif_x86_32_response {
+       uint64_t        id;              /* copied from request */
+       uint8_t         operation;       /* copied from request */
+       int16_t         status;          /* BLKIF_RSP_???       */
+};
+typedef struct blkif_x86_32_request blkif_x86_32_request_t;
+typedef struct blkif_x86_32_response blkif_x86_32_response_t;
+#pragma pack(pop)
+
+/* x86_64 protocol version */
+struct blkif_x86_64_request {
+       uint8_t        operation;    /* BLKIF_OP_???                         */
+       uint8_t        nr_segments;  /* number of segments                   */
+       blkif_vdev_t   handle;       /* only for read/write requests         */
+       uint64_t       __attribute__((__aligned__(8))) id;
+       blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
+       struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+struct blkif_x86_64_response {
+       uint64_t       __attribute__((__aligned__(8))) id;
+       uint8_t         operation;       /* copied from request */
+       int16_t         status;          /* BLKIF_RSP_???       */
+};
+typedef struct blkif_x86_64_request blkif_x86_64_request_t;
+typedef struct blkif_x86_64_response blkif_x86_64_response_t;
+
+DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response);
+DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response);
+DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response);
+
+union blkif_back_rings {
+       blkif_back_ring_t        native;
+       blkif_common_back_ring_t common;
+       blkif_x86_32_back_ring_t x86_32;
+       blkif_x86_64_back_ring_t x86_64;
+};
+typedef union blkif_back_rings blkif_back_rings_t;
+
+enum blkif_protocol {
+       BLKIF_PROTOCOL_NATIVE = 1,
+       BLKIF_PROTOCOL_X86_32 = 2,
+       BLKIF_PROTOCOL_X86_64 = 3,
+};
+
+static void inline blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src)
+{
+       int i;
+       dst->operation = src->operation;
+       dst->nr_segments = src->nr_segments;
+       dst->handle = src->handle;
+       dst->id = src->id;
+       dst->sector_number = src->sector_number;
+       for (i = 0; i < src->nr_segments; i++)
+               dst->seg[i] = src->seg[i];
+}
+
+static void inline blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src)
+{
+       int i;
+       dst->operation = src->operation;
+       dst->nr_segments = src->nr_segments;
+       dst->handle = src->handle;
+       dst->id = src->id;
+       dst->sector_number = src->sector_number;
+       for (i = 0; i < src->nr_segments; i++)
+               dst->seg[i] = src->seg[i];
+}
+
+#endif /* __XEN_BLKIF_H__ */
diff -r 349e95826a8f -r c9ac0bace498 xen/include/public/io/blkif.h
--- a/xen/include/public/io/blkif.h     Wed Jan 24 10:22:06 2007 +0000
+++ b/xen/include/public/io/blkif.h     Wed Jan 24 10:38:17 2007 +0000
@@ -71,18 +71,20 @@
  */
 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 
+struct blkif_request_segment {
+    grant_ref_t gref;        /* reference to I/O buffer frame        */
+    /* @first_sect: first sector in frame to transfer (inclusive).   */
+    /* @last_sect: last sector in frame to transfer (inclusive).     */
+    uint8_t     first_sect, last_sect;
+};
+
 struct blkif_request {
     uint8_t        operation;    /* BLKIF_OP_???                         */
     uint8_t        nr_segments;  /* number of segments                   */
     blkif_vdev_t   handle;       /* only for read/write requests         */
     uint64_t       id;           /* private guest value, echoed in resp  */
     blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
-    struct blkif_request_segment {
-        grant_ref_t gref;        /* reference to I/O buffer frame        */
-        /* @first_sect: first sector in frame to transfer (inclusive).   */
-        /* @last_sect: last sector in frame to transfer (inclusive).     */
-        uint8_t     first_sect, last_sect;
-    } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 typedef struct blkif_request blkif_request_t;
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
