xen-changelog

[Xen-changelog] Merged.

# HG changeset patch
# User emellor@ewan
# Node ID 10759a44ce3b9431b730eadde2417777ca8f9943
# Parent  eba5afe9aa37d3cb96c22cd93b8b7ec7c8e3e1ca
# Parent  28db21fb75453d80636142432b9e324fcd472c52
Merged.

diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/arch/xen/Kconfig
--- a/linux-2.6-xen-sparse/arch/xen/Kconfig     Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/Kconfig     Thu Sep 22 15:12:14 2005
@@ -110,13 +110,6 @@
          network interfaces within another guest OS. Unless you are building a
          dedicated device-driver domain, or your master control domain
          (domain 0), then you almost certainly want to say Y here.
-
-config XEN_NETDEV_GRANT
-        bool "Grant table substrate for network drivers (DANGEROUS)"
-        default n
-        help
-          This introduces the use of grant tables as a data exhange mechanism
-          between the frontend and backend network drivers.
 
 config XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
        bool "Pipelined transmitter (DANGEROUS)"
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32       Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32       Thu Sep 22 15:12:14 2005
@@ -19,7 +19,6 @@
 # CONFIG_XEN_TPMDEV_BACKEND is not set
 CONFIG_XEN_BLKDEV_FRONTEND=y
 CONFIG_XEN_NETDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_GRANT=y
 # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
 # CONFIG_XEN_BLKDEV_TAP is not set
 # CONFIG_XEN_SHADOW_MODE is not set
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64       Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64       Thu Sep 22 15:12:14 2005
@@ -19,7 +19,6 @@
 # CONFIG_XEN_TPMDEV_BACKEND is not set
 CONFIG_XEN_BLKDEV_FRONTEND=y
 CONFIG_XEN_NETDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_GRANT=y
 # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
 # CONFIG_XEN_BLKDEV_TAP is not set
 # CONFIG_XEN_SHADOW_MODE is not set
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32
--- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32       Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32       Thu Sep 22 15:12:14 2005
@@ -16,7 +16,6 @@
 # CONFIG_XEN_TPMDEV_BACKEND is not set
 CONFIG_XEN_BLKDEV_FRONTEND=y
 CONFIG_XEN_NETDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_GRANT=y
 # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
 # CONFIG_XEN_BLKDEV_TAP is not set
 # CONFIG_XEN_SHADOW_MODE is not set
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64
--- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64       Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64       Thu Sep 22 15:12:14 2005
@@ -16,7 +16,6 @@
 # CONFIG_XEN_TPMDEV_BACKEND is not set
 CONFIG_XEN_BLKDEV_FRONTEND=y
 CONFIG_XEN_NETDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_GRANT=y
 # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
 # CONFIG_XEN_BLKDEV_TAP is not set
 # CONFIG_XEN_SHADOW_MODE is not set
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32        Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32        Thu Sep 22 15:12:14 2005
@@ -19,7 +19,6 @@
 # CONFIG_XEN_TPMDEV_BACKEND is not set
 CONFIG_XEN_BLKDEV_FRONTEND=y
 CONFIG_XEN_NETDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_GRANT=y
 # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
 # CONFIG_XEN_BLKDEV_TAP is not set
 # CONFIG_XEN_SHADOW_MODE is not set
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64        Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64        Thu Sep 22 15:12:14 2005
@@ -19,7 +19,6 @@
 # CONFIG_XEN_TPMDEV_BACKEND is not set
 CONFIG_XEN_BLKDEV_FRONTEND=y
 CONFIG_XEN_NETDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_GRANT=y
 # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
 # CONFIG_XEN_BLKDEV_TAP is not set
 # CONFIG_XEN_SHADOW_MODE is not set
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c        Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c        Thu Sep 22 15:12:14 2005
@@ -28,12 +28,12 @@
 #define BATCH_PER_DOMAIN 16
 
 static unsigned long mmap_vstart;
-#define MMAP_PAGES                                              \
-    (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
-#define MMAP_VADDR(_req,_seg)                                   \
-    (mmap_vstart +                                              \
-     ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +    \
-     ((_seg) * PAGE_SIZE))
+#define MMAP_PAGES                                             \
+       (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
+#define MMAP_VADDR(_req,_seg)                                          \
+       (mmap_vstart +                                                  \
+        ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +        \
+        ((_seg) * PAGE_SIZE))
 
 /*
  * Each outstanding request that we've passed to the lower device layers has a 
@@ -42,12 +42,12 @@
  * response queued for it, with the saved 'id' passed back.
  */
 typedef struct {
-    blkif_t       *blkif;
-    unsigned long  id;
-    int            nr_pages;
-    atomic_t       pendcnt;
-    unsigned short operation;
-    int            status;
+       blkif_t       *blkif;
+       unsigned long  id;
+       int            nr_pages;
+       atomic_t       pendcnt;
+       unsigned short operation;
+       int            status;
 } pending_req_t;
 
 /*
@@ -68,14 +68,13 @@
 static request_queue_t *plugged_queue;
 static inline void flush_plugged_queue(void)
 {
-    request_queue_t *q = plugged_queue;
-    if ( q != NULL )
-    {
-        if ( q->unplug_fn != NULL )
-            q->unplug_fn(q);
-        blk_put_queue(q);
-        plugged_queue = NULL;
-    }
+       request_queue_t *q = plugged_queue;
+       if (q != NULL) {
+               if ( q->unplug_fn != NULL )
+                       q->unplug_fn(q);
+               blk_put_queue(q);
+               plugged_queue = NULL;
+       }
 }
 
 /* When using grant tables to map a frame for device access then the
@@ -106,24 +105,23 @@
 
 static void fast_flush_area(int idx, int nr_pages)
 {
-    struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    unsigned int i, invcount = 0;
-    u16 handle;
-
-    for ( i = 0; i < nr_pages; i++ )
-    {
-        if ( BLKBACK_INVALID_HANDLE != ( handle = pending_handle(idx, i) ) )
-        {
-            unmap[i].host_addr      = MMAP_VADDR(idx, i);
-            unmap[i].dev_bus_addr   = 0;
-            unmap[i].handle         = handle;
-            pending_handle(idx, i)  = BLKBACK_INVALID_HANDLE;
-            invcount++;
-        }
-    }
-    if ( unlikely(HYPERVISOR_grant_table_op(
-                    GNTTABOP_unmap_grant_ref, unmap, invcount)))
-        BUG();
+       struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+       unsigned int i, invcount = 0;
+       u16 handle;
+
+       for (i = 0; i < nr_pages; i++) {
+               handle = pending_handle(idx, i);
+               if (handle == BLKBACK_INVALID_HANDLE)
+                       continue;
+               unmap[i].host_addr      = MMAP_VADDR(idx, i);
+               unmap[i].dev_bus_addr   = 0;
+               unmap[i].handle         = handle;
+               pending_handle(idx, i)  = BLKBACK_INVALID_HANDLE;
+               invcount++;
+       }
+
+       BUG_ON(HYPERVISOR_grant_table_op(
+               GNTTABOP_unmap_grant_ref, unmap, invcount));
 }
 
 
@@ -136,34 +134,38 @@
 
 static int __on_blkdev_list(blkif_t *blkif)
 {
-    return blkif->blkdev_list.next != NULL;
+       return blkif->blkdev_list.next != NULL;
 }
 
 static void remove_from_blkdev_list(blkif_t *blkif)
 {
-    unsigned long flags;
-    if ( !__on_blkdev_list(blkif) ) return;
-    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
-    if ( __on_blkdev_list(blkif) )
-    {
-        list_del(&blkif->blkdev_list);
-        blkif->blkdev_list.next = NULL;
-        blkif_put(blkif);
-    }
-    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
+       unsigned long flags;
+
+       if (!__on_blkdev_list(blkif))
+               return;
+
+       spin_lock_irqsave(&blkio_schedule_list_lock, flags);
+       if (__on_blkdev_list(blkif)) {
+               list_del(&blkif->blkdev_list);
+               blkif->blkdev_list.next = NULL;
+               blkif_put(blkif);
+       }
+       spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
 }
 
 static void add_to_blkdev_list_tail(blkif_t *blkif)
 {
-    unsigned long flags;
-    if ( __on_blkdev_list(blkif) ) return;
-    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
-    if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
-    {
-        list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
-        blkif_get(blkif);
-    }
-    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
+       unsigned long flags;
+
+       if (__on_blkdev_list(blkif))
+               return;
+
+       spin_lock_irqsave(&blkio_schedule_list_lock, flags);
+       if (!__on_blkdev_list(blkif) && (blkif->status == CONNECTED)) {
+               list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
+               blkif_get(blkif);
+       }
+       spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
 }
 
 
@@ -175,54 +177,53 @@
 
 static int blkio_schedule(void *arg)
 {
-    DECLARE_WAITQUEUE(wq, current);
-
-    blkif_t          *blkif;
-    struct list_head *ent;
-
-    daemonize("xenblkd");
-
-    for ( ; ; )
-    {
-        /* Wait for work to do. */
-        add_wait_queue(&blkio_schedule_wait, &wq);
-        set_current_state(TASK_INTERRUPTIBLE);
-        if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 
-             list_empty(&blkio_schedule_list) )
-            schedule();
-        __set_current_state(TASK_RUNNING);
-        remove_wait_queue(&blkio_schedule_wait, &wq);
-
-        /* Queue up a batch of requests. */
-        while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
-                !list_empty(&blkio_schedule_list) )
-        {
-            ent = blkio_schedule_list.next;
-            blkif = list_entry(ent, blkif_t, blkdev_list);
-            blkif_get(blkif);
-            remove_from_blkdev_list(blkif);
-            if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
-                add_to_blkdev_list_tail(blkif);
-            blkif_put(blkif);
-        }
-
-        /* Push the batch through to disc. */
-        flush_plugged_queue();
-    }
+       DECLARE_WAITQUEUE(wq, current);
+
+       blkif_t          *blkif;
+       struct list_head *ent;
+
+       daemonize("xenblkd");
+
+       for (;;) {
+               /* Wait for work to do. */
+               add_wait_queue(&blkio_schedule_wait, &wq);
+               set_current_state(TASK_INTERRUPTIBLE);
+               if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 
+                    list_empty(&blkio_schedule_list) )
+                       schedule();
+               __set_current_state(TASK_RUNNING);
+               remove_wait_queue(&blkio_schedule_wait, &wq);
+
+               /* Queue up a batch of requests. */
+               while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
+                      !list_empty(&blkio_schedule_list)) {
+                       ent = blkio_schedule_list.next;
+                       blkif = list_entry(ent, blkif_t, blkdev_list);
+                       blkif_get(blkif);
+                       remove_from_blkdev_list(blkif);
+                       if (do_block_io_op(blkif, BATCH_PER_DOMAIN))
+                               add_to_blkdev_list_tail(blkif);
+                       blkif_put(blkif);
+               }
+
+               /* Push the batch through to disc. */
+               flush_plugged_queue();
+       }
 }
 
 static void maybe_trigger_blkio_schedule(void)
 {
-    /*
-     * Needed so that two processes, who together make the following predicate
-     * true, don't both read stale values and evaluate the predicate
-     * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
-     */
-    smp_mb();
-
-    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
-         !list_empty(&blkio_schedule_list) )
-        wake_up(&blkio_schedule_wait);
+       /*
+        * Needed so that two processes, which together make the following
+        * predicate true, don't both read stale values and evaluate the
+        * predicate incorrectly. Incredibly unlikely to stall the scheduler
+        * on x86, but...
+        */
+       smp_mb();
+
+       if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
+           !list_empty(&blkio_schedule_list))
+               wake_up(&blkio_schedule_wait);
 }
 
 
@@ -233,36 +234,34 @@
 
 static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
 {
-    unsigned long flags;
-
-    /* An error fails the entire request. */
-    if ( !uptodate )
-    {
-        DPRINTK("Buffer not up-to-date at end of operation\n");
-        pending_req->status = BLKIF_RSP_ERROR;
-    }
-
-    if ( atomic_dec_and_test(&pending_req->pendcnt) )
-    {
-        int pending_idx = pending_req - pending_reqs;
-        fast_flush_area(pending_idx, pending_req->nr_pages);
-        make_response(pending_req->blkif, pending_req->id,
-                      pending_req->operation, pending_req->status);
-        blkif_put(pending_req->blkif);
-        spin_lock_irqsave(&pend_prod_lock, flags);
-        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-        spin_unlock_irqrestore(&pend_prod_lock, flags);
-        maybe_trigger_blkio_schedule();
-    }
+       unsigned long flags;
+
+       /* An error fails the entire request. */
+       if (!uptodate) {
+               DPRINTK("Buffer not up-to-date at end of operation\n");
+               pending_req->status = BLKIF_RSP_ERROR;
+       }
+
+       if (atomic_dec_and_test(&pending_req->pendcnt)) {
+               int pending_idx = pending_req - pending_reqs;
+               fast_flush_area(pending_idx, pending_req->nr_pages);
+               make_response(pending_req->blkif, pending_req->id,
+                             pending_req->operation, pending_req->status);
+               blkif_put(pending_req->blkif);
+               spin_lock_irqsave(&pend_prod_lock, flags);
+               pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+               spin_unlock_irqrestore(&pend_prod_lock, flags);
+               maybe_trigger_blkio_schedule();
+       }
 }
 
 static int end_block_io_op(struct bio *bio, unsigned int done, int error)
 {
-    if ( bio->bi_size != 0 )
-        return 1;
-    __end_block_io_op(bio->bi_private, !error);
-    bio_put(bio);
-    return error;
+       if (bio->bi_size != 0)
+               return 1;
+       __end_block_io_op(bio->bi_private, !error);
+       bio_put(bio);
+       return error;
 }
 
 
@@ -272,10 +271,10 @@
 
 irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
 {
-    blkif_t *blkif = dev_id;
-    add_to_blkdev_list_tail(blkif);
-    maybe_trigger_blkio_schedule();
-    return IRQ_HANDLED;
+       blkif_t *blkif = dev_id;
+       add_to_blkdev_list_tail(blkif);
+       maybe_trigger_blkio_schedule();
+       return IRQ_HANDLED;
 }
 
 
@@ -286,183 +285,174 @@
 
 static int do_block_io_op(blkif_t *blkif, int max_to_do)
 {
-    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
-    blkif_request_t *req;
-    RING_IDX i, rp;
-    int more_to_do = 0;
-
-    rp = blk_ring->sring->req_prod;
-    rmb(); /* Ensure we see queued requests up to 'rp'. */
-
-    for ( i = blk_ring->req_cons; 
-         (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
-          i++ )
-    {
-        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
-        {
-            more_to_do = 1;
-            break;
-        }
+       blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+       blkif_request_t *req;
+       RING_IDX i, rp;
+       int more_to_do = 0;
+
+       rp = blk_ring->sring->req_prod;
+       rmb(); /* Ensure we see queued requests up to 'rp'. */
+
+       for (i = blk_ring->req_cons; 
+            (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
+            i++) {
+               if ((max_to_do-- == 0) ||
+                   (NR_PENDING_REQS == MAX_PENDING_REQS)) {
+                       more_to_do = 1;
+                       break;
+               }
         
-        req = RING_GET_REQUEST(blk_ring, i);
-        switch ( req->operation )
-        {
-        case BLKIF_OP_READ:
-        case BLKIF_OP_WRITE:
-            dispatch_rw_block_io(blkif, req);
-            break;
-
-        default:
-            DPRINTK("error: unknown block io operation [%d]\n",
-                    req->operation);
-            make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
-            break;
-        }
-    }
-
-    blk_ring->req_cons = i;
-    return more_to_do;
+               req = RING_GET_REQUEST(blk_ring, i);
+               switch (req->operation) {
+               case BLKIF_OP_READ:
+               case BLKIF_OP_WRITE:
+                       dispatch_rw_block_io(blkif, req);
+                       break;
+
+               default:
+                       DPRINTK("error: unknown block io operation [%d]\n",
+                               req->operation);
+                       make_response(blkif, req->id, req->operation,
+                                     BLKIF_RSP_ERROR);
+                       break;
+               }
+       }
+
+       blk_ring->req_cons = i;
+       return more_to_do;
 }
 
 static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
 {
-    extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 
-    int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
-    unsigned long fas = 0;
-    int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
-    pending_req_t *pending_req;
-    struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    struct phys_req preq;
-    struct { 
-        unsigned long buf; unsigned int nsec;
-    } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    unsigned int nseg;
-    struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    int nbio = 0;
-    request_queue_t *q;
-
-    /* Check that number of segments is sane. */
-    nseg = req->nr_segments;
-    if ( unlikely(nseg == 0) || 
-         unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
-    {
-        DPRINTK("Bad number of segments in request (%d)\n", nseg);
-        goto bad_descriptor;
-    }
-
-    preq.dev           = req->handle;
-    preq.sector_number = req->sector_number;
-    preq.nr_sects      = 0;
-
-    for ( i = 0; i < nseg; i++ )
-    {
-        fas         = req->frame_and_sects[i];
-        seg[i].nsec = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
-
-        if ( seg[i].nsec <= 0 )
-            goto bad_descriptor;
-        preq.nr_sects += seg[i].nsec;
-
-        map[i].host_addr = MMAP_VADDR(pending_idx, i);
-        map[i].dom = blkif->domid;
-        map[i].ref = blkif_gref_from_fas(fas);
-        map[i].flags = GNTMAP_host_map;
-        if ( operation == WRITE )
-            map[i].flags |= GNTMAP_readonly;
-    }
-
-    if ( unlikely(HYPERVISOR_grant_table_op(
-                    GNTTABOP_map_grant_ref, map, nseg)))
-        BUG();
-
-    for ( i = 0; i < nseg; i++ )
-    {
-        if ( unlikely(map[i].handle < 0) )
-        {
-            DPRINTK("invalid buffer -- could not remap it\n");
-            fast_flush_area(pending_idx, nseg);
-            goto bad_descriptor;
-        }
-
-        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
-            FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
-
-        pending_handle(pending_idx, i) = map[i].handle;
-    }
-
-    for ( i = 0; i < nseg; i++ )
-    {
-        fas         = req->frame_and_sects[i];
-        seg[i].buf  = map[i].dev_bus_addr | (blkif_first_sect(fas) << 9);
-    }
-
-    if ( vbd_translate(&preq, blkif, operation) != 0 )
-    {
-        DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", 
-                operation == READ ? "read" : "write", preq.sector_number,
-                preq.sector_number + preq.nr_sects, preq.dev); 
-        goto bad_descriptor;
-    }
-
-    pending_req = &pending_reqs[pending_idx];
-    pending_req->blkif     = blkif;
-    pending_req->id        = req->id;
-    pending_req->operation = operation;
-    pending_req->status    = BLKIF_RSP_OKAY;
-    pending_req->nr_pages  = nseg;
-
-    for ( i = 0; i < nseg; i++ )
-    {
-        if ( ((int)preq.sector_number|(int)seg[i].nsec) &
-             ((bdev_hardsect_size(preq.bdev) >> 9) - 1) )
-        {
-            DPRINTK("Misaligned I/O request from domain %d", blkif->domid);
-            goto cleanup_and_fail;
-        }
-
-        while ( (bio == NULL) ||
-                (bio_add_page(bio,
-                              virt_to_page(MMAP_VADDR(pending_idx, i)),
-                              seg[i].nsec << 9,
-                              seg[i].buf & ~PAGE_MASK) == 0) )
-        {
-            bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
-            if ( unlikely(bio == NULL) )
-            {
-            cleanup_and_fail:
-                for ( i = 0; i < (nbio-1); i++ )
-                    bio_put(biolist[i]);
-                fast_flush_area(pending_idx, nseg);
-                goto bad_descriptor;
-            }
+       extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 
+       int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
+       unsigned long fas = 0;
+       int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+       pending_req_t *pending_req;
+       struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+       struct phys_req preq;
+       struct { 
+               unsigned long buf; unsigned int nsec;
+       } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+       unsigned int nseg;
+       struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+       int nbio = 0;
+       request_queue_t *q;
+
+       /* Check that number of segments is sane. */
+       nseg = req->nr_segments;
+       if (unlikely(nseg == 0) || 
+           unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+               DPRINTK("Bad number of segments in request (%d)\n", nseg);
+               goto bad_descriptor;
+       }
+
+       preq.dev           = req->handle;
+       preq.sector_number = req->sector_number;
+       preq.nr_sects      = 0;
+
+       for (i = 0; i < nseg; i++) {
+               fas         = req->frame_and_sects[i];
+               seg[i].nsec = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
+
+               if (seg[i].nsec <= 0)
+                       goto bad_descriptor;
+               preq.nr_sects += seg[i].nsec;
+
+               map[i].host_addr = MMAP_VADDR(pending_idx, i);
+               map[i].dom = blkif->domid;
+               map[i].ref = blkif_gref_from_fas(fas);
+               map[i].flags = GNTMAP_host_map;
+               if ( operation == WRITE )
+                       map[i].flags |= GNTMAP_readonly;
+       }
+
+       BUG_ON(HYPERVISOR_grant_table_op(
+               GNTTABOP_map_grant_ref, map, nseg));
+
+       for (i = 0; i < nseg; i++) {
+               if (unlikely(map[i].handle < 0)) {
+                       DPRINTK("invalid buffer -- could not remap it\n");
+                       fast_flush_area(pending_idx, nseg);
+                       goto bad_descriptor;
+               }
+
+               phys_to_machine_mapping[__pa(MMAP_VADDR(
+                       pending_idx, i)) >> PAGE_SHIFT] =
+                       FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
+
+               pending_handle(pending_idx, i) = map[i].handle;
+       }
+
+       for (i = 0; i < nseg; i++) {
+               fas         = req->frame_and_sects[i];
+               seg[i].buf  = map[i].dev_bus_addr | 
+                       (blkif_first_sect(fas) << 9);
+       }
+
+       if (vbd_translate(&preq, blkif, operation) != 0) {
+               DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", 
+                       operation == READ ? "read" : "write",
+                       preq.sector_number,
+                       preq.sector_number + preq.nr_sects, preq.dev); 
+               goto bad_descriptor;
+       }
+
+       pending_req = &pending_reqs[pending_idx];
+       pending_req->blkif     = blkif;
+       pending_req->id        = req->id;
+       pending_req->operation = operation;
+       pending_req->status    = BLKIF_RSP_OKAY;
+       pending_req->nr_pages  = nseg;
+
+       for (i = 0; i < nseg; i++) {
+               if (((int)preq.sector_number|(int)seg[i].nsec) &
+                   ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
+                       DPRINTK("Misaligned I/O request from domain %d",
+                               blkif->domid);
+                       goto cleanup_and_fail;
+               }
+
+               while ((bio == NULL) ||
+                      (bio_add_page(bio,
+                                    virt_to_page(MMAP_VADDR(pending_idx, i)),
+                                    seg[i].nsec << 9,
+                                    seg[i].buf & ~PAGE_MASK) == 0)) {
+                       bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
+                       if (unlikely(bio == NULL)) {
+                       cleanup_and_fail:
+                               for (i = 0; i < (nbio-1); i++)
+                                       bio_put(biolist[i]);
+                               fast_flush_area(pending_idx, nseg);
+                               goto bad_descriptor;
+                       }
                 
-            bio->bi_bdev    = preq.bdev;
-            bio->bi_private = pending_req;
-            bio->bi_end_io  = end_block_io_op;
-            bio->bi_sector  = preq.sector_number;
-        }
-
-        preq.sector_number += seg[i].nsec;
-    }
-
-    if ( (q = bdev_get_queue(bio->bi_bdev)) != plugged_queue )
-    {
-        flush_plugged_queue();
-        blk_get_queue(q);
-        plugged_queue = q;
-    }
-
-    atomic_set(&pending_req->pendcnt, nbio);
-    pending_cons++;
-    blkif_get(blkif);
-
-    for ( i = 0; i < nbio; i++ )
-        submit_bio(operation, biolist[i]);
-
-    return;
+                       bio->bi_bdev    = preq.bdev;
+                       bio->bi_private = pending_req;
+                       bio->bi_end_io  = end_block_io_op;
+                       bio->bi_sector  = preq.sector_number;
+               }
+
+               preq.sector_number += seg[i].nsec;
+       }
+
+       if ((q = bdev_get_queue(bio->bi_bdev)) != plugged_queue) {
+               flush_plugged_queue();
+               blk_get_queue(q);
+               plugged_queue = q;
+       }
+
+       atomic_set(&pending_req->pendcnt, nbio);
+       pending_cons++;
+       blkif_get(blkif);
+
+       for (i = 0; i < nbio; i++)
+               submit_bio(operation, biolist[i]);
+
+       return;
 
  bad_descriptor:
-    make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
+       make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
 } 
 
 
@@ -475,66 +465,71 @@
 static void make_response(blkif_t *blkif, unsigned long id, 
                           unsigned short op, int st)
 {
-    blkif_response_t *resp;
-    unsigned long     flags;
-    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
-
-    /* Place on the response ring for the relevant domain. */ 
-    spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-    resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
-    resp->id        = id;
-    resp->operation = op;
-    resp->status    = st;
-    wmb(); /* Ensure other side can see the response fields. */
-    blk_ring->rsp_prod_pvt++;
-    RING_PUSH_RESPONSES(blk_ring);
-    spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-
-    /* Kick the relevant domain. */
-    notify_via_evtchn(blkif->evtchn);
+       blkif_response_t *resp;
+       unsigned long     flags;
+       blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+
+       /* Place on the response ring for the relevant domain. */ 
+       spin_lock_irqsave(&blkif->blk_ring_lock, flags);
+       resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
+       resp->id        = id;
+       resp->operation = op;
+       resp->status    = st;
+       wmb(); /* Ensure other side can see the response fields. */
+       blk_ring->rsp_prod_pvt++;
+       RING_PUSH_RESPONSES(blk_ring);
+       spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
+
+       /* Kick the relevant domain. */
+       notify_via_evtchn(blkif->evtchn);
 }
 
 void blkif_deschedule(blkif_t *blkif)
 {
-    remove_from_blkdev_list(blkif);
+       remove_from_blkdev_list(blkif);
 }
 
 static int __init blkif_init(void)
 {
-    int i;
-    struct page *page;
-
-    if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
-         !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
-        return 0;
-
-    blkif_interface_init();
-
-    page = balloon_alloc_empty_page_range(MMAP_PAGES);
-    BUG_ON(page == NULL);
-    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-
-    pending_cons = 0;
-    pending_prod = MAX_PENDING_REQS;
-    memset(pending_reqs, 0, sizeof(pending_reqs));
-    for ( i = 0; i < MAX_PENDING_REQS; i++ )
-        pending_ring[i] = i;
+       int i;
+       struct page *page;
+
+       if (!(xen_start_info->flags & SIF_INITDOMAIN) &&
+           !(xen_start_info->flags & SIF_BLK_BE_DOMAIN))
+               return 0;
+
+       blkif_interface_init();
+
+       page = balloon_alloc_empty_page_range(MMAP_PAGES);
+       BUG_ON(page == NULL);
+       mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
+
+       pending_cons = 0;
+       pending_prod = MAX_PENDING_REQS;
+       memset(pending_reqs, 0, sizeof(pending_reqs));
+       for (i = 0; i < MAX_PENDING_REQS; i++)
+               pending_ring[i] = i;
     
-    spin_lock_init(&blkio_schedule_list_lock);
-    INIT_LIST_HEAD(&blkio_schedule_list);
-
-    if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
-        BUG();
-
-    blkif_xenbus_init();
-
-    memset( pending_grant_handles,  BLKBACK_INVALID_HANDLE, MMAP_PAGES );
-
-#ifdef CONFIG_XEN_BLKDEV_TAP_BE
-    printk(KERN_ALERT "NOTE: Blkif backend is running with tap support on!\n");
-#endif
-
-    return 0;
+       spin_lock_init(&blkio_schedule_list_lock);
+       INIT_LIST_HEAD(&blkio_schedule_list);
+
+       BUG_ON(kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0);
+
+       blkif_xenbus_init();
+
+       memset(pending_grant_handles,  BLKBACK_INVALID_HANDLE, MMAP_PAGES);
+
+       return 0;
 }
 
 __initcall(blkif_init);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/blkback/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/common.h Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/common.h Thu Sep 22 15:12:14 2005
@@ -31,39 +31,39 @@
 #endif
 
 struct vbd {
-    blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
-    unsigned char  readonly;    /* Non-zero -> read-only */
-    unsigned char  type;        /* VDISK_xxx */
-    u32            pdevice;     /* phys device that this vbd maps to */
-    struct block_device *bdev;
+       blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
+       unsigned char  readonly;    /* Non-zero -> read-only */
+       unsigned char  type;        /* VDISK_xxx */
+       u32            pdevice;     /* phys device that this vbd maps to */
+       struct block_device *bdev;
 }; 
 
 typedef struct blkif_st {
-    /* Unique identifier for this interface. */
-    domid_t           domid;
-    unsigned int      handle;
-    /* Physical parameters of the comms window. */
-    unsigned int      evtchn;
-    unsigned int      remote_evtchn;
-    /* Comms information. */
-    blkif_back_ring_t blk_ring;
-    struct vm_struct *blk_ring_area;
-    /* VBDs attached to this interface. */
-    struct vbd        vbd;
-    /* Private fields. */
-    enum { DISCONNECTED, CONNECTED } status;
+       /* Unique identifier for this interface. */
+       domid_t           domid;
+       unsigned int      handle;
+       /* Physical parameters of the comms window. */
+       unsigned int      evtchn;
+       unsigned int      remote_evtchn;
+       /* Comms information. */
+       blkif_back_ring_t blk_ring;
+       struct vm_struct *blk_ring_area;
+       /* VBDs attached to this interface. */
+       struct vbd        vbd;
+       /* Private fields. */
+       enum { DISCONNECTED, CONNECTED } status;
 #ifdef CONFIG_XEN_BLKDEV_TAP_BE
-    /* Is this a blktap frontend */
-    unsigned int     is_blktap;
+       /* Is this a blktap frontend */
+       unsigned int     is_blktap;
 #endif
-    struct list_head blkdev_list;
-    spinlock_t       blk_ring_lock;
-    atomic_t         refcnt;
+       struct list_head blkdev_list;
+       spinlock_t       blk_ring_lock;
+       atomic_t         refcnt;
 
-    struct work_struct free_work;
+       struct work_struct free_work;
 
-    u16         shmem_handle;
-    grant_ref_t shmem_ref;
+       u16         shmem_handle;
+       grant_ref_t shmem_ref;
 } blkif_t;
 
 blkif_t *alloc_blkif(domid_t domid);
@@ -71,11 +71,11 @@
 int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
 
 #define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
-#define blkif_put(_b)                             \
-    do {                                          \
-        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
-            free_blkif_callback(_b);             \
-    } while (0)
+#define blkif_put(_b)                                  \
+       do {                                            \
+               if (atomic_dec_and_test(&(_b)->refcnt)) \
+                       free_blkif_callback(_b);        \
+       } while (0)
 
 /* Create a vbd. */
 int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, u32 pdevice,
@@ -87,10 +87,10 @@
 unsigned long vbd_secsize(struct vbd *vbd);
 
 struct phys_req {
-    unsigned short       dev;
-    unsigned short       nr_sects;
-    struct block_device *bdev;
-    blkif_sector_t       sector_number;
+       unsigned short       dev;
+       unsigned short       nr_sects;
+       struct block_device *bdev;
+       blkif_sector_t       sector_number;
 };
 
 int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation); 
@@ -104,3 +104,13 @@
 irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
 
 #endif /* __BLKIF__BACKEND__COMMON_H__ */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/blkback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c      Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c      Thu Sep 22 15:12:14 2005
@@ -13,134 +13,144 @@
 
 blkif_t *alloc_blkif(domid_t domid)
 {
-    blkif_t *blkif;
+       blkif_t *blkif;
 
-    blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
-    if (!blkif)
-           return ERR_PTR(-ENOMEM);
+       blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
+       if (!blkif)
+               return ERR_PTR(-ENOMEM);
 
-    memset(blkif, 0, sizeof(*blkif));
-    blkif->domid = domid;
-    blkif->status = DISCONNECTED;
-    spin_lock_init(&blkif->blk_ring_lock);
-    atomic_set(&blkif->refcnt, 1);
+       memset(blkif, 0, sizeof(*blkif));
+       blkif->domid = domid;
+       blkif->status = DISCONNECTED;
+       spin_lock_init(&blkif->blk_ring_lock);
+       atomic_set(&blkif->refcnt, 1);
 
-    return blkif;
+       return blkif;
 }
 
 static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
 {
-    struct gnttab_map_grant_ref op;
+       struct gnttab_map_grant_ref op;
 
-    op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
-    op.flags     = GNTMAP_host_map;
-    op.ref       = shared_page;
-    op.dom       = blkif->domid;
+       op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
+       op.flags     = GNTMAP_host_map;
+       op.ref       = shared_page;
+       op.dom       = blkif->domid;
 
-    lock_vm_area(blkif->blk_ring_area);
-    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
-    unlock_vm_area(blkif->blk_ring_area);
+       lock_vm_area(blkif->blk_ring_area);
+       BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
+       unlock_vm_area(blkif->blk_ring_area);
 
-    if (op.handle < 0) {
-       DPRINTK(" Grant table operation failure !\n");
-       return op.handle;
-    }
+       if (op.handle < 0) {
+               DPRINTK(" Grant table operation failure !\n");
+               return op.handle;
+       }
 
-    blkif->shmem_ref = shared_page;
-    blkif->shmem_handle = op.handle;
+       blkif->shmem_ref = shared_page;
+       blkif->shmem_handle = op.handle;
 
-    return 0;
+       return 0;
 }
 
 static void unmap_frontend_page(blkif_t *blkif)
 {
-    struct gnttab_unmap_grant_ref op;
+       struct gnttab_unmap_grant_ref op;
 
-    op.host_addr    = (unsigned long)blkif->blk_ring_area->addr;
-    op.handle       = blkif->shmem_handle;
-    op.dev_bus_addr = 0;
+       op.host_addr    = (unsigned long)blkif->blk_ring_area->addr;
+       op.handle       = blkif->shmem_handle;
+       op.dev_bus_addr = 0;
 
-    lock_vm_area(blkif->blk_ring_area);
-    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
-    unlock_vm_area(blkif->blk_ring_area);
+       lock_vm_area(blkif->blk_ring_area);
+       BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
+       unlock_vm_area(blkif->blk_ring_area);
 }
 
 int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
 {
-    blkif_sring_t *sring;
-    evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
-    int err;
+       blkif_sring_t *sring;
+       evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
+       int err;
 
-    BUG_ON(blkif->remote_evtchn);
+       BUG_ON(blkif->remote_evtchn);
 
-    if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
-       return -ENOMEM;
+       if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
+               return -ENOMEM;
 
-    err = map_frontend_page(blkif, shared_page);
-    if (err) {
-        free_vm_area(blkif->blk_ring_area);
-       return err;
-    }
+       err = map_frontend_page(blkif, shared_page);
+       if (err) {
+               free_vm_area(blkif->blk_ring_area);
+               return err;
+       }
 
-    op.u.bind_interdomain.dom1 = DOMID_SELF;
-    op.u.bind_interdomain.dom2 = blkif->domid;
-    op.u.bind_interdomain.port1 = 0;
-    op.u.bind_interdomain.port2 = evtchn;
-    err = HYPERVISOR_event_channel_op(&op);
-    if (err) {
-       unmap_frontend_page(blkif);
-        free_vm_area(blkif->blk_ring_area);
-       return err;
-    }
+       op.u.bind_interdomain.dom1 = DOMID_SELF;
+       op.u.bind_interdomain.dom2 = blkif->domid;
+       op.u.bind_interdomain.port1 = 0;
+       op.u.bind_interdomain.port2 = evtchn;
+       err = HYPERVISOR_event_channel_op(&op);
+       if (err) {
+               unmap_frontend_page(blkif);
+               free_vm_area(blkif->blk_ring_area);
+               return err;
+       }
 
-    blkif->evtchn = op.u.bind_interdomain.port1;
-    blkif->remote_evtchn = evtchn;
+       blkif->evtchn = op.u.bind_interdomain.port1;
+       blkif->remote_evtchn = evtchn;
 
-    sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-    SHARED_RING_INIT(sring);
-    BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
+       sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
+       SHARED_RING_INIT(sring);
+       BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
 
-    bind_evtchn_to_irqhandler(blkif->evtchn, blkif_be_int, 0, "blkif-backend",
-                             blkif);
-    blkif->status = CONNECTED;
+       bind_evtchn_to_irqhandler(
+               blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
+       blkif->status = CONNECTED;
 
-    return 0;
+       return 0;
 }
 
 static void free_blkif(void *arg)
 {
-    evtchn_op_t op = { .cmd = EVTCHNOP_close };
-    blkif_t *blkif = (blkif_t *)arg;
+       evtchn_op_t op = { .cmd = EVTCHNOP_close };
+       blkif_t *blkif = (blkif_t *)arg;
 
-    op.u.close.port = blkif->evtchn;
-    op.u.close.dom = DOMID_SELF;
-    HYPERVISOR_event_channel_op(&op);
-    op.u.close.port = blkif->remote_evtchn;
-    op.u.close.dom = blkif->domid;
-    HYPERVISOR_event_channel_op(&op);
+       op.u.close.port = blkif->evtchn;
+       op.u.close.dom = DOMID_SELF;
+       HYPERVISOR_event_channel_op(&op);
+       op.u.close.port = blkif->remote_evtchn;
+       op.u.close.dom = blkif->domid;
+       HYPERVISOR_event_channel_op(&op);
 
-    vbd_free(&blkif->vbd);
+       vbd_free(&blkif->vbd);
 
-    if (blkif->evtchn)
-        unbind_evtchn_from_irqhandler(blkif->evtchn, blkif);
+       if (blkif->evtchn)
+               unbind_evtchn_from_irqhandler(blkif->evtchn, blkif);
 
-    if (blkif->blk_ring.sring) {
-       unmap_frontend_page(blkif);
-        free_vm_area(blkif->blk_ring_area);
-       blkif->blk_ring.sring = NULL;
-    }
+       if (blkif->blk_ring.sring) {
+               unmap_frontend_page(blkif);
+               free_vm_area(blkif->blk_ring_area);
+               blkif->blk_ring.sring = NULL;
+       }
 
-    kmem_cache_free(blkif_cachep, blkif);
+       kmem_cache_free(blkif_cachep, blkif);
 }
 
 void free_blkif_callback(blkif_t *blkif)
 {
-    INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
-    schedule_work(&blkif->free_work);
+       INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
+       schedule_work(&blkif->free_work);
 }
 
 void __init blkif_interface_init(void)
 {
-    blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 
-                                     0, 0, NULL, NULL);
+       blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 
+                                        0, 0, NULL, NULL);
 }
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c    Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c    Thu Sep 22 15:12:14 2005
@@ -11,10 +11,10 @@
 
 static inline dev_t vbd_map_devnum(u32 cookie)
 {
-    return MKDEV(BLKIF_MAJOR(cookie), BLKIF_MINOR(cookie));
+       return MKDEV(BLKIF_MAJOR(cookie), BLKIF_MINOR(cookie));
 }
-#define vbd_sz(_v)   ((_v)->bdev->bd_part ? \
-    (_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
+#define vbd_sz(_v)   ((_v)->bdev->bd_part ?                            \
+       (_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
 #define bdev_put(_b) blkdev_put(_b)
 
 unsigned long vbd_size(struct vbd *vbd)
@@ -35,63 +35,73 @@
 int vbd_create(blkif_t *blkif, blkif_vdev_t handle,
               u32 pdevice, int readonly)
 {
-    struct vbd *vbd;
+       struct vbd *vbd;
 
-    vbd = &blkif->vbd;
-    vbd->handle   = handle; 
-    vbd->readonly = readonly;
-    vbd->type     = 0;
+       vbd = &blkif->vbd;
+       vbd->handle   = handle; 
+       vbd->readonly = readonly;
+       vbd->type     = 0;
 
-    vbd->pdevice  = pdevice;
+       vbd->pdevice  = pdevice;
 
-    vbd->bdev = open_by_devnum(
-        vbd_map_devnum(vbd->pdevice),
-        vbd->readonly ? FMODE_READ : FMODE_WRITE);
-    if ( IS_ERR(vbd->bdev) )
-    {
-        DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice);
-        return -ENOENT;
-    }
+       vbd->bdev = open_by_devnum(
+               vbd_map_devnum(vbd->pdevice),
+               vbd->readonly ? FMODE_READ : FMODE_WRITE);
+       if (IS_ERR(vbd->bdev)) {
+               DPRINTK("vbd_creat: device %08x doesn't exist.\n",
+                       vbd->pdevice);
+               return -ENOENT;
+       }
 
-    if ( (vbd->bdev->bd_disk == NULL) )
-    {
-        DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice);
-       vbd_free(vbd);
-        return -ENOENT;
-    }
+       if (vbd->bdev->bd_disk == NULL) {
+               DPRINTK("vbd_creat: device %08x doesn't exist.\n",
+                       vbd->pdevice);
+               vbd_free(vbd);
+               return -ENOENT;
+       }
 
-    if ( vbd->bdev->bd_disk->flags & GENHD_FL_CD )
-        vbd->type |= VDISK_CDROM;
-    if ( vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE )
-        vbd->type |= VDISK_REMOVABLE;
+       if (vbd->bdev->bd_disk->flags & GENHD_FL_CD)
+               vbd->type |= VDISK_CDROM;
+       if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
+               vbd->type |= VDISK_REMOVABLE;
 
-    DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
-            handle, blkif->domid);
-    return 0;
+       DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
+               handle, blkif->domid);
+       return 0;
 }
 
 void vbd_free(struct vbd *vbd)
 {
-    if (vbd->bdev)
-       bdev_put(vbd->bdev);
-    vbd->bdev = NULL;
+       if (vbd->bdev)
+               bdev_put(vbd->bdev);
+       vbd->bdev = NULL;
 }
 
 int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
 {
-    struct vbd *vbd = &blkif->vbd;
-    int rc = -EACCES;
+       struct vbd *vbd = &blkif->vbd;
+       int rc = -EACCES;
 
-    if ((operation == WRITE) && vbd->readonly)
-        goto out;
+       if ((operation == WRITE) && vbd->readonly)
+               goto out;
 
-    if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
-        goto out;
+       if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
+               goto out;
 
-    req->dev  = vbd->pdevice;
-    req->bdev = vbd->bdev;
-    rc = 0;
+       req->dev  = vbd->pdevice;
+       req->bdev = vbd->bdev;
+       rc = 0;
 
  out:
-    return rc;
+       return rc;
 }
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c Thu Sep 22 15:12:14 2005
@@ -124,7 +124,7 @@
 
        return;
 
-abort:
+ abort:
        xenbus_transaction_end(1);
 }
 
@@ -278,3 +278,13 @@
 {
        xenbus_register_backend(&blkback);
 }
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/blkfront/block.h
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Thu Sep 22 15:12:14 2005
@@ -146,4 +146,15 @@
 int xlvbd_add(blkif_sector_t capacity, int device,
              u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
 void xlvbd_del(struct blkfront_info *info);
+
 #endif /* __XEN_DRIVERS_BLOCK_H__ */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c   Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c   Thu Sep 22 15:12:14 2005
@@ -65,7 +65,7 @@
 };
 
 static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
-                                         NUM_VBD_MAJORS];
+                                        NUM_VBD_MAJORS];
 
 #define XLBD_MAJOR_IDE_START   0
 #define XLBD_MAJOR_SCSI_START  (NUM_IDE_MAJORS)
@@ -309,3 +309,13 @@
 
        bdput(bd);
 }
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c
--- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c  Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c  Thu Sep 22 15:12:14 2005
@@ -4,7 +4,6 @@
  * This is a modified version of the block backend driver that remaps requests
  * to a user-space memory region.  It is intended to be used to write 
  * application-level servers that provide block interfaces to client VMs.
- * 
  */
 
 #include <linux/kernel.h>
@@ -67,20 +66,19 @@
 
 static inline int BLKTAP_MODE_VALID(unsigned long arg)
 {
-    return (
-        ( arg == BLKTAP_MODE_PASSTHROUGH  ) ||
-        ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
-        ( arg == BLKTAP_MODE_INTERPOSE    ) );
+       return ((arg == BLKTAP_MODE_PASSTHROUGH ) ||
+               (arg == BLKTAP_MODE_INTERCEPT_FE) ||
+               (arg == BLKTAP_MODE_INTERPOSE   ));
 /*
-    return (
-        ( arg == BLKTAP_MODE_PASSTHROUGH  ) ||
-        ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
-        ( arg == BLKTAP_MODE_INTERCEPT_BE ) ||
-        ( arg == BLKTAP_MODE_INTERPOSE    ) ||
-        ( (arg & ~BLKTAP_MODE_COPY_FE_PAGES) == BLKTAP_MODE_COPY_FE ) ||
-        ( (arg & ~BLKTAP_MODE_COPY_BE_PAGES) == BLKTAP_MODE_COPY_BE ) ||
-        ( (arg & ~BLKTAP_MODE_COPY_BOTH_PAGES) == BLKTAP_MODE_COPY_BOTH )
-        );
+  return (
+  ( arg == BLKTAP_MODE_PASSTHROUGH  ) ||
+  ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
+  ( arg == BLKTAP_MODE_INTERCEPT_BE ) ||
+  ( arg == BLKTAP_MODE_INTERPOSE    ) ||
+  ( (arg & ~BLKTAP_MODE_COPY_FE_PAGES) == BLKTAP_MODE_COPY_FE ) ||
+  ( (arg & ~BLKTAP_MODE_COPY_BE_PAGES) == BLKTAP_MODE_COPY_BE ) ||
+  ( (arg & ~BLKTAP_MODE_COPY_BOTH_PAGES) == BLKTAP_MODE_COPY_BOTH )
+  );
 */
 }
 
@@ -110,14 +108,12 @@
 unsigned long rings_vstart; /* start of mmaped vma               */
 unsigned long user_vstart;  /* start of user mappings            */
 
-#define MMAP_PAGES                                              \
-    (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
-#define MMAP_VADDR(_start, _req,_seg)                           \
-    (_start +                                                   \
-     ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +    \
-     ((_seg) * PAGE_SIZE))
-
-
+#define MMAP_PAGES                                             \
+       (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
+#define MMAP_VADDR(_start, _req,_seg)                                  \
+       (_start +                                                       \
+        ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +        \
+        ((_seg) * PAGE_SIZE))
 
 /*
  * Each outstanding request that we've passed to the lower device layers has a 
@@ -126,12 +122,12 @@
  * response queued for it, with the saved 'id' passed back.
  */
 typedef struct {
-    blkif_t       *blkif;
-    unsigned long  id;
-    int            nr_pages;
-    atomic_t       pendcnt;
-    unsigned short operation;
-    int            status;
+       blkif_t       *blkif;
+       unsigned long  id;
+       int            nr_pages;
+       atomic_t       pendcnt;
+       unsigned short operation;
+       int            status;
 } pending_req_t;
 
 /*
@@ -156,17 +152,17 @@
 
 static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx)
 {
-    return ( (fe_dom << 16) | MASK_PEND_IDX(idx) );
+       return ((fe_dom << 16) | MASK_PEND_IDX(idx));
 }
 
 extern inline PEND_RING_IDX ID_TO_IDX(unsigned long id) 
 { 
-    return (PEND_RING_IDX)( id & 0x0000ffff );
+       return (PEND_RING_IDX)(id & 0x0000ffff);
 }
 
 extern inline domid_t ID_TO_DOM(unsigned long id) 
 { 
-    return (domid_t)(id >> 16); 
+       return (domid_t)(id >> 16); 
 }
 
 
@@ -181,8 +177,8 @@
  */
 struct grant_handle_pair
 {
-    u16  kernel;
-    u16  user;
+       u16  kernel;
+       u16  user;
 };
 static struct grant_handle_pair pending_grant_handles[MMAP_PAGES];
 #define pending_handle(_idx, _i) \
@@ -199,21 +195,20 @@
  */
 
 static struct page *blktap_nopage(struct vm_area_struct *vma,
-                                             unsigned long address,
-                                             int *type)
-{
-    /*
-     * if the page has not been mapped in by the driver then generate
-     * a SIGBUS to the domain.
-     */
-
-    force_sig(SIGBUS, current);
-
-    return 0;
+                                 unsigned long address,
+                                 int *type)
+{
+       /*
+        * if the page has not been mapped in by the driver then generate
+        * a SIGBUS to the domain.
+        */
+       force_sig(SIGBUS, current);
+
+       return 0;
 }
 
 struct vm_operations_struct blktap_vm_ops = {
-    nopage:   blktap_nopage,
+       nopage:   blktap_nopage,
 };
 
 /******************************************************************
@@ -222,44 +217,45 @@
 
 static int blktap_open(struct inode *inode, struct file *filp)
 {
-    blkif_sring_t *sring;
+       blkif_sring_t *sring;
+
+       if (test_and_set_bit(0, &blktap_dev_inuse))
+               return -EBUSY;
     
-    if ( test_and_set_bit(0, &blktap_dev_inuse) )
-        return -EBUSY;
+       /* Allocate the fe ring. */
+       sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
+       if (sring == NULL)
+               goto fail_nomem;
+
+       SetPageReserved(virt_to_page(sring));
     
-    /* Allocate the fe ring. */
-    sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
-    if (sring == NULL)
-        goto fail_nomem;
-
-    SetPageReserved(virt_to_page(sring));
-    
-    SHARED_RING_INIT(sring);
-    FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE);
-
-    return 0;
+       SHARED_RING_INIT(sring);
+       FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE);
+
+       return 0;
 
  fail_nomem:
-    return -ENOMEM;
+       return -ENOMEM;
 }
 
 static int blktap_release(struct inode *inode, struct file *filp)
 {
-    blktap_dev_inuse = 0;
-    blktap_ring_ok = 0;
-
-    /* Free the ring page. */
-    ClearPageReserved(virt_to_page(blktap_ufe_ring.sring));
-    free_page((unsigned long) blktap_ufe_ring.sring);
-
-    /* Clear any active mappings and free foreign map table */
-    if (blktap_vma != NULL) {
-        zap_page_range(blktap_vma, blktap_vma->vm_start, 
-                       blktap_vma->vm_end - blktap_vma->vm_start, NULL);
-        blktap_vma = NULL;
-    }
-
-    return 0;
+       blktap_dev_inuse = 0;
+       blktap_ring_ok = 0;
+
+       /* Free the ring page. */
+       ClearPageReserved(virt_to_page(blktap_ufe_ring.sring));
+       free_page((unsigned long) blktap_ufe_ring.sring);
+
+       /* Clear any active mappings and free foreign map table */
+       if (blktap_vma != NULL) {
+               zap_page_range(
+                       blktap_vma, blktap_vma->vm_start, 
+                       blktap_vma->vm_end - blktap_vma->vm_start, NULL);
+               blktap_vma = NULL;
+       }
+
+       return 0;
 }
 
 
@@ -283,128 +279,124 @@
  */
 static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
 {
-    int size;
-    struct page **map;
-    int i;
-
-    DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n",
-           vma->vm_start, vma->vm_end);
-
-    vma->vm_flags |= VM_RESERVED;
-    vma->vm_ops = &blktap_vm_ops;
-
-    size = vma->vm_end - vma->vm_start;
-    if ( size != ( (MMAP_PAGES + RING_PAGES) << PAGE_SHIFT ) ) {
-        printk(KERN_INFO 
-               "blktap: you _must_ map exactly %d pages!\n",
-               MMAP_PAGES + RING_PAGES);
-        return -EAGAIN;
-    }
-
-    size >>= PAGE_SHIFT;
-    DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
+       int size;
+       struct page **map;
+       int i;
+
+       DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n",
+               vma->vm_start, vma->vm_end);
+
+       vma->vm_flags |= VM_RESERVED;
+       vma->vm_ops = &blktap_vm_ops;
+
+       size = vma->vm_end - vma->vm_start;
+       if (size != ((MMAP_PAGES + RING_PAGES) << PAGE_SHIFT)) {
+               printk(KERN_INFO 
+                      "blktap: you _must_ map exactly %d pages!\n",
+                      MMAP_PAGES + RING_PAGES);
+               return -EAGAIN;
+       }
+
+       size >>= PAGE_SHIFT;
+       DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
     
-    rings_vstart = vma->vm_start;
-    user_vstart  = rings_vstart + (RING_PAGES << PAGE_SHIFT);
+       rings_vstart = vma->vm_start;
+       user_vstart  = rings_vstart + (RING_PAGES << PAGE_SHIFT);
     
-    /* Map the ring pages to the start of the region and reserve it. */
-
-    /* not sure if I really need to do this... */
-    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-    if (remap_pfn_range(vma, vma->vm_start, 
-                         __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT, 
-                         PAGE_SIZE, vma->vm_page_prot)) 
-    {
-        WPRINTK("Mapping user ring failed!\n");
-        goto fail;
-    }
-
-    /* Mark this VM as containing foreign pages, and set up mappings. */
-    map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
-                  * sizeof(struct page_struct*),
-                  GFP_KERNEL);
-    if (map == NULL) 
-    {
-        WPRINTK("Couldn't alloc VM_FOREIGH map.\n");
-        goto fail;
-    }
-
-    for (i=0; i<((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
-        map[i] = NULL;
+       /* Map the ring pages to the start of the region and reserve it. */
+
+       /* not sure if I really need to do this... */
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+       if (remap_pfn_range(vma, vma->vm_start, 
+                           __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT, 
+                           PAGE_SIZE, vma->vm_page_prot)) {
+               WPRINTK("Mapping user ring failed!\n");
+               goto fail;
+       }
+
+       /* Mark this VM as containing foreign pages, and set up mappings. */
+       map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
+                     * sizeof(struct page_struct*),
+                     GFP_KERNEL);
+       if (map == NULL) {
+               WPRINTK("Couldn't alloc VM_FOREIGN map.\n");
+               goto fail;
+       }
+
+       for (i = 0; i < ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
+               map[i] = NULL;
     
-    vma->vm_private_data = map;
-    vma->vm_flags |= VM_FOREIGN;
-
-    blktap_vma = vma;
-    blktap_ring_ok = 1;
-
-    return 0;
+       vma->vm_private_data = map;
+       vma->vm_flags |= VM_FOREIGN;
+
+       blktap_vma = vma;
+       blktap_ring_ok = 1;
+
+       return 0;
  fail:
-    /* Clear any active mappings. */
-    zap_page_range(vma, vma->vm_start, 
-                   vma->vm_end - vma->vm_start, NULL);
-
-    return -ENOMEM;
+       /* Clear any active mappings. */
+       zap_page_range(vma, vma->vm_start, 
+                      vma->vm_end - vma->vm_start, NULL);
+
+       return -ENOMEM;
 }
 
 static int blktap_ioctl(struct inode *inode, struct file *filp,
                         unsigned int cmd, unsigned long arg)
 {
-    switch(cmd) {
-    case BLKTAP_IOCTL_KICK_FE: /* There are fe messages to process. */
-        return blktap_read_ufe_ring();
-
-    case BLKTAP_IOCTL_SETMODE:
-        if (BLKTAP_MODE_VALID(arg)) {
-            blktap_mode = arg;
-            /* XXX: may need to flush rings here. */
-            printk(KERN_INFO "blktap: set mode to %lx\n", arg);
-            return 0;
-        }
-    case BLKTAP_IOCTL_PRINT_IDXS:
+       switch(cmd) {
+       case BLKTAP_IOCTL_KICK_FE: /* There are fe messages to process. */
+               return blktap_read_ufe_ring();
+
+       case BLKTAP_IOCTL_SETMODE:
+               if (BLKTAP_MODE_VALID(arg)) {
+                       blktap_mode = arg;
+                       /* XXX: may need to flush rings here. */
+                       printk(KERN_INFO "blktap: set mode to %lx\n", arg);
+                       return 0;
+               }
+       case BLKTAP_IOCTL_PRINT_IDXS:
         {
-            //print_fe_ring_idxs();
-            WPRINTK("User Rings: \n-----------\n");
-            WPRINTK("UF: rsp_cons: %2d, req_prod_prv: %2d "
-                            "| req_prod: %2d, rsp_prod: %2d\n",
-                            blktap_ufe_ring.rsp_cons,
-                            blktap_ufe_ring.req_prod_pvt,
-                            blktap_ufe_ring.sring->req_prod,
-                            blktap_ufe_ring.sring->rsp_prod);
+               //print_fe_ring_idxs();
+               WPRINTK("User Rings: \n-----------\n");
+               WPRINTK("UF: rsp_cons: %2d, req_prod_prv: %2d "
+                       "| req_prod: %2d, rsp_prod: %2d\n",
+                       blktap_ufe_ring.rsp_cons,
+                       blktap_ufe_ring.req_prod_pvt,
+                       blktap_ufe_ring.sring->req_prod,
+                       blktap_ufe_ring.sring->rsp_prod);
             
         }
-    }
-    return -ENOIOCTLCMD;
+       }
+       return -ENOIOCTLCMD;
 }
 
 static unsigned int blktap_poll(struct file *file, poll_table *wait)
 {
-        poll_wait(file, &blktap_wait, wait);
-        if ( RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring) ) 
-        {
-            flush_tlb_all();
-
-            RING_PUSH_REQUESTS(&blktap_ufe_ring);
-            return POLLIN | POLLRDNORM;
-        }
-
-        return 0;
+       poll_wait(file, &blktap_wait, wait);
+       if (RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring)) {
+               flush_tlb_all();
+               RING_PUSH_REQUESTS(&blktap_ufe_ring);
+               return POLLIN | POLLRDNORM;
+       }
+
+       return 0;
 }
 
 void blktap_kick_user(void)
 {
-    /* blktap_ring->req_prod = blktap_req_prod; */
-    wake_up_interruptible(&blktap_wait);
+       /* blktap_ring->req_prod = blktap_req_prod; */
+       wake_up_interruptible(&blktap_wait);
 }
 
 static struct file_operations blktap_fops = {
-    owner:    THIS_MODULE,
-    poll:     blktap_poll,
-    ioctl:    blktap_ioctl,
-    open:     blktap_open,
-    release:  blktap_release,
-    mmap:     blktap_mmap,
+       owner:    THIS_MODULE,
+       poll:     blktap_poll,
+       ioctl:    blktap_ioctl,
+       open:     blktap_open,
+       release:  blktap_release,
+       mmap:     blktap_mmap,
 };
 
 
@@ -417,44 +409,44 @@
 
 static void fast_flush_area(int idx, int nr_pages)
 {
-    struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
-    unsigned int i, op = 0;
-    struct grant_handle_pair *handle;
-    unsigned long ptep;
-
-    for (i=0; i<nr_pages; i++)
-    {
-        handle = &pending_handle(idx, i);
-        if (!BLKTAP_INVALID_HANDLE(handle))
-        {
-
-            unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i);
-            unmap[op].dev_bus_addr = 0;
-            unmap[op].handle = handle->kernel;
-            op++;
-
-            if (create_lookup_pte_addr(blktap_vma->vm_mm,
-                                       MMAP_VADDR(user_vstart, idx, i), 
-                                       &ptep) !=0) {
-                DPRINTK("Couldn't get a pte addr!\n");
-                return;
-            }
-            unmap[op].host_addr    = ptep;
-            unmap[op].dev_bus_addr = 0;
-            unmap[op].handle       = handle->user;
-            op++;
+       struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
+       unsigned int i, op = 0;
+       struct grant_handle_pair *handle;
+       unsigned long ptep;
+
+       for ( i = 0; i < nr_pages; i++)
+       {
+               handle = &pending_handle(idx, i);
+               if (BLKTAP_INVALID_HANDLE(handle))
+                       continue;
+
+               unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i);
+               unmap[op].dev_bus_addr = 0;
+               unmap[op].handle = handle->kernel;
+               op++;
+
+               if (create_lookup_pte_addr(
+                       blktap_vma->vm_mm,
+                       MMAP_VADDR(user_vstart, idx, i), 
+                       &ptep) !=0) {
+                       DPRINTK("Couldn't get a pte addr!\n");
+                       return;
+               }
+               unmap[op].host_addr    = ptep;
+               unmap[op].dev_bus_addr = 0;
+               unmap[op].handle       = handle->user;
+               op++;
             
-           BLKTAP_INVALIDATE_HANDLE(handle);
-        }
-    }
-    if ( unlikely(HYPERVISOR_grant_table_op(
-        GNTTABOP_unmap_grant_ref, unmap, op)))
-        BUG();
-
-    if (blktap_vma != NULL)
-        zap_page_range(blktap_vma, 
-                       MMAP_VADDR(user_vstart, idx, 0), 
-                       nr_pages << PAGE_SHIFT, NULL);
+               BLKTAP_INVALIDATE_HANDLE(handle);
+       }
+
+       BUG_ON(HYPERVISOR_grant_table_op(
+               GNTTABOP_unmap_grant_ref, unmap, op));
+
+       if (blktap_vma != NULL)
+               zap_page_range(blktap_vma, 
+                              MMAP_VADDR(user_vstart, idx, 0), 
+                              nr_pages << PAGE_SHIFT, NULL);
 }
 
 /******************************************************************
@@ -466,34 +458,38 @@
 
 static int __on_blkdev_list(blkif_t *blkif)
 {
-    return blkif->blkdev_list.next != NULL;
+       return blkif->blkdev_list.next != NULL;
 }
 
 static void remove_from_blkdev_list(blkif_t *blkif)
 {
-    unsigned long flags;
-    if ( !__on_blkdev_list(blkif) ) return;
-    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
-    if ( __on_blkdev_list(blkif) )
-    {
-        list_del(&blkif->blkdev_list);
-        blkif->blkdev_list.next = NULL;
-        blkif_put(blkif);
-    }
-    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
+       unsigned long flags;
+
+       if (!__on_blkdev_list(blkif))
+               return;
+
+       spin_lock_irqsave(&blkio_schedule_list_lock, flags);
+       if (__on_blkdev_list(blkif)) {
+               list_del(&blkif->blkdev_list);
+               blkif->blkdev_list.next = NULL;
+               blkif_put(blkif);
+       }
+       spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
 }
 
 static void add_to_blkdev_list_tail(blkif_t *blkif)
 {
-    unsigned long flags;
-    if ( __on_blkdev_list(blkif) ) return;
-    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
-    if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
-    {
-        list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
-        blkif_get(blkif);
-    }
-    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
+       unsigned long flags;
+
+       if (__on_blkdev_list(blkif))
+               return;
+
+       spin_lock_irqsave(&blkio_schedule_list_lock, flags);
+       if (!__on_blkdev_list(blkif) && (blkif->status == CONNECTED)) {
+               list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
+               blkif_get(blkif);
+       }
+       spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
 }
 
 
@@ -505,51 +501,50 @@
 
 static int blkio_schedule(void *arg)
 {
-    DECLARE_WAITQUEUE(wq, current);
-
-    blkif_t          *blkif;
-    struct list_head *ent;
-
-    daemonize("xenblkd");
-
-    for ( ; ; )
-    {
-        /* Wait for work to do. */
-        add_wait_queue(&blkio_schedule_wait, &wq);
-        set_current_state(TASK_INTERRUPTIBLE);
-        if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 
-             list_empty(&blkio_schedule_list) )
-            schedule();
-        __set_current_state(TASK_RUNNING);
-        remove_wait_queue(&blkio_schedule_wait, &wq);
-
-        /* Queue up a batch of requests. */
-        while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
-                !list_empty(&blkio_schedule_list) )
-        {
-            ent = blkio_schedule_list.next;
-            blkif = list_entry(ent, blkif_t, blkdev_list);
-            blkif_get(blkif);
-            remove_from_blkdev_list(blkif);
-            if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
-                add_to_blkdev_list_tail(blkif);
-            blkif_put(blkif);
-        }
-    }
+       DECLARE_WAITQUEUE(wq, current);
+
+       blkif_t          *blkif;
+       struct list_head *ent;
+
+       daemonize("xenblkd");
+
+       for (;;) {
+               /* Wait for work to do. */
+               add_wait_queue(&blkio_schedule_wait, &wq);
+               set_current_state(TASK_INTERRUPTIBLE);
+               if ((NR_PENDING_REQS == MAX_PENDING_REQS) || 
+                   list_empty(&blkio_schedule_list))
+                       schedule();
+               __set_current_state(TASK_RUNNING);
+               remove_wait_queue(&blkio_schedule_wait, &wq);
+
+               /* Queue up a batch of requests. */
+               while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
+                      !list_empty(&blkio_schedule_list)) {
+                       ent = blkio_schedule_list.next;
+                       blkif = list_entry(ent, blkif_t, blkdev_list);
+                       blkif_get(blkif);
+                       remove_from_blkdev_list(blkif);
+                       if (do_block_io_op(blkif, BATCH_PER_DOMAIN))
+                               add_to_blkdev_list_tail(blkif);
+                       blkif_put(blkif);
+               }
+       }
 }
 
 static void maybe_trigger_blkio_schedule(void)
 {
-    /*
-     * Needed so that two processes, who together make the following predicate
-     * true, don't both read stale values and evaluate the predicate
-     * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
-     */
-    smp_mb();
-
-    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
-         !list_empty(&blkio_schedule_list) )
-        wake_up(&blkio_schedule_wait);
+       /*
+        * Needed so that two processes, who together make the following
+        * predicate true, don't both read stale values and evaluate the
+        * predicate incorrectly. Incredibly unlikely to stall the scheduler
+        * on the x86, but...
+        */
+       smp_mb();
+
+       if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
+           !list_empty(&blkio_schedule_list))
+               wake_up(&blkio_schedule_wait);
 }
 
 
@@ -561,54 +556,53 @@
 
 static int blktap_read_ufe_ring(void)
 {
-    /* This is called to read responses from the UFE ring. */
-
-    RING_IDX i, j, rp;
-    blkif_response_t *resp;
-    blkif_t *blkif;
-    int pending_idx;
-    pending_req_t *pending_req;
-    unsigned long     flags;
-
-    /* if we are forwarding from UFERring to FERing */
-    if (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) {
-
-        /* for each outstanding message on the UFEring  */
-        rp = blktap_ufe_ring.sring->rsp_prod;
-        rmb();
+       /* This is called to read responses from the UFE ring. */
+
+       RING_IDX i, j, rp;
+       blkif_response_t *resp;
+       blkif_t *blkif;
+       int pending_idx;
+       pending_req_t *pending_req;
+       unsigned long     flags;
+
+       /* if we are forwarding from UFERring to FERing */
+       if (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) {
+
+               /* for each outstanding message on the UFEring  */
+               rp = blktap_ufe_ring.sring->rsp_prod;
+               rmb();
         
-        for ( i = blktap_ufe_ring.rsp_cons; i != rp; i++ )
-        {
-            resp = RING_GET_RESPONSE(&blktap_ufe_ring, i);
-            pending_idx = MASK_PEND_IDX(ID_TO_IDX(resp->id));
-            pending_req = &pending_reqs[pending_idx];
+               for (i = blktap_ufe_ring.rsp_cons; i != rp; i++) {
+                       resp = RING_GET_RESPONSE(&blktap_ufe_ring, i);
+                       pending_idx = MASK_PEND_IDX(ID_TO_IDX(resp->id));
+                       pending_req = &pending_reqs[pending_idx];
             
-            blkif = pending_req->blkif;
-            for (j = 0; j < pending_req->nr_pages; j++) {
-                unsigned long vaddr;
-                struct page **map = blktap_vma->vm_private_data;
-                int offset; 
-
-                vaddr  = MMAP_VADDR(user_vstart, pending_idx, j);
-                offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
-
-                //ClearPageReserved(virt_to_page(vaddr));
-                ClearPageReserved((struct page *)map[offset]);
-                map[offset] = NULL;
-            }
-
-            fast_flush_area(pending_idx, pending_req->nr_pages);
-            make_response(blkif, pending_req->id, resp->operation, 
-                          resp->status);
-            blkif_put(pending_req->blkif);
-            spin_lock_irqsave(&pend_prod_lock, flags);
-            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-            spin_unlock_irqrestore(&pend_prod_lock, flags);
-        }
-        blktap_ufe_ring.rsp_cons = i;
-        maybe_trigger_blkio_schedule();
-    }
-    return 0;
+                       blkif = pending_req->blkif;
+                       for (j = 0; j < pending_req->nr_pages; j++) {
+                               unsigned long vaddr;
+                               struct page **map = blktap_vma->vm_private_data;
+                               int offset; 
+
+                               vaddr  = MMAP_VADDR(user_vstart, pending_idx, i);
+                               offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
+
+                               //ClearPageReserved(virt_to_page(vaddr));
+                               ClearPageReserved((struct page *)map[offset]);
+                               map[offset] = NULL;
+                       }
+
+                       fast_flush_area(pending_idx, pending_req->nr_pages);
+                       make_response(blkif, pending_req->id, resp->operation, 
+                                     resp->status);
+                       blkif_put(pending_req->blkif);
+                       spin_lock_irqsave(&pend_prod_lock, flags);
+                       pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+                       spin_unlock_irqrestore(&pend_prod_lock, flags);
+               }
+               blktap_ufe_ring.rsp_cons = i;
+               maybe_trigger_blkio_schedule();
+       }
+       return 0;
 }
 
 
@@ -618,10 +612,10 @@
 
 irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
 {
-    blkif_t *blkif = dev_id;
-    add_to_blkdev_list_tail(blkif);
-    maybe_trigger_blkio_schedule();
-    return IRQ_HANDLED;
+       blkif_t *blkif = dev_id;
+       add_to_blkdev_list_tail(blkif);
+       maybe_trigger_blkio_schedule();
+       return IRQ_HANDLED;
 }
 
 
@@ -632,199 +626,194 @@
 
 static int do_block_io_op(blkif_t *blkif, int max_to_do)
 {
-    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
-    blkif_request_t *req;
-    RING_IDX i, rp;
-    int more_to_do = 0;
+       blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+       blkif_request_t *req;
+       RING_IDX i, rp;
+       int more_to_do = 0;
     
-    rp = blk_ring->sring->req_prod;
-    rmb(); /* Ensure we see queued requests up to 'rp'. */
-
-    for ( i = blk_ring->req_cons; 
-         (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
-          i++ )
-    {
-        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
-        {
-            more_to_do = 1;
-            break;
-        }
+       rp = blk_ring->sring->req_prod;
+       rmb(); /* Ensure we see queued requests up to 'rp'. */
+
+       for (i = blk_ring->req_cons; 
+            (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
+            i++ ) {
+               if ((max_to_do-- == 0) ||
+                   (NR_PENDING_REQS == MAX_PENDING_REQS)) {
+                       more_to_do = 1;
+                       break;
+               }
         
-        req = RING_GET_REQUEST(blk_ring, i);
-        switch ( req->operation )
-        {
-        case BLKIF_OP_READ:
-        case BLKIF_OP_WRITE:
-            dispatch_rw_block_io(blkif, req);
-            break;
-
-        default:
-            DPRINTK("error: unknown block io operation [%d]\n",
-                    req->operation);
-            make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
-            break;
-        }
-    }
-
-    blk_ring->req_cons = i;
-    blktap_kick_user();
-
-    return more_to_do;
+               req = RING_GET_REQUEST(blk_ring, i);
+               switch (req->operation) {
+               case BLKIF_OP_READ:
+               case BLKIF_OP_WRITE:
+                       dispatch_rw_block_io(blkif, req);
+                       break;
+
+               default:
+                       DPRINTK("error: unknown block io operation [%d]\n",
+                               req->operation);
+                       make_response(blkif, req->id, req->operation,
+                                     BLKIF_RSP_ERROR);
+                       break;
+               }
+       }
+
+       blk_ring->req_cons = i;
+       blktap_kick_user();
+
+       return more_to_do;
 }
 
 static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
 {
-    blkif_request_t *target;
-    int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
-    pending_req_t *pending_req;
-    struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
-    int op, ret;
-    unsigned int nseg;
-
-    /* Check that number of segments is sane. */
-    nseg = req->nr_segments;
-    if ( unlikely(nseg == 0) || 
-         unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
-    {
-        DPRINTK("Bad number of segments in request (%d)\n", nseg);
-        goto bad_descriptor;
-    }
-
-    /* Make sure userspace is ready. */
-    if (!blktap_ring_ok) {
-        DPRINTK("blktap: ring not ready for requests!\n");
-        goto bad_descriptor;
-    }
+       blkif_request_t *target;
+       int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+       pending_req_t *pending_req;
+       struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
+       int op, ret;
+       unsigned int nseg;
+
+       /* Check that number of segments is sane. */
+       nseg = req->nr_segments;
+       if (unlikely(nseg == 0) || 
+           unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+               DPRINTK("Bad number of segments in request (%d)\n", nseg);
+               goto bad_descriptor;
+       }
+
+       /* Make sure userspace is ready. */
+       if (!blktap_ring_ok) {
+               DPRINTK("blktap: ring not ready for requests!\n");
+               goto bad_descriptor;
+       }
     
 
-    if ( RING_FULL(&blktap_ufe_ring) ) {
-        WPRINTK("blktap: fe_ring is full, can't add (very broken!).\n");
-        goto bad_descriptor;
-    }
-
-    flush_cache_all(); /* a noop on intel... */
-
-    /* Map the foreign pages directly in to the application */    
-    op = 0;
-    for (i=0; i<req->nr_segments; i++) {
-
-        unsigned long uvaddr;
-        unsigned long kvaddr;
-        unsigned long ptep;
-
-        uvaddr = MMAP_VADDR(user_vstart, pending_idx, i);
-        kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
-
-        /* Map the remote page to kernel. */
-        map[op].host_addr = kvaddr;
-        map[op].dom   = blkif->domid;
-        map[op].ref   = blkif_gref_from_fas(req->frame_and_sects[i]);
-        map[op].flags = GNTMAP_host_map;
-        /* This needs a bit more thought in terms of interposition: 
-         * If we want to be able to modify pages during write using 
-         * grant table mappings, the guest will either need to allow 
-         * it, or we'll need to incur a copy. Bit of an fbufs moment. ;) */
-        if (req->operation == BLKIF_OP_WRITE)
-            map[op].flags |= GNTMAP_readonly;
-        op++;
-
-        /* Now map it to user. */
-        ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep);
-        if (ret)
-        {
-            DPRINTK("Couldn't get a pte addr!\n");
-            fast_flush_area(pending_idx, req->nr_segments);
-            goto bad_descriptor;
-        }
-
-        map[op].host_addr = ptep;
-        map[op].dom       = blkif->domid;
-        map[op].ref       = blkif_gref_from_fas(req->frame_and_sects[i]);
-        map[op].flags     = GNTMAP_host_map | GNTMAP_application_map
-                            | GNTMAP_contains_pte;
-        /* Above interposition comment applies here as well. */
-        if (req->operation == BLKIF_OP_WRITE)
-            map[op].flags |= GNTMAP_readonly;
-        op++;
-    }
-
-    if ( unlikely(HYPERVISOR_grant_table_op(
-            GNTTABOP_map_grant_ref, map, op)))
-        BUG();
-
-    op = 0;
-    for (i=0; i<(req->nr_segments*2); i+=2) {
-        unsigned long uvaddr;
-        unsigned long kvaddr;
-        unsigned long offset;
-        int cancel = 0;
-
-        uvaddr = MMAP_VADDR(user_vstart, pending_idx, i/2);
-        kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i/2);
-
-        if ( unlikely(map[i].handle < 0) ) 
-        {
-            DPRINTK("Error on kernel grant mapping (%d)\n", map[i].handle);
-            ret = map[i].handle;
-            cancel = 1;
-        }
-
-        if ( unlikely(map[i+1].handle < 0) ) 
-        {
-            DPRINTK("Error on user grant mapping (%d)\n", map[i+1].handle);
-            ret = map[i+1].handle;
-            cancel = 1;
-        }
-
-        if (cancel) 
-        {
-            fast_flush_area(pending_idx, req->nr_segments);
-            goto bad_descriptor;
-        }
-
-        /* Set the necessary mappings in p2m and in the VM_FOREIGN 
-         * vm_area_struct to allow user vaddr -> struct page lookups
-         * to work.  This is needed for direct IO to foreign pages. */
-        phys_to_machine_mapping[__pa(kvaddr) >> PAGE_SHIFT] =
-            FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
-
-        offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
-        ((struct page **)blktap_vma->vm_private_data)[offset] =
-            pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
-
-        /* Save handles for unmapping later. */
-        pending_handle(pending_idx, i/2).kernel = map[i].handle;
-        pending_handle(pending_idx, i/2).user   = map[i+1].handle;
-    }
-
-    /* Mark mapped pages as reserved: */
-    for ( i = 0; i < req->nr_segments; i++ )
-    {
-        unsigned long kvaddr;
-
-        kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
-        SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT));
-    }
-
-    pending_req = &pending_reqs[pending_idx];
-    pending_req->blkif     = blkif;
-    pending_req->id        = req->id;
-    pending_req->operation = req->operation;
-    pending_req->status    = BLKIF_RSP_OKAY;
-    pending_req->nr_pages  = nseg;
-    req->id = MAKE_ID(blkif->domid, pending_idx);
-    //atomic_set(&pending_req->pendcnt, nbio);
-    pending_cons++;
-    blkif_get(blkif);
-
-    /* Finally, write the request message to the user ring. */
-    target = RING_GET_REQUEST(&blktap_ufe_ring, blktap_ufe_ring.req_prod_pvt);
-    memcpy(target, req, sizeof(*req));
-    blktap_ufe_ring.req_prod_pvt++;
-    return;
+       if (RING_FULL(&blktap_ufe_ring)) {
+               WPRINTK("blktap: fe_ring is full, can't add "
+                       "(very broken!).\n");
+               goto bad_descriptor;
+       }
+
+       flush_cache_all(); /* a noop on intel... */
+
+       /* Map the foreign pages directly in to the application */    
+       op = 0;
+       for (i = 0; i < req->nr_segments; i++) {
+
+               unsigned long uvaddr;
+               unsigned long kvaddr;
+               unsigned long ptep;
+
+               uvaddr = MMAP_VADDR(user_vstart, pending_idx, i);
+               kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
+
+               /* Map the remote page to kernel. */
+               map[op].host_addr = kvaddr;
+               map[op].dom   = blkif->domid;
+               map[op].ref   = blkif_gref_from_fas(req->frame_and_sects[i]);
+               map[op].flags = GNTMAP_host_map;
+               /* This needs a bit more thought in terms of interposition: 
+                * If we want to be able to modify pages during write using 
+                * grant table mappings, the guest will either need to allow 
+                * it, or we'll need to incur a copy. Bit of an fbufs moment. ;) */
+               if (req->operation == BLKIF_OP_WRITE)
+                       map[op].flags |= GNTMAP_readonly;
+               op++;
+
+               /* Now map it to user. */
+               ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep);
+               if (ret) {
+                       DPRINTK("Couldn't get a pte addr!\n");
+                       fast_flush_area(pending_idx, req->nr_segments);
+                       goto bad_descriptor;
+               }
+
+               map[op].host_addr = ptep;
+               map[op].dom       = blkif->domid;
+               map[op].ref       = blkif_gref_from_fas(req->frame_and_sects[i]);
+               map[op].flags     = GNTMAP_host_map | GNTMAP_application_map
+                       | GNTMAP_contains_pte;
+               /* Above interposition comment applies here as well. */
+               if (req->operation == BLKIF_OP_WRITE)
+                       map[op].flags |= GNTMAP_readonly;
+               op++;
+       }
+
+       BUG_ON(HYPERVISOR_grant_table_op(
+               GNTTABOP_map_grant_ref, map, op));
+
+       op = 0;
+       for (i = 0; i < (req->nr_segments*2); i += 2) {
+               unsigned long uvaddr;
+               unsigned long kvaddr;
+               unsigned long offset;
+               int cancel = 0;
+
+               uvaddr = MMAP_VADDR(user_vstart, pending_idx, i/2);
+               kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i/2);
+
+               if (unlikely(map[i].handle < 0)) {
+                       DPRINTK("Error on kernel grant mapping (%d)\n",
+                               map[i].handle);
+                       ret = map[i].handle;
+                       cancel = 1;
+               }
+
+               if (unlikely(map[i+1].handle < 0)) {
+                       DPRINTK("Error on user grant mapping (%d)\n",
+                               map[i+1].handle);
+                       ret = map[i+1].handle;
+                       cancel = 1;
+               }
+
+               if (cancel) {
+                       fast_flush_area(pending_idx, req->nr_segments);
+                       goto bad_descriptor;
+               }
+
+               /* Set the necessary mappings in p2m and in the VM_FOREIGN 
+                * vm_area_struct to allow user vaddr -> struct page lookups
+                * to work.  This is needed for direct IO to foreign pages. */
+               phys_to_machine_mapping[__pa(kvaddr) >> PAGE_SHIFT] =
+                       FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
+
+               offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
+               ((struct page **)blktap_vma->vm_private_data)[offset] =
+                       pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+
+               /* Save handles for unmapping later. */
+               pending_handle(pending_idx, i/2).kernel = map[i].handle;
+               pending_handle(pending_idx, i/2).user   = map[i+1].handle;
+       }
+
+       /* Mark mapped pages as reserved: */
+       for (i = 0; i < req->nr_segments; i++) {
+               unsigned long kvaddr;
+               kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
+               SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT));
+       }
+
+       pending_req = &pending_reqs[pending_idx];
+       pending_req->blkif     = blkif;
+       pending_req->id        = req->id;
+       pending_req->operation = req->operation;
+       pending_req->status    = BLKIF_RSP_OKAY;
+       pending_req->nr_pages  = nseg;
+       req->id = MAKE_ID(blkif->domid, pending_idx);
+       //atomic_set(&pending_req->pendcnt, nbio);
+       pending_cons++;
+       blkif_get(blkif);
+
+       /* Finally, write the request message to the user ring. */
+       target = RING_GET_REQUEST(&blktap_ufe_ring,
+                                 blktap_ufe_ring.req_prod_pvt);
+       memcpy(target, req, sizeof(*req));
+       blktap_ufe_ring.req_prod_pvt++;
+       return;
 
  bad_descriptor:
-    make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
+       make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
 } 
 
 
@@ -837,80 +826,89 @@
 static void make_response(blkif_t *blkif, unsigned long id, 
                           unsigned short op, int st)
 {
-    blkif_response_t *resp;
-    unsigned long     flags;
-    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
-
-    /* Place on the response ring for the relevant domain. */ 
-    spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-    resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
-    resp->id        = id;
-    resp->operation = op;
-    resp->status    = st;
-    wmb(); /* Ensure other side can see the response fields. */
-    blk_ring->rsp_prod_pvt++;
-    RING_PUSH_RESPONSES(blk_ring);
-    spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-
-    /* Kick the relevant domain. */
-    notify_via_evtchn(blkif->evtchn);
+       blkif_response_t *resp;
+       unsigned long     flags;
+       blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+
+       /* Place on the response ring for the relevant domain. */ 
+       spin_lock_irqsave(&blkif->blk_ring_lock, flags);
+       resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
+       resp->id        = id;
+       resp->operation = op;
+       resp->status    = st;
+       wmb(); /* Ensure other side can see the response fields. */
+       blk_ring->rsp_prod_pvt++;
+       RING_PUSH_RESPONSES(blk_ring);
+       spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
+
+       /* Kick the relevant domain. */
+       notify_via_evtchn(blkif->evtchn);
 }
 
 static struct miscdevice blktap_miscdev = {
-    .minor        = BLKTAP_MINOR,
-    .name         = "blktap",
-    .fops         = &blktap_fops,
-    .devfs_name   = "misc/blktap",
+       .minor        = BLKTAP_MINOR,
+       .name         = "blktap",
+       .fops         = &blktap_fops,
+       .devfs_name   = "misc/blktap",
 };
 
 void blkif_deschedule(blkif_t *blkif)
 {
-    remove_from_blkdev_list(blkif);
+       remove_from_blkdev_list(blkif);
 }
 
 static int __init blkif_init(void)
 {
-    int i, j, err;
-    struct page *page;
+       int i, j, err;
+       struct page *page;
 /*
-    if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
-         !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
-        return 0;
+  if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
+  !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
+  return 0;
 */
-    blkif_interface_init();
-
-    page = balloon_alloc_empty_page_range(MMAP_PAGES);
-    BUG_ON(page == NULL);
-    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-
-    pending_cons = 0;
-    pending_prod = MAX_PENDING_REQS;
-    memset(pending_reqs, 0, sizeof(pending_reqs));
-    for ( i = 0; i < MAX_PENDING_REQS; i++ )
-        pending_ring[i] = i;
+       blkif_interface_init();
+
+       page = balloon_alloc_empty_page_range(MMAP_PAGES);
+       BUG_ON(page == NULL);
+       mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
+
+       pending_cons = 0;
+       pending_prod = MAX_PENDING_REQS;
+       memset(pending_reqs, 0, sizeof(pending_reqs));
+       for ( i = 0; i < MAX_PENDING_REQS; i++ )
+               pending_ring[i] = i;
     
-    spin_lock_init(&blkio_schedule_list_lock);
-    INIT_LIST_HEAD(&blkio_schedule_list);
-
-    if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
-        BUG();
-
-    blkif_xenbus_init();
-
-    for (i=0; i<MAX_PENDING_REQS ; i++)
-        for (j=0; j<BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
-            BLKTAP_INVALIDATE_HANDLE(&pending_handle(i, j));
-
-    err = misc_register(&blktap_miscdev);
-    if ( err != 0 )
-    {
-        printk(KERN_ALERT "Couldn't register /dev/misc/blktap (%d)\n", err);
-        return err;
-    }
-
-    init_waitqueue_head(&blktap_wait);
-
-    return 0;
+       spin_lock_init(&blkio_schedule_list_lock);
+       INIT_LIST_HEAD(&blkio_schedule_list);
+
+       BUG_ON(kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0);
+
+       blkif_xenbus_init();
+
+       for (i = 0; i < MAX_PENDING_REQS ; i++)
+               for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
+                       BLKTAP_INVALIDATE_HANDLE(&pending_handle(i, j));
+
+       err = misc_register(&blktap_miscdev);
+       if (err != 0) {
+               printk(KERN_ALERT "Couldn't register /dev/misc/blktap (%d)\n",
+                      err);
+               return err;
+       }
+
+       init_waitqueue_head(&blktap_wait);
+
+       return 0;
 }
 
 __initcall(blkif_init);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
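
For readers following the blktap changes above: the driver shuttles requests
between two rings built with the Xen ring macros that appear throughout this
file (SHARED_RING_INIT, FRONT_RING_INIT, RING_GET_REQUEST,
RING_PUSH_REQUESTS). The following is a minimal sketch of the producer-side
discipline, assuming the macro semantics from Xen's io/ring.h of this era;
it is illustrative context only, not part of the changeset:

	/* Sketch: set up a front ring on a fresh shared page and queue one
	 * request.  blkif_sring_t/blkif_front_ring_t come from the blkif
	 * ring definitions; error handling is omitted for brevity. */
	blkif_sring_t      *sring;
	blkif_front_ring_t  ring;
	blkif_request_t    *req;

	sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
	SHARED_RING_INIT(sring);                  /* reset shared indices */
	FRONT_RING_INIT(&ring, sring, PAGE_SIZE); /* bind private view    */

	/* Write the request at the private producer index first ...     */
	req = RING_GET_REQUEST(&ring, ring.req_prod_pvt);
	req->operation = BLKIF_OP_READ;
	ring.req_prod_pvt++;

	/* ... and only then publish it, so the consumer never sees a
	 * half-written entry.  blktap_poll() above does exactly this
	 * before waking the user process. */
	RING_PUSH_REQUESTS(&ring);
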
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/blktap/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/blktap/common.h  Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blktap/common.h  Thu Sep 22 15:12:14 2005
@@ -33,39 +33,39 @@
 #define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
 
 struct vbd {
-    blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
-    unsigned char  readonly;    /* Non-zero -> read-only */
-    unsigned char  type;        /* VDISK_xxx */
-    u32            pdevice;     /* phys device that this vbd maps to */
-    struct block_device *bdev;
+       blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
+       unsigned char  readonly;    /* Non-zero -> read-only */
+       unsigned char  type;        /* VDISK_xxx */
+       u32            pdevice;     /* phys device that this vbd maps to */
+       struct block_device *bdev;
 }; 
 
 typedef struct blkif_st {
-    /* Unique identifier for this interface. */
-    domid_t           domid;
-    unsigned int      handle;
-    /* Physical parameters of the comms window. */
-    unsigned int      evtchn;
-    unsigned int      remote_evtchn;
-    /* Comms information. */
-    blkif_back_ring_t blk_ring;
-    struct vm_struct *blk_ring_area;
-    /* VBDs attached to this interface. */
-    struct vbd        vbd;
-    /* Private fields. */
-    enum { DISCONNECTED, CONNECTED } status;
+       /* Unique identifier for this interface. */
+       domid_t           domid;
+       unsigned int      handle;
+       /* Physical parameters of the comms window. */
+       unsigned int      evtchn;
+       unsigned int      remote_evtchn;
+       /* Comms information. */
+       blkif_back_ring_t blk_ring;
+       struct vm_struct *blk_ring_area;
+       /* VBDs attached to this interface. */
+       struct vbd        vbd;
+       /* Private fields. */
+       enum { DISCONNECTED, CONNECTED } status;
 #ifdef CONFIG_XEN_BLKDEV_TAP_BE
-    /* Is this a blktap frontend */
-    unsigned int     is_blktap;
+       /* Is this a blktap frontend */
+       unsigned int     is_blktap;
 #endif
-    struct list_head blkdev_list;
-    spinlock_t       blk_ring_lock;
-    atomic_t         refcnt;
+       struct list_head blkdev_list;
+       spinlock_t       blk_ring_lock;
+       atomic_t         refcnt;
 
-    struct work_struct free_work;
+       struct work_struct free_work;
 
-    u16              shmem_handle;
-    grant_ref_t      shmem_ref;
+       u16              shmem_handle;
+       grant_ref_t      shmem_ref;
 } blkif_t;
 
 blkif_t *alloc_blkif(domid_t domid);
@@ -89,10 +89,10 @@
 unsigned long vbd_secsize(struct vbd *vbd);
 
 struct phys_req {
-    unsigned short       dev;
-    unsigned short       nr_sects;
-    struct block_device *bdev;
-    blkif_sector_t       sector_number;
+       unsigned short       dev;
+       unsigned short       nr_sects;
+       struct block_device *bdev;
+       blkif_sector_t       sector_number;
 };
 
 int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation); 
@@ -106,3 +106,13 @@
 irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
 
 #endif /* __BLKIF__BACKEND__COMMON_H__ */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
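
common.h above keeps an atomic_t refcnt and a free_work item in blkif_t, and
the blktap code takes and drops references with blkif_get()/blkif_put()
around list and I/O operations. Their definitions are not part of this hunk,
so the following is only a sketch of the usual shape such helpers take,
assuming the last reference dropped schedules the deferred free:

	/* Sketch only -- the real macros live elsewhere in the blktap
	 * sources; treat the names and the schedule_work() call as an
	 * assumption, not a copy of the actual definitions. */
	#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))

	#define blkif_put(_b)                                           \
		do {                                                    \
			if (atomic_dec_and_test(&(_b)->refcnt))         \
				schedule_work(&(_b)->free_work);        \
		} while (0)
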
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c
--- a/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c  Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c  Thu Sep 22 15:12:14 2005
@@ -222,3 +222,13 @@
 {
        xenbus_register_backend(&blkback);
 }
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
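
The console.c changes that follow revolve around a circular write buffer:
wbuf is sized to a power of two (xencons_bufsz_setup above doubles wbuf_size
until it reaches the requested goal), and the free-running counters wc/wp
are reduced to array indices with WBUF_MASK. A minimal sketch of that
discipline, assuming WBUF_MASK(i) is ((i) & (wbuf_size - 1)) -- the macro
itself is defined in a part of console.c this diff does not touch:

	/* Sketch: power-of-two circular buffer with free-running counters.
	 * (wp - wc) is the number of buffered bytes; it never exceeds
	 * wbuf_size, so the unsigned subtraction is safe across wrap. */
	#define WBUF_MASK(_i) ((_i) & (wbuf_size - 1))

	static char          *wbuf;
	static unsigned int   wbuf_size = 4096;
	static unsigned long  wc, wp;   /* consumer / producer counts */

	static int wbuf_put(char ch)
	{
		if ((wp - wc) == wbuf_size)     /* buffer full */
			return 0;
		wbuf[WBUF_MASK(wp++)] = ch;
		return 1;
	}
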
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/console/console.c
--- a/linux-2.6-xen-sparse/drivers/xen/console/console.c        Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/console/console.c        Thu Sep 22 15:12:14 2005
@@ -75,31 +75,33 @@
 
 static int __init xencons_setup(char *str)
 {
-    char *q;
-    int n;
-
-    if ( !strncmp(str, "ttyS", 4) )
-        xc_mode = XC_SERIAL;
-    else if ( !strncmp(str, "tty", 3) )
-        xc_mode = XC_TTY;
-    else if ( !strncmp(str, "off", 3) )
-        xc_mode = XC_OFF;
-
-    switch ( xc_mode )
-    {
-    case XC_SERIAL:
-        n = simple_strtol( str+4, &q, 10 );
-        if ( q > (str + 4) ) xc_num = n;
-        break;
-    case XC_TTY:
-        n = simple_strtol( str+3, &q, 10 );
-        if ( q > (str + 3) ) xc_num = n;
-        break;
-    default:
-        break;
-    }
-
-    return 1;
+       char *q;
+       int n;
+
+       if (!strncmp(str, "ttyS", 4))
+               xc_mode = XC_SERIAL;
+       else if (!strncmp(str, "tty", 3))
+               xc_mode = XC_TTY;
+       else if (!strncmp(str, "off", 3))
+               xc_mode = XC_OFF;
+
+       switch ( xc_mode )
+       {
+       case XC_SERIAL:
+               n = simple_strtol(str+4, &q, 10);
+               if (q > (str + 4))
+                       xc_num = n;
+               break;
+       case XC_TTY:
+               n = simple_strtol(str+3, &q, 10);
+               if (q > (str + 3))
+                       xc_num = n;
+               break;
+       default:
+               break;
+       }
+
+       return 1;
 }
 __setup("xencons=", xencons_setup);
 
@@ -111,11 +113,11 @@
 
 static int __init xencons_bufsz_setup(char *str)
 {
-    unsigned int goal;
-    goal = simple_strtoul(str, NULL, 0);
-    while ( wbuf_size < goal )
-        wbuf_size <<= 1;
-    return 1;
+       unsigned int goal;
+       goal = simple_strtoul(str, NULL, 0);
+       while (wbuf_size < goal)
+               wbuf_size <<= 1;
+       return 1;
 }
 __setup("xencons_bufsz=", xencons_bufsz_setup);
 
@@ -135,57 +137,55 @@
 /******************** Kernel console driver ********************************/
 
 static void kcons_write(
-    struct console *c, const char *s, unsigned int count)
-{
-    int           i;
-    unsigned long flags;
-
-    spin_lock_irqsave(&xencons_lock, flags);
+       struct console *c, const char *s, unsigned int count)
+{
+       int           i;
+       unsigned long flags;
+
+       spin_lock_irqsave(&xencons_lock, flags);
     
-    for ( i = 0; i < count; i++ )
-    {
-        if ( (wp - wc) >= (wbuf_size - 1) )
-            break;
-        if ( (wbuf[WBUF_MASK(wp++)] = s[i]) == '\n' )
-            wbuf[WBUF_MASK(wp++)] = '\r';
-    }
-
-    __xencons_tx_flush();
-
-    spin_unlock_irqrestore(&xencons_lock, flags);
+       for (i = 0; i < count; i++) {
+               if ((wp - wc) >= (wbuf_size - 1))
+                       break;
+               if ((wbuf[WBUF_MASK(wp++)] = s[i]) == '\n')
+                       wbuf[WBUF_MASK(wp++)] = '\r';
+       }
+
+       __xencons_tx_flush();
+
+       spin_unlock_irqrestore(&xencons_lock, flags);
 }
 
 static void kcons_write_dom0(
-    struct console *c, const char *s, unsigned int count)
-{
-    int rc;
-
-    while ( (count > 0) &&
-            ((rc = HYPERVISOR_console_io(
-                CONSOLEIO_write, count, (char *)s)) > 0) )
-    {
-        count -= rc;
-        s += rc;
-    }
+       struct console *c, const char *s, unsigned int count)
+{
+       int rc;
+
+       while ((count > 0) &&
+              ((rc = HYPERVISOR_console_io(
+                       CONSOLEIO_write, count, (char *)s)) > 0)) {
+               count -= rc;
+               s += rc;
+       }
 }
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 static struct tty_driver *kcons_device(struct console *c, int *index)
 {
-    *index = c->index;
-    return xencons_driver;
+       *index = c->index;
+       return xencons_driver;
 }
 #else
 static kdev_t kcons_device(struct console *c)
 {
-    return MKDEV(TTY_MAJOR, (xc_mode == XC_SERIAL) ? 64 : 1);
+       return MKDEV(TTY_MAJOR, (xc_mode == XC_SERIAL) ? 64 : 1);
 }
 #endif
 
 static struct console kcons_info = {
-    .device    = kcons_device,
-    .flags     = CON_PRINTBUFFER,
-    .index     = -1,
+       .device = kcons_device,
+       .flags  = CON_PRINTBUFFER,
+       .index  = -1,
 };
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
@@ -196,44 +196,42 @@
 void xen_console_init(void)
 #endif
 {
-    if ( xen_start_info->flags & SIF_INITDOMAIN )
-    {
-        if ( xc_mode == XC_DEFAULT )
-            xc_mode = XC_SERIAL;
-        kcons_info.write = kcons_write_dom0;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-        if ( xc_mode == XC_SERIAL )
-            kcons_info.flags |= CON_ENABLED;
-#endif
-    }
-    else
-    {
-        if ( xc_mode == XC_DEFAULT )
-            xc_mode = XC_TTY;
-        kcons_info.write = kcons_write;
-    }
-
-    switch ( xc_mode )
-    {
-    case XC_SERIAL:
-        strcpy(kcons_info.name, "ttyS");
-        if ( xc_num == -1 ) xc_num = 0;
-        break;
-
-    case XC_TTY:
-        strcpy(kcons_info.name, "tty");
-        if ( xc_num == -1 ) xc_num = 1;
-        break;
-
-    default:
-        return __RETCODE;
-    }
-
-    wbuf = alloc_bootmem(wbuf_size);
-
-    register_console(&kcons_info);
-
-    return __RETCODE;
+       if (xen_start_info->flags & SIF_INITDOMAIN) {
+               if (xc_mode == XC_DEFAULT)
+                       xc_mode = XC_SERIAL;
+               kcons_info.write = kcons_write_dom0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+               if (xc_mode == XC_SERIAL)
+                       kcons_info.flags |= CON_ENABLED;
+#endif
+       } else {
+               if (xc_mode == XC_DEFAULT)
+                       xc_mode = XC_TTY;
+               kcons_info.write = kcons_write;
+       }
+
+       switch (xc_mode) {
+       case XC_SERIAL:
+               strcpy(kcons_info.name, "ttyS");
+               if (xc_num == -1)
+                       xc_num = 0;
+               break;
+
+       case XC_TTY:
+               strcpy(kcons_info.name, "tty");
+               if (xc_num == -1)
+                       xc_num = 1;
+               break;
+
+       default:
+               return __RETCODE;
+       }
+
+       wbuf = alloc_bootmem(wbuf_size);
+
+       register_console(&kcons_info);
+
+       return __RETCODE;
 }
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 console_initcall(xen_console_init);
@@ -246,41 +244,40 @@
 asmlinkage int xprintk(const char *fmt, ...)
 #endif
 {
-    va_list args;
-    int printk_len;
-    static char printk_buf[1024];
+       va_list args;
+       int printk_len;
+       static char printk_buf[1024];
     
-    /* Emit the output into the temporary buffer */
-    va_start(args, fmt);
-    printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
-    va_end(args);
-
-    /* Send the processed output directly to Xen. */
-    kcons_write_dom0(NULL, printk_buf, printk_len);
-
-    return 0;
+       /* Emit the output into the temporary buffer */
+       va_start(args, fmt);
+       printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
+       va_end(args);
+
+       /* Send the processed output directly to Xen. */
+       kcons_write_dom0(NULL, printk_buf, printk_len);
+
+       return 0;
 }
 
 /*** Forcibly flush console data before dying. ***/
 void xencons_force_flush(void)
 {
-    int        sz;
-
-    /* Emergency console is synchronous, so there's nothing to flush. */
-    if ( xen_start_info->flags & SIF_INITDOMAIN )
-        return;
-
-
-    /* Spin until console data is flushed through to the domain controller. */
-    while ( (wc != wp) )
-    {
-       int sent = 0;
-        if ( (sz = wp - wc) == 0 )
-            continue;
-       sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
-       if (sent > 0)
-           wc += sent;
-    }
+       int sz;
+
+       /* Emergency console is synchronous, so there's nothing to flush. */
+       if (xen_start_info->flags & SIF_INITDOMAIN)
+               return;
+
+
+       /* Spin until console data is flushed through to the daemon. */
+       while (wc != wp) {
+               int sent = 0;
+               if ((sz = wp - wc) == 0)
+                       continue;
+               sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
+               if (sent > 0)
+                       wc += sent;
+       }
 }
 
 
@@ -305,362 +302,358 @@
 /* Non-privileged receive callback. */
 static void xencons_rx(char *buf, unsigned len, struct pt_regs *regs)
 {
-    int           i;
-    unsigned long flags;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-    if ( xencons_tty != NULL )
-    {
-        for ( i = 0; i < len; i++ ) {
+       int           i;
+       unsigned long flags;
+
+       spin_lock_irqsave(&xencons_lock, flags);
+       if (xencons_tty == NULL)
+               goto out;
+
+       for (i = 0; i < len; i++) {
 #ifdef CONFIG_MAGIC_SYSRQ
-            if (sysrq_enabled) {
-                if (buf[i] == '\x0f') { /* ^O */
-                    sysrq_requested = jiffies;
-                    continue; /* don't print the sysrq key */
-                } else if (sysrq_requested) {
-                    unsigned long sysrq_timeout = sysrq_requested + HZ*2;
-                    sysrq_requested = 0;
-                    /* if it's been less than a timeout, do the sysrq */
-                    if (time_before(jiffies, sysrq_timeout)) {
-                        spin_unlock_irqrestore(&xencons_lock, flags);
-                        handle_sysrq(buf[i], regs, xencons_tty);
-                        spin_lock_irqsave(&xencons_lock, flags);
-                        continue;
-                    }
-                }
-            }
-#endif
-            tty_insert_flip_char(xencons_tty, buf[i], 0);
-        }
-        tty_flip_buffer_push(xencons_tty);
-    }
-    spin_unlock_irqrestore(&xencons_lock, flags);
-
+               if (sysrq_enabled) {
+                       if (buf[i] == '\x0f') { /* ^O */
+                               sysrq_requested = jiffies;
+                               continue; /* don't print the sysrq key */
+                       } else if (sysrq_requested) {
+                               unsigned long sysrq_timeout =
+                                       sysrq_requested + HZ*2;
+                               sysrq_requested = 0;
+                               if (time_before(jiffies, sysrq_timeout)) {
+                                       spin_unlock_irqrestore(
+                                               &xencons_lock, flags);
+                                       handle_sysrq(
+                                               buf[i], regs, xencons_tty);
+                                       spin_lock_irqsave(
+                                               &xencons_lock, flags);
+                                       continue;
+                               }
+                       }
+               }
+#endif
+               tty_insert_flip_char(xencons_tty, buf[i], 0);
+       }
+       tty_flip_buffer_push(xencons_tty);
+
+ out:
+       spin_unlock_irqrestore(&xencons_lock, flags);
 }
 
 /* Privileged and non-privileged transmit worker. */
 static void __xencons_tx_flush(void)
 {
-    int        sz, work_done = 0;
-
-    if ( xen_start_info->flags & SIF_INITDOMAIN )
-    {
-        if ( x_char )
-        {
-            kcons_write_dom0(NULL, &x_char, 1);
-            x_char = 0;
-            work_done = 1;
-        }
-
-        while ( wc != wp )
-        {
-            sz = wp - wc;
-            if ( sz > (wbuf_size - WBUF_MASK(wc)) )
-                sz = wbuf_size - WBUF_MASK(wc);
-            kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
-            wc += sz;
-            work_done = 1;
-        }
-    }
-    else
-    {
-        while ( x_char )
-        {
-           if (xencons_ring_send(&x_char, 1) == 1) {
-               x_char = 0;
-               work_done = 1;
-           }
-        }
-
-        while ( wc != wp )
-        {
-           int sent;
-            sz = wp - wc;
-           if ( sz > (wbuf_size - WBUF_MASK(wc)) )
-               sz = wbuf_size - WBUF_MASK(wc);
-           sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
-           if ( sent > 0 ) {
-               wc += sent;
-               work_done = 1;
-           }
-        }
-    }
-
-    if ( work_done && (xencons_tty != NULL) )
-    {
-        wake_up_interruptible(&xencons_tty->write_wait);
-        if ( (xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
-             (xencons_tty->ldisc.write_wakeup != NULL) )
-            (xencons_tty->ldisc.write_wakeup)(xencons_tty);
-    }
+       int sz, work_done = 0;
+
+       if (xen_start_info->flags & SIF_INITDOMAIN) {
+               if (x_char) {
+                       kcons_write_dom0(NULL, &x_char, 1);
+                       x_char = 0;
+                       work_done = 1;
+               }
+
+               while (wc != wp) {
+                       sz = wp - wc;
+                       if (sz > (wbuf_size - WBUF_MASK(wc)))
+                               sz = wbuf_size - WBUF_MASK(wc);
+                       kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
+                       wc += sz;
+                       work_done = 1;
+               }
+       } else {
+               while (x_char) {
+                       if (xencons_ring_send(&x_char, 1) == 1) {
+                               x_char = 0;
+                               work_done = 1;
+                       }
+               }
+
+               while (wc != wp) {
+                       int sent;
+                       sz = wp - wc;
+                       if (sz > (wbuf_size - WBUF_MASK(wc)))
+                               sz = wbuf_size - WBUF_MASK(wc);
+                       sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
+                       if (sent > 0) {
+                               wc += sent;
+                               work_done = 1;
+                       }
+               }
+       }
+
+       if (work_done && (xencons_tty != NULL))
+       {
+               wake_up_interruptible(&xencons_tty->write_wait);
+               if ((xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+                   (xencons_tty->ldisc.write_wakeup != NULL))
+                       (xencons_tty->ldisc.write_wakeup)(xencons_tty);
+       }
 }
 
 /* Privileged receive callback and transmit kicker. */
 static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
                                           struct pt_regs *regs)
 {
-    static char   rbuf[16];
-    int           i, l;
-    unsigned long flags;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-
-    if ( xencons_tty != NULL )
-    {
-        /* Receive work. */
-        while ( (l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0 )
-            for ( i = 0; i < l; i++ )
-                tty_insert_flip_char(xencons_tty, rbuf[i], 0);
-        if ( xencons_tty->flip.count != 0 )
-            tty_flip_buffer_push(xencons_tty);
-    }
-
-    /* Transmit work. */
-    __xencons_tx_flush();
-
-    spin_unlock_irqrestore(&xencons_lock, flags);
-
-    return IRQ_HANDLED;
+       static char   rbuf[16];
+       int           i, l;
+       unsigned long flags;
+
+       spin_lock_irqsave(&xencons_lock, flags);
+
+       if (xencons_tty != NULL)
+       {
+               /* Receive work. */
+               while ((l = HYPERVISOR_console_io(
+                       CONSOLEIO_read, 16, rbuf)) > 0)
+                       for (i = 0; i < l; i++)
+                               tty_insert_flip_char(xencons_tty, rbuf[i], 0);
+               if (xencons_tty->flip.count != 0)
+                       tty_flip_buffer_push(xencons_tty);
+       }
+
+       /* Transmit work. */
+       __xencons_tx_flush();
+
+       spin_unlock_irqrestore(&xencons_lock, flags);
+
+       return IRQ_HANDLED;
 }
 
 static int xencons_write_room(struct tty_struct *tty)
 {
-    return wbuf_size - (wp - wc);
+       return wbuf_size - (wp - wc);
 }
 
 static int xencons_chars_in_buffer(struct tty_struct *tty)
 {
-    return wp - wc;
+       return wp - wc;
 }
 
 static void xencons_send_xchar(struct tty_struct *tty, char ch)
 {
-    unsigned long flags;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-    x_char = ch;
-    __xencons_tx_flush();
-    spin_unlock_irqrestore(&xencons_lock, flags);
+       unsigned long flags;
+
+       if (TTY_INDEX(tty) != 0)
+               return;
+
+       spin_lock_irqsave(&xencons_lock, flags);
+       x_char = ch;
+       __xencons_tx_flush();
+       spin_unlock_irqrestore(&xencons_lock, flags);
 }
 
 static void xencons_throttle(struct tty_struct *tty)
 {
-    if ( TTY_INDEX(tty) != 0 )
-        return;
-
-    if ( I_IXOFF(tty) )
-        xencons_send_xchar(tty, STOP_CHAR(tty));
+       if (TTY_INDEX(tty) != 0)
+               return;
+
+       if (I_IXOFF(tty))
+               xencons_send_xchar(tty, STOP_CHAR(tty));
 }
 
 static void xencons_unthrottle(struct tty_struct *tty)
 {
-    if ( TTY_INDEX(tty) != 0 )
-        return;
-
-    if ( I_IXOFF(tty) )
-    {
-        if ( x_char != 0 )
-            x_char = 0;
-        else
-            xencons_send_xchar(tty, START_CHAR(tty));
-    }
+       if (TTY_INDEX(tty) != 0)
+               return;
+
+       if (I_IXOFF(tty)) {
+               if (x_char != 0)
+                       x_char = 0;
+               else
+                       xencons_send_xchar(tty, START_CHAR(tty));
+       }
 }
 
 static void xencons_flush_buffer(struct tty_struct *tty)
 {
-    unsigned long flags;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-    wc = wp = 0;
-    spin_unlock_irqrestore(&xencons_lock, flags);
+       unsigned long flags;
+
+       if (TTY_INDEX(tty) != 0)
+               return;
+
+       spin_lock_irqsave(&xencons_lock, flags);
+       wc = wp = 0;
+       spin_unlock_irqrestore(&xencons_lock, flags);
 }
 
 static inline int __xencons_put_char(int ch)
 {
-    char _ch = (char)ch;
-    if ( (wp - wc) == wbuf_size )
-        return 0;
-    wbuf[WBUF_MASK(wp++)] = _ch;
-    return 1;
+       char _ch = (char)ch;
+       if ((wp - wc) == wbuf_size)
+               return 0;
+       wbuf[WBUF_MASK(wp++)] = _ch;
+       return 1;
 }
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 static int xencons_write(
-    struct tty_struct *tty,
-    const unsigned char *buf,
-    int count)
-{
-    int i;
-    unsigned long flags;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return count;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-
-    for ( i = 0; i < count; i++ )
-        if ( !__xencons_put_char(buf[i]) )
-            break;
-
-    if ( i != 0 )
-        __xencons_tx_flush();
-
-    spin_unlock_irqrestore(&xencons_lock, flags);
-
-    return i;
+       struct tty_struct *tty,
+       const unsigned char *buf,
+       int count)
+{
+       int i;
+       unsigned long flags;
+
+       if (TTY_INDEX(tty) != 0)
+               return count;
+
+       spin_lock_irqsave(&xencons_lock, flags);
+
+       for (i = 0; i < count; i++)
+               if (!__xencons_put_char(buf[i]))
+                       break;
+
+       if (i != 0)
+               __xencons_tx_flush();
+
+       spin_unlock_irqrestore(&xencons_lock, flags);
+
+       return i;
 }
 #else
 static int xencons_write(
-    struct tty_struct *tty, 
-    int from_user,
-    const u_char *buf, 
-    int count)
-{
-    int i;
-    unsigned long flags;
-
-    if ( from_user && verify_area(VERIFY_READ, buf, count) )
-        return -EINVAL;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return count;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-
-    for ( i = 0; i < count; i++ )
-    {
-        char ch;
-        if ( from_user )
-            __get_user(ch, buf + i);
-        else
-            ch = buf[i];
-        if ( !__xencons_put_char(ch) )
-            break;
-    }
-
-    if ( i != 0 )
-        __xencons_tx_flush();
-
-    spin_unlock_irqrestore(&xencons_lock, flags);
-
-    return i;
+       struct tty_struct *tty, 
+       int from_user,
+       const u_char *buf, 
+       int count)
+{
+       int i;
+       unsigned long flags;
+
+       if (from_user && verify_area(VERIFY_READ, buf, count))
+               return -EINVAL;
+
+       if (TTY_INDEX(tty) != 0)
+               return count;
+
+       spin_lock_irqsave(&xencons_lock, flags);
+
+       for (i = 0; i < count; i++) {
+               char ch;
+               if (from_user)
+                       __get_user(ch, buf + i);
+               else
+                       ch = buf[i];
+               if (!__xencons_put_char(ch))
+                       break;
+       }
+
+       if (i != 0)
+               __xencons_tx_flush();
+
+       spin_unlock_irqrestore(&xencons_lock, flags);
+
+       return i;
 }
 #endif
 
 static void xencons_put_char(struct tty_struct *tty, u_char ch)
 {
-    unsigned long flags;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-    (void)__xencons_put_char(ch);
-    spin_unlock_irqrestore(&xencons_lock, flags);
+       unsigned long flags;
+
+       if (TTY_INDEX(tty) != 0)
+               return;
+
+       spin_lock_irqsave(&xencons_lock, flags);
+       (void)__xencons_put_char(ch);
+       spin_unlock_irqrestore(&xencons_lock, flags);
 }
 
 static void xencons_flush_chars(struct tty_struct *tty)
 {
-    unsigned long flags;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-    __xencons_tx_flush();
-    spin_unlock_irqrestore(&xencons_lock, flags);    
+       unsigned long flags;
+
+       if (TTY_INDEX(tty) != 0)
+               return;
+
+       spin_lock_irqsave(&xencons_lock, flags);
+       __xencons_tx_flush();
+       spin_unlock_irqrestore(&xencons_lock, flags);    
 }
 
 static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
 {
-    unsigned long orig_jiffies = jiffies;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return;
-
-    while ( DRV(tty->driver)->chars_in_buffer(tty) )
-    {
-        set_current_state(TASK_INTERRUPTIBLE);
-        schedule_timeout(1);
-        if ( signal_pending(current) )
-            break;
-        if ( (timeout != 0) && time_after(jiffies, orig_jiffies + timeout) )
-            break;
-    }
+       unsigned long orig_jiffies = jiffies;
+
+       if (TTY_INDEX(tty) != 0)
+               return;
+
+       while (DRV(tty->driver)->chars_in_buffer(tty))
+       {
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule_timeout(1);
+               if (signal_pending(current))
+                       break;
+               if ( (timeout != 0) &&
+                    time_after(jiffies, orig_jiffies + timeout) )
+                       break;
+       }
     
-    set_current_state(TASK_RUNNING);
+       set_current_state(TASK_RUNNING);
 }
 
 static int xencons_open(struct tty_struct *tty, struct file *filp)
 {
-    unsigned long flags;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return 0;
-
-    spin_lock_irqsave(&xencons_lock, flags);
-    tty->driver_data = NULL;
-    if ( xencons_tty == NULL )
-        xencons_tty = tty;
-    __xencons_tx_flush();
-    spin_unlock_irqrestore(&xencons_lock, flags);    
-
-    return 0;
+       unsigned long flags;
+
+       if (TTY_INDEX(tty) != 0)
+               return 0;
+
+       spin_lock_irqsave(&xencons_lock, flags);
+       tty->driver_data = NULL;
+       if (xencons_tty == NULL)
+               xencons_tty = tty;
+       __xencons_tx_flush();
+       spin_unlock_irqrestore(&xencons_lock, flags);    
+
+       return 0;
 }
 
 static void xencons_close(struct tty_struct *tty, struct file *filp)
 {
-    unsigned long flags;
-
-    if ( TTY_INDEX(tty) != 0 )
-        return;
-
-    if ( tty->count == 1 )
-    {
-        tty->closing = 1;
-        tty_wait_until_sent(tty, 0);
-        if ( DRV(tty->driver)->flush_buffer != NULL )
-            DRV(tty->driver)->flush_buffer(tty);
-        if ( tty->ldisc.flush_buffer != NULL )
-            tty->ldisc.flush_buffer(tty);
-        tty->closing = 0;
-        spin_lock_irqsave(&xencons_lock, flags);
-        xencons_tty = NULL;
-        spin_unlock_irqrestore(&xencons_lock, flags);    
-    }
+       unsigned long flags;
+
+       if (TTY_INDEX(tty) != 0)
+               return;
+
+       if (tty->count == 1) {
+               tty->closing = 1;
+               tty_wait_until_sent(tty, 0);
+               if (DRV(tty->driver)->flush_buffer != NULL)
+                       DRV(tty->driver)->flush_buffer(tty);
+               if (tty->ldisc.flush_buffer != NULL)
+                       tty->ldisc.flush_buffer(tty);
+               tty->closing = 0;
+               spin_lock_irqsave(&xencons_lock, flags);
+               xencons_tty = NULL;
+               spin_unlock_irqrestore(&xencons_lock, flags);    
+       }
 }
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 static struct tty_operations xencons_ops = {
-    .open = xencons_open,
-    .close = xencons_close,
-    .write = xencons_write,
-    .write_room = xencons_write_room,
-    .put_char = xencons_put_char,
-    .flush_chars = xencons_flush_chars,
-    .chars_in_buffer = xencons_chars_in_buffer,
-    .send_xchar = xencons_send_xchar,
-    .flush_buffer = xencons_flush_buffer,
-    .throttle = xencons_throttle,
-    .unthrottle = xencons_unthrottle,
-    .wait_until_sent = xencons_wait_until_sent,
+       .open = xencons_open,
+       .close = xencons_close,
+       .write = xencons_write,
+       .write_room = xencons_write_room,
+       .put_char = xencons_put_char,
+       .flush_chars = xencons_flush_chars,
+       .chars_in_buffer = xencons_chars_in_buffer,
+       .send_xchar = xencons_send_xchar,
+       .flush_buffer = xencons_flush_buffer,
+       .throttle = xencons_throttle,
+       .unthrottle = xencons_unthrottle,
+       .wait_until_sent = xencons_wait_until_sent,
 };
 
 #ifdef CONFIG_XEN_PRIVILEGED_GUEST
 static const char *xennullcon_startup(void)
 {
-    return NULL;
+       return NULL;
 }
 
 static int xennullcon_dummy(void)
 {
-    return 0;
+       return 0;
 }
 
 #define DUMMY (void *)xennullcon_dummy
@@ -672,122 +665,128 @@
  */
 
 const struct consw xennull_con = {
-    .owner =           THIS_MODULE,
-    .con_startup =     xennullcon_startup,
-    .con_init =                DUMMY,
-    .con_deinit =      DUMMY,
-    .con_clear =       DUMMY,
-    .con_putc =                DUMMY,
-    .con_putcs =       DUMMY,
-    .con_cursor =      DUMMY,
-    .con_scroll =      DUMMY,
-    .con_bmove =       DUMMY,
-    .con_switch =      DUMMY,
-    .con_blank =       DUMMY,
-    .con_font_set =    DUMMY,
-    .con_font_get =    DUMMY,
-    .con_font_default =        DUMMY,
-    .con_font_copy =   DUMMY,
-    .con_set_palette = DUMMY,
-    .con_scrolldelta = DUMMY,
+       .owner =                THIS_MODULE,
+       .con_startup =  xennullcon_startup,
+       .con_init =             DUMMY,
+       .con_deinit =   DUMMY,
+       .con_clear =    DUMMY,
+       .con_putc =             DUMMY,
+       .con_putcs =    DUMMY,
+       .con_cursor =   DUMMY,
+       .con_scroll =   DUMMY,
+       .con_bmove =    DUMMY,
+       .con_switch =   DUMMY,
+       .con_blank =    DUMMY,
+       .con_font_set = DUMMY,
+       .con_font_get = DUMMY,
+       .con_font_default =     DUMMY,
+       .con_font_copy =        DUMMY,
+       .con_set_palette =      DUMMY,
+       .con_scrolldelta =      DUMMY,
 };
 #endif
 #endif
 
 static int __init xencons_init(void)
 {
-    int rc;
-
-    if ( xc_mode == XC_OFF )
-        return 0;
-
-    xencons_ring_init();
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ? 
-                                      1 : MAX_NR_CONSOLES);
-    if ( xencons_driver == NULL )
-        return -ENOMEM;
+       int rc;
+
+       if (xc_mode == XC_OFF)
+               return 0;
+
+       xencons_ring_init();
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+       xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ? 
+                                         1 : MAX_NR_CONSOLES);
+       if (xencons_driver == NULL)
+               return -ENOMEM;
 #else
-    memset(&xencons_driver, 0, sizeof(struct tty_driver));
-    xencons_driver.magic       = TTY_DRIVER_MAGIC;
-    xencons_driver.refcount    = &xencons_refcount;
-    xencons_driver.table       = xencons_table;
-    xencons_driver.num         = (xc_mode == XC_SERIAL) ? 1 : MAX_NR_CONSOLES;
-#endif
-
-    DRV(xencons_driver)->major           = TTY_MAJOR;
-    DRV(xencons_driver)->type            = TTY_DRIVER_TYPE_SERIAL;
-    DRV(xencons_driver)->subtype         = SERIAL_TYPE_NORMAL;
-    DRV(xencons_driver)->init_termios    = tty_std_termios;
-    DRV(xencons_driver)->flags           = 
-        TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_NO_DEVFS;
-    DRV(xencons_driver)->termios         = xencons_termios;
-    DRV(xencons_driver)->termios_locked  = xencons_termios_locked;
-
-    if ( xc_mode == XC_SERIAL )
-    {
-        DRV(xencons_driver)->name        = "ttyS";
-        DRV(xencons_driver)->minor_start = 64 + xc_num;
-        DRV(xencons_driver)->name_base   = 0 + xc_num;
-    }
-    else
-    {
-        DRV(xencons_driver)->name        = "tty";
-        DRV(xencons_driver)->minor_start = xc_num;
-        DRV(xencons_driver)->name_base   = xc_num;
-    }
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    tty_set_operations(xencons_driver, &xencons_ops);
+       memset(&xencons_driver, 0, sizeof(struct tty_driver));
+       xencons_driver.magic       = TTY_DRIVER_MAGIC;
+       xencons_driver.refcount    = &xencons_refcount;
+       xencons_driver.table       = xencons_table;
+       xencons_driver.num         =
+               (xc_mode == XC_SERIAL) ? 1 : MAX_NR_CONSOLES;
+#endif
+
+       DRV(xencons_driver)->major           = TTY_MAJOR;
+       DRV(xencons_driver)->type            = TTY_DRIVER_TYPE_SERIAL;
+       DRV(xencons_driver)->subtype         = SERIAL_TYPE_NORMAL;
+       DRV(xencons_driver)->init_termios    = tty_std_termios;
+       DRV(xencons_driver)->flags           = 
+               TTY_DRIVER_REAL_RAW |
+               TTY_DRIVER_RESET_TERMIOS |
+               TTY_DRIVER_NO_DEVFS;
+       DRV(xencons_driver)->termios         = xencons_termios;
+       DRV(xencons_driver)->termios_locked  = xencons_termios_locked;
+
+       if (xc_mode == XC_SERIAL)
+       {
+               DRV(xencons_driver)->name        = "ttyS";
+               DRV(xencons_driver)->minor_start = 64 + xc_num;
+               DRV(xencons_driver)->name_base   = 0 + xc_num;
+       } else {
+               DRV(xencons_driver)->name        = "tty";
+               DRV(xencons_driver)->minor_start = xc_num;
+               DRV(xencons_driver)->name_base   = xc_num;
+       }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+       tty_set_operations(xencons_driver, &xencons_ops);
 #else
-    xencons_driver.open            = xencons_open;
-    xencons_driver.close           = xencons_close;
-    xencons_driver.write           = xencons_write;
-    xencons_driver.write_room      = xencons_write_room;
-    xencons_driver.put_char        = xencons_put_char;
-    xencons_driver.flush_chars     = xencons_flush_chars;
-    xencons_driver.chars_in_buffer = xencons_chars_in_buffer;
-    xencons_driver.send_xchar      = xencons_send_xchar;
-    xencons_driver.flush_buffer    = xencons_flush_buffer;
-    xencons_driver.throttle        = xencons_throttle;
-    xencons_driver.unthrottle      = xencons_unthrottle;
-    xencons_driver.wait_until_sent = xencons_wait_until_sent;
-#endif
-
-    if ( (rc = tty_register_driver(DRV(xencons_driver))) != 0 )
-    {
-        printk("WARNING: Failed to register Xen virtual "
-               "console driver as '%s%d'\n",
-               DRV(xencons_driver)->name, DRV(xencons_driver)->name_base);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-        put_tty_driver(xencons_driver);
-        xencons_driver = NULL;
-#endif
-        return rc;
-    }
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    tty_register_device(xencons_driver, 0, NULL);
-#endif
-
-    if ( xen_start_info->flags & SIF_INITDOMAIN )
-    {
-        xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
-        (void)request_irq(xencons_priv_irq,
-                          xencons_priv_interrupt, 0, "console", NULL);
-    }
-    else
-    {
-       
-       xencons_ring_register_receiver(xencons_rx);
-    }
-
-    printk("Xen virtual console successfully installed as %s%d\n",
-           DRV(xencons_driver)->name,
-           DRV(xencons_driver)->name_base );
+       xencons_driver.open            = xencons_open;
+       xencons_driver.close           = xencons_close;
+       xencons_driver.write           = xencons_write;
+       xencons_driver.write_room      = xencons_write_room;
+       xencons_driver.put_char        = xencons_put_char;
+       xencons_driver.flush_chars     = xencons_flush_chars;
+       xencons_driver.chars_in_buffer = xencons_chars_in_buffer;
+       xencons_driver.send_xchar      = xencons_send_xchar;
+       xencons_driver.flush_buffer    = xencons_flush_buffer;
+       xencons_driver.throttle        = xencons_throttle;
+       xencons_driver.unthrottle      = xencons_unthrottle;
+       xencons_driver.wait_until_sent = xencons_wait_until_sent;
+#endif
+
+       if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
+               printk("WARNING: Failed to register Xen virtual "
+                      "console driver as '%s%d'\n",
+                      DRV(xencons_driver)->name, DRV(xencons_driver)->name_base);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+               put_tty_driver(xencons_driver);
+               xencons_driver = NULL;
+#endif
+               return rc;
+       }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+       tty_register_device(xencons_driver, 0, NULL);
+#endif
+
+       if (xen_start_info->flags & SIF_INITDOMAIN) {
+               xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
+               (void)request_irq(xencons_priv_irq,
+                                 xencons_priv_interrupt, 0, "console", NULL);
+       } else {
+               xencons_ring_register_receiver(xencons_rx);
+       }
+
+       printk("Xen virtual console successfully installed as %s%d\n",
+              DRV(xencons_driver)->name,
+              DRV(xencons_driver)->name_base );
     
-    return 0;
+       return 0;
 }
 
 module_init(xencons_init);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
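
The console transmit path reworked above is a power-of-two ring driven by
free-running producer/consumer indices: wp - wc is the fill level, and
WBUF_MASK() turns an index into a slot, so no separate "full" flag is needed.
A minimal user-space sketch of the same scheme follows; the names mirror the
driver, but WBUF_SIZE, put_char() and flush() are illustrative, and stdout
stands in for xencons_ring_send()/kcons_write_dom0():

#include <stdio.h>

#define WBUF_SIZE 16                    /* must be a power of two */
#define WBUF_MASK(i) ((i) & (WBUF_SIZE - 1))

static char wbuf[WBUF_SIZE];
static unsigned int wp, wc;             /* free-running producer/consumer */

static int put_char(char ch)
{
        if (wp - wc == WBUF_SIZE)       /* ring full: caller must retry */
                return 0;
        wbuf[WBUF_MASK(wp++)] = ch;
        return 1;
}

static void flush(void)
{
        while (wc != wp) {
                /* Send at most up to the wrap point, as in __xencons_tx_flush(). */
                unsigned int sz = wp - wc;
                if (sz > WBUF_SIZE - WBUF_MASK(wc))
                        sz = WBUF_SIZE - WBUF_MASK(wc);
                fwrite(&wbuf[WBUF_MASK(wc)], 1, sz, stdout);
                wc += sz;
        }
}

int main(void)
{
        const char *s = "hello, ring\n";
        while (*s && put_char(*s))
                s++;
        flush();
        return 0;
}

Because the indices are free-running unsigned values, wp - wc stays correct
across wrap-around, which is why xencons_write_room() can be the one-liner
wbuf_size - (wp - wc).
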
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c
--- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c   Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c   Thu Sep 22 15:12:14 2005
@@ -125,3 +125,13 @@
 
        (void)xencons_ring_init();
 }
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h
--- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h   Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h   Thu Sep 22 15:12:14 2005
@@ -3,12 +3,21 @@
 
 asmlinkage int xprintk(const char *fmt, ...);
 
-
 int xencons_ring_init(void);
 int xencons_ring_send(const char *data, unsigned len);
 
-typedef void (xencons_receiver_func)(char *buf, unsigned len, 
-                                     struct pt_regs *regs);
+typedef void (xencons_receiver_func)(
+       char *buf, unsigned len, struct pt_regs *regs);
 void xencons_ring_register_receiver(xencons_receiver_func *f);
 
 #endif /* _XENCONS_RING_H */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
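
xencons_ring_register_receiver() above is a single-slot hook: the ring layer
stores one callback and demuxes every received chunk through it. A
self-contained sketch of the pattern, with register_receiver(), ring_deliver()
and tty_rx() as invented stand-ins for the kernel-side plumbing:

#include <stdio.h>

typedef void receiver_func(const char *buf, unsigned int len);

static receiver_func *receiver;         /* one registered consumer */

static void register_receiver(receiver_func *f)
{
        receiver = f;
}

static void ring_deliver(const char *buf, unsigned int len)
{
        if (receiver)                   /* drop input until someone registers */
                receiver(buf, len);
}

static void tty_rx(const char *buf, unsigned int len)
{
        printf("rx %u bytes: %.*s\n", len, (int)len, buf);
}

int main(void)
{
        register_receiver(tty_rx);
        ring_deliver("hello", 5);
        return 0;
}
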
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c
--- a/linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c  Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c  Thu Sep 22 15:12:14 2005
@@ -1,9 +1,9 @@
 /******************************************************************************
  * evtchn.c
  * 
- * Xenolinux driver for receiving and demuxing event-channel signals.
- * 
- * Copyright (c) 2004, K A Fraser
+ * Driver for receiving and demuxing event-channel signals.
+ * 
+ * Copyright (c) 2004-2005, K A Fraser
  * Multi-process extensions Copyright (c) 2004, Steven Smith
  * 
  * This file may be distributed separately from the Linux kernel, or
@@ -46,29 +46,18 @@
 #include <linux/init.h>
 #define XEN_EVTCHN_MASK_OPS
 #include <asm-xen/evtchn.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-#include <linux/devfs_fs_kernel.h>
-#define OLD_DEVFS
-#else
 #include <linux/gfp.h>
-#endif
-
-#ifdef OLD_DEVFS
-/* NB. This must be shared amongst drivers if more things go in /dev/xen */
-static devfs_handle_t xen_dev_dir;
-#endif
 
 struct per_user_data {
-    /* Notification ring, accessed via /dev/xen/evtchn. */
-#   define EVTCHN_RING_SIZE     2048  /* 2048 16-bit entries */
-#   define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
-    u16 *ring;
-    unsigned int ring_cons, ring_prod, ring_overflow;
-
-    /* Processes wait on this queue when ring is empty. */
-    wait_queue_head_t evtchn_wait;
-    struct fasync_struct *evtchn_async_queue;
+       /* Notification ring, accessed via /dev/xen/evtchn. */
+#define EVTCHN_RING_SIZE     2048  /* 2048 16-bit entries */
+#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
+       u16 *ring;
+       unsigned int ring_cons, ring_prod, ring_overflow;
+
+       /* Processes wait on this queue when ring is empty. */
+       wait_queue_head_t evtchn_wait;
+       struct fasync_struct *evtchn_async_queue;
 };
 
 /* Who's bound to each port? */
@@ -77,356 +66,310 @@
 
 void evtchn_device_upcall(int port)
 {
-    struct per_user_data *u;
-
-    spin_lock(&port_user_lock);
-
-    mask_evtchn(port);
-    clear_evtchn(port);
-
-    if ( (u = port_user[port]) != NULL )
-    {
-        if ( (u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE )
-        {
-            u->ring[EVTCHN_RING_MASK(u->ring_prod)] = (u16)port;
-            if ( u->ring_cons == u->ring_prod++ )
-            {
-                wake_up_interruptible(&u->evtchn_wait);
-                kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN);
-            }
-        }
-        else
-        {
-            u->ring_overflow = 1;
-        }
-    }
-
-    spin_unlock(&port_user_lock);
+       struct per_user_data *u;
+
+       spin_lock(&port_user_lock);
+
+       mask_evtchn(port);
+       clear_evtchn(port);
+
+       if ((u = port_user[port]) != NULL) {
+               if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
+                       u->ring[EVTCHN_RING_MASK(u->ring_prod)] = (u16)port;
+                       if (u->ring_cons == u->ring_prod++) {
+                               wake_up_interruptible(&u->evtchn_wait);
+                               kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN);
+                       }
+               } else {
+                       u->ring_overflow = 1;
+               }
+       }
+
+       spin_unlock(&port_user_lock);
 }
 
 static ssize_t evtchn_read(struct file *file, char *buf,
                            size_t count, loff_t *ppos)
 {
-    int rc;
-    unsigned int c, p, bytes1 = 0, bytes2 = 0;
-    DECLARE_WAITQUEUE(wait, current);
-    struct per_user_data *u = file->private_data;
-
-    add_wait_queue(&u->evtchn_wait, &wait);
-
-    count &= ~1; /* even number of bytes */
-
-    if ( count == 0 )
-    {
-        rc = 0;
-        goto out;
-    }
-
-    if ( count > PAGE_SIZE )
-        count = PAGE_SIZE;
-
-    for ( ; ; )
-    {
-        set_current_state(TASK_INTERRUPTIBLE);
-
-        if ( (c = u->ring_cons) != (p = u->ring_prod) )
-            break;
-
-        if ( u->ring_overflow )
-        {
-            rc = -EFBIG;
-            goto out;
-        }
-
-        if ( file->f_flags & O_NONBLOCK )
-        {
-            rc = -EAGAIN;
-            goto out;
-        }
-
-        if ( signal_pending(current) )
-        {
-            rc = -ERESTARTSYS;
-            goto out;
-        }
-
-        schedule();
-    }
-
-    /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
-    if ( ((c ^ p) & EVTCHN_RING_SIZE) != 0 )
-    {
-        bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * sizeof(u16);
-        bytes2 = EVTCHN_RING_MASK(p) * sizeof(u16);
-    }
-    else
-    {
-        bytes1 = (p - c) * sizeof(u16);
-        bytes2 = 0;
-    }
-
-    /* Truncate chunks according to caller's maximum byte count. */
-    if ( bytes1 > count )
-    {
-        bytes1 = count;
-        bytes2 = 0;
-    }
-    else if ( (bytes1 + bytes2) > count )
-    {
-        bytes2 = count - bytes1;
-    }
-
-    if ( copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
-         ((bytes2 != 0) && copy_to_user(&buf[bytes1], &u->ring[0], bytes2)) )
-    {
-        rc = -EFAULT;
-        goto out;
-    }
-
-    u->ring_cons += (bytes1 + bytes2) / sizeof(u16);
-
-    rc = bytes1 + bytes2;
+       int rc;
+       unsigned int c, p, bytes1 = 0, bytes2 = 0;
+       DECLARE_WAITQUEUE(wait, current);
+       struct per_user_data *u = file->private_data;
+
+       add_wait_queue(&u->evtchn_wait, &wait);
+
+       count &= ~1; /* even number of bytes */
+
+       if (count == 0) {
+               rc = 0;
+               goto out;
+       }
+
+       if (count > PAGE_SIZE)
+               count = PAGE_SIZE;
+
+       for (;;) {
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               if ((c = u->ring_cons) != (p = u->ring_prod))
+                       break;
+
+               if (u->ring_overflow) {
+                       rc = -EFBIG;
+                       goto out;
+               }
+
+               if (file->f_flags & O_NONBLOCK) {
+                       rc = -EAGAIN;
+                       goto out;
+               }
+
+               if (signal_pending(current)) {
+                       rc = -ERESTARTSYS;
+                       goto out;
+               }
+
+               schedule();
+       }
+
+       /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
+       if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
+               bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
+                       sizeof(u16);
+               bytes2 = EVTCHN_RING_MASK(p) * sizeof(u16);
+       } else {
+               bytes1 = (p - c) * sizeof(u16);
+               bytes2 = 0;
+       }
+
+       /* Truncate chunks according to caller's maximum byte count. */
+       if (bytes1 > count) {
+               bytes1 = count;
+               bytes2 = 0;
+       } else if ((bytes1 + bytes2) > count) {
+               bytes2 = count - bytes1;
+       }
+
+       if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
+           ((bytes2 != 0) &&
+            copy_to_user(&buf[bytes1], &u->ring[0], bytes2))) {
+               rc = -EFAULT;
+               goto out;
+       }
+
+       u->ring_cons += (bytes1 + bytes2) / sizeof(u16);
+
+       rc = bytes1 + bytes2;
 
  out:
-    __set_current_state(TASK_RUNNING);
-    remove_wait_queue(&u->evtchn_wait, &wait);
-    return rc;
+       __set_current_state(TASK_RUNNING);
+       remove_wait_queue(&u->evtchn_wait, &wait);
+       return rc;
 }
 
 static ssize_t evtchn_write(struct file *file, const char *buf,
                             size_t count, loff_t *ppos)
 {
-    int  rc, i;
-    u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
-    struct per_user_data *u = file->private_data;
-
-    if ( kbuf == NULL )
-        return -ENOMEM;
-
-    count &= ~1; /* even number of bytes */
-
-    if ( count == 0 )
-    {
-        rc = 0;
-        goto out;
-    }
-
-    if ( count > PAGE_SIZE )
-        count = PAGE_SIZE;
-
-    if ( copy_from_user(kbuf, buf, count) != 0 )
-    {
-        rc = -EFAULT;
-        goto out;
-    }
-
-    spin_lock_irq(&port_user_lock);
-    for ( i = 0; i < (count/2); i++ )
-        if ( (kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u) )
-            unmask_evtchn(kbuf[i]);
-    spin_unlock_irq(&port_user_lock);
-
-    rc = count;
+       int  rc, i;
+       u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
+       struct per_user_data *u = file->private_data;
+
+       if (kbuf == NULL)
+               return -ENOMEM;
+
+       count &= ~1; /* even number of bytes */
+
+       if (count == 0) {
+               rc = 0;
+               goto out;
+       }
+
+       if (count > PAGE_SIZE)
+               count = PAGE_SIZE;
+
+       if (copy_from_user(kbuf, buf, count) != 0) {
+               rc = -EFAULT;
+               goto out;
+       }
+
+       spin_lock_irq(&port_user_lock);
+       for (i = 0; i < (count/2); i++)
+               if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
+                       unmask_evtchn(kbuf[i]);
+       spin_unlock_irq(&port_user_lock);
+
+       rc = count;
 
  out:
-    free_page((unsigned long)kbuf);
-    return rc;
+       free_page((unsigned long)kbuf);
+       return rc;
 }
 
 static int evtchn_ioctl(struct inode *inode, struct file *file,
                         unsigned int cmd, unsigned long arg)
 {
-    int rc = 0;
-    struct per_user_data *u = file->private_data;
-
-    spin_lock_irq(&port_user_lock);
+       int rc = 0;
+       struct per_user_data *u = file->private_data;
+
+       spin_lock_irq(&port_user_lock);
     
-    switch ( cmd )
-    {
-    case EVTCHN_RESET:
-        /* Initialise the ring to empty. Clear errors. */
-        u->ring_cons = u->ring_prod = u->ring_overflow = 0;
-        break;
-
-    case EVTCHN_BIND:
-        if ( arg >= NR_EVENT_CHANNELS )
-        {
-            rc = -EINVAL;
-        }
-        else if ( port_user[arg] != NULL )
-        {
-            rc = -EISCONN;
-        }
-        else
-        {
-            port_user[arg] = u;
-            unmask_evtchn(arg);
-        }
-        break;
-
-    case EVTCHN_UNBIND:
-        if ( arg >= NR_EVENT_CHANNELS )
-        {
-            rc = -EINVAL;
-        }
-        else if ( port_user[arg] != u )
-        {
-            rc = -ENOTCONN;
-        }
-        else
-        {
-            port_user[arg] = NULL;
-            mask_evtchn(arg);
-        }
-        break;
-
-    default:
-        rc = -ENOSYS;
-        break;
-    }
-
-    spin_unlock_irq(&port_user_lock);   
-
-    return rc;
+       switch (cmd) {
+       case EVTCHN_RESET:
+               /* Initialise the ring to empty. Clear errors. */
+               u->ring_cons = u->ring_prod = u->ring_overflow = 0;
+               break;
+
+       case EVTCHN_BIND:
+               if (arg >= NR_EVENT_CHANNELS) {
+                       rc = -EINVAL;
+               } else if (port_user[arg] != NULL) {
+                       rc = -EISCONN;
+               } else {
+                       port_user[arg] = u;
+                       unmask_evtchn(arg);
+               }
+               break;
+
+       case EVTCHN_UNBIND:
+               if (arg >= NR_EVENT_CHANNELS) {
+                       rc = -EINVAL;
+               } else if (port_user[arg] != u) {
+                       rc = -ENOTCONN;
+               } else {
+                       port_user[arg] = NULL;
+                       mask_evtchn(arg);
+               }
+               break;
+
+       default:
+               rc = -ENOSYS;
+               break;
+       }
+
+       spin_unlock_irq(&port_user_lock);   
+
+       return rc;
 }
 
 static unsigned int evtchn_poll(struct file *file, poll_table *wait)
 {
-    unsigned int mask = POLLOUT | POLLWRNORM;
-    struct per_user_data *u = file->private_data;
-
-    poll_wait(file, &u->evtchn_wait, wait);
-    if ( u->ring_cons != u->ring_prod )
-        mask |= POLLIN | POLLRDNORM;
-    if ( u->ring_overflow )
-        mask = POLLERR;
-    return mask;
+       unsigned int mask = POLLOUT | POLLWRNORM;
+       struct per_user_data *u = file->private_data;
+
+       poll_wait(file, &u->evtchn_wait, wait);
+       if (u->ring_cons != u->ring_prod)
+               mask |= POLLIN | POLLRDNORM;
+       if (u->ring_overflow)
+               mask = POLLERR;
+       return mask;
 }
 
 static int evtchn_fasync(int fd, struct file *filp, int on)
 {
-    struct per_user_data *u = filp->private_data;
-    return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
+       struct per_user_data *u = filp->private_data;
+       return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
 }
 
 static int evtchn_open(struct inode *inode, struct file *filp)
 {
-    struct per_user_data *u;
-
-    if ( (u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL )
-        return -ENOMEM;
-
-    memset(u, 0, sizeof(*u));
-    init_waitqueue_head(&u->evtchn_wait);
-
-    if ( (u->ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL )
-    {
-        kfree(u);
-        return -ENOMEM;
-    }
-
-    filp->private_data = u;
-
-    return 0;
+       struct per_user_data *u;
+
+       if ((u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL)
+               return -ENOMEM;
+
+       memset(u, 0, sizeof(*u));
+       init_waitqueue_head(&u->evtchn_wait);
+
+       if ((u->ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL)
+       {
+               kfree(u);
+               return -ENOMEM;
+       }
+
+       filp->private_data = u;
+
+       return 0;
 }
 
 static int evtchn_release(struct inode *inode, struct file *filp)
 {
-    int i;
-    struct per_user_data *u = filp->private_data;
-
-    spin_lock_irq(&port_user_lock);
-
-    free_page((unsigned long)u->ring);
-
-    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
-    {
-        if ( port_user[i] == u )
-        {
-            port_user[i] = NULL;
-            mask_evtchn(i);
-        }
-    }
-
-    spin_unlock_irq(&port_user_lock);
-
-    kfree(u);
-
-    return 0;
+       int i;
+       struct per_user_data *u = filp->private_data;
+
+       spin_lock_irq(&port_user_lock);
+
+       free_page((unsigned long)u->ring);
+
+       for (i = 0; i < NR_EVENT_CHANNELS; i++)
+       {
+               if (port_user[i] == u)
+               {
+                       port_user[i] = NULL;
+                       mask_evtchn(i);
+               }
+       }
+
+       spin_unlock_irq(&port_user_lock);
+
+       kfree(u);
+
+       return 0;
 }
 
 static struct file_operations evtchn_fops = {
-    .owner   = THIS_MODULE,
-    .read    = evtchn_read,
-    .write   = evtchn_write,
-    .ioctl   = evtchn_ioctl,
-    .poll    = evtchn_poll,
-    .fasync  = evtchn_fasync,
-    .open    = evtchn_open,
-    .release = evtchn_release,
+       .owner   = THIS_MODULE,
+       .read    = evtchn_read,
+       .write   = evtchn_write,
+       .ioctl   = evtchn_ioctl,
+       .poll    = evtchn_poll,
+       .fasync  = evtchn_fasync,
+       .open    = evtchn_open,
+       .release = evtchn_release,
 };
 
 static struct miscdevice evtchn_miscdev = {
-    .minor        = EVTCHN_MINOR,
-    .name         = "evtchn",
-    .fops         = &evtchn_fops,
+       .minor        = EVTCHN_MINOR,
+       .name         = "evtchn",
+       .fops         = &evtchn_fops,
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    .devfs_name   = "misc/evtchn",
+       .devfs_name   = "misc/evtchn",
 #endif
 };
 
 static int __init evtchn_init(void)
 {
-#ifdef OLD_DEVFS
-    devfs_handle_t symlink_handle;
-    int            pos;
-    char           link_dest[64];
-#endif
-    int err;
-
-    spin_lock_init(&port_user_lock);
-    memset(port_user, 0, sizeof(port_user));
-
-    /* (DEVFS) create '/dev/misc/evtchn'. */
-    err = misc_register(&evtchn_miscdev);
-    if ( err != 0 )
-    {
-        printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
-        return err;
-    }
-
-#ifdef OLD_DEVFS
-    /* (DEVFS) create directory '/dev/xen'. */
-    xen_dev_dir = devfs_mk_dir(NULL, "xen", NULL);
-
-    /* (DEVFS) &link_dest[pos] == '../misc/evtchn'. */
-    pos = devfs_generate_path(evtchn_miscdev.devfs_handle, 
-                              &link_dest[3], 
-                              sizeof(link_dest) - 3);
-    if ( pos >= 0 )
-        strncpy(&link_dest[pos], "../", 3);
-
-    /* (DEVFS) symlink '/dev/xen/evtchn' -> '../misc/evtchn'. */
-    (void)devfs_mk_symlink(xen_dev_dir, 
-                           "evtchn", 
-                           DEVFS_FL_DEFAULT, 
-                           &link_dest[pos],
-                           &symlink_handle, 
-                           NULL);
-
-    /* (DEVFS) automatically destroy the symlink with its destination. */
-    devfs_auto_unregister(evtchn_miscdev.devfs_handle, symlink_handle);
-#endif
-
-    printk("Event-channel device installed.\n");
-
-    return 0;
+       int err;
+
+       spin_lock_init(&port_user_lock);
+       memset(port_user, 0, sizeof(port_user));
+
+       /* (DEVFS) create '/dev/misc/evtchn'. */
+       err = misc_register(&evtchn_miscdev);
+       if (err != 0)
+       {
+               printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
+               return err;
+       }
+
+       printk("Event-channel device installed.\n");
+
+       return 0;
 }
 
 static void evtchn_cleanup(void)
 {
-    misc_deregister(&evtchn_miscdev);
+       misc_deregister(&evtchn_miscdev);
 }
 
 module_init(evtchn_init);
 module_exit(evtchn_cleanup);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
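
The subtlest arithmetic in evtchn_read() above is the two-chunk copy. Because
ring_cons and ring_prod are free-running, ((c ^ p) & EVTCHN_RING_SIZE) is
non-zero exactly when the live entries straddle the ring wrap, in which case
the read splits into a tail chunk and a head chunk. A runnable sketch of that
test, counting entries rather than the driver's bytes, with RING_SIZE and
chunks() as illustrative names:

#include <stdio.h>

#define RING_SIZE 2048                  /* entries; power of two */
#define RING_MASK(i) ((i) & (RING_SIZE - 1))

/* Split the range [c, p) into the one or two contiguous runs a reader
 * must copy, exactly as evtchn_read() computes bytes1/bytes2. */
static void chunks(unsigned int c, unsigned int p,
                   unsigned int *n1, unsigned int *n2)
{
        if ((c ^ p) & RING_SIZE) {      /* indices straddle a wrap */
                *n1 = RING_SIZE - RING_MASK(c);
                *n2 = RING_MASK(p);
        } else {
                *n1 = p - c;
                *n2 = 0;
        }
}

int main(void)
{
        unsigned int n1, n2;
        chunks(2040, 2055, &n1, &n2);   /* producer has wrapped */
        printf("chunk1=%u chunk2=%u\n", n1, n2);   /* prints 8 and 7 */
        return 0;
}
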
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/netback/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h Thu Sep 22 15:12:14 2005
@@ -62,9 +62,7 @@
        /* Private indexes into shared ring. */
        NETIF_RING_IDX rx_req_cons;
        NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
-#ifdef CONFIG_XEN_NETDEV_GRANT
        NETIF_RING_IDX rx_resp_prod_copy;
-#endif
        NETIF_RING_IDX tx_req_cons;
        NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
 
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Thu Sep 22 15:12:14 2005
@@ -23,7 +23,7 @@
 static int  make_rx_response(netif_t *netif, 
                              u16      id, 
                              s8       st,
-                             unsigned long addr,
+                             u16      offset,
                              u16      size,
                              u16      csum_valid);
 
@@ -41,11 +41,7 @@
 static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2+1];
 static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
 
-#ifdef CONFIG_XEN_NETDEV_GRANT
 static gnttab_transfer_t grant_rx_op[MAX_PENDING_REQS];
-#else
-static struct mmuext_op rx_mmuext[NETIF_RX_RING_SIZE];
-#endif
 static unsigned char rx_notify[NR_EVENT_CHANNELS];
 
 /* Don't currently gate addition of an interface to the tx scheduling list. */
@@ -72,14 +68,9 @@
 
 static struct sk_buff_head tx_queue;
 
-#ifdef CONFIG_XEN_NETDEV_GRANT
 static u16 grant_tx_ref[MAX_PENDING_REQS];
 static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
 static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
-
-#else
-static multicall_entry_t tx_mcl[MAX_PENDING_REQS];
-#endif
 
 static struct list_head net_schedule_list;
 static spinlock_t net_schedule_list_lock;
@@ -108,7 +99,7 @@
        return mfn;
 }
 
-#ifndef CONFIG_XEN_NETDEV_GRANT
+#if 0
 static void free_mfn(unsigned long mfn)
 {
        unsigned long flags;
@@ -180,18 +171,7 @@
                dev_kfree_skb(skb);
                skb = nskb;
        }
-#ifdef CONFIG_XEN_NETDEV_GRANT
-#ifdef DEBUG_GRANT
-       printk(KERN_ALERT "#### be_xmit: req_prod=%d req_cons=%d "
-              "id=%04x gr=%04x\n",
-              netif->rx->req_prod,
-              netif->rx_req_cons,
-              netif->rx->ring[
-                      MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.id,
-              netif->rx->ring[
-                      MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.gref);
-#endif
-#endif
+
        netif->rx_req_cons++;
        netif_get(netif);
 
@@ -232,11 +212,7 @@
        u16 size, id, evtchn;
        multicall_entry_t *mcl;
        mmu_update_t *mmu;
-#ifdef CONFIG_XEN_NETDEV_GRANT
        gnttab_transfer_t *gop;
-#else
-       struct mmuext_op *mmuext;
-#endif
        unsigned long vdata, old_mfn, new_mfn;
        struct sk_buff_head rxq;
        struct sk_buff *skb;
@@ -247,11 +223,7 @@
 
        mcl = rx_mcl;
        mmu = rx_mmu;
-#ifdef CONFIG_XEN_NETDEV_GRANT
        gop = grant_rx_op;
-#else
-       mmuext = rx_mmuext;
-#endif
 
        while ((skb = skb_dequeue(&rx_queue)) != NULL) {
                netif   = netdev_priv(skb->dev);
@@ -277,25 +249,13 @@
                                        pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
                mcl++;
 
-#ifdef CONFIG_XEN_NETDEV_GRANT
                gop->mfn = old_mfn;
                gop->domid = netif->domid;
                gop->ref = netif->rx->ring[
                        MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref;
                netif->rx_resp_prod_copy++;
                gop++;
-#else
-               mcl->op = __HYPERVISOR_mmuext_op;
-               mcl->args[0] = (unsigned long)mmuext;
-               mcl->args[1] = 1;
-               mcl->args[2] = 0;
-               mcl->args[3] = netif->domid;
-               mcl++;
-
-               mmuext->cmd = MMUEXT_REASSIGN_PAGE;
-               mmuext->arg1.mfn = old_mfn;
-               mmuext++;
-#endif
+
                mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
                        MMU_MACHPHYS_UPDATE;
                mmu->val = __pa(vdata) >> PAGE_SHIFT;  
@@ -303,9 +263,6 @@
 
                __skb_queue_tail(&rxq, skb);
 
-#ifdef DEBUG_GRANT
-               dump_packet('a', old_mfn, vdata);
-#endif
                /* Filled the batch queue? */
                if ((mcl - rx_mcl) == ARRAY_SIZE(rx_mcl))
                        break;
@@ -321,17 +278,12 @@
        mcl->args[3] = DOMID_SELF;
        mcl++;
 
-#ifdef CONFIG_XEN_NETDEV_GRANT
        mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-#else
-       mcl[-3].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-#endif
        BUG_ON(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0);
 
        mcl = rx_mcl;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-       if(HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 
-                                    gop - grant_rx_op)) { 
+       if( HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 
+                                     gop - grant_rx_op)) { 
                /*
                 * The other side has given us a bad grant ref, or has no 
                 * headroom, or has gone away. Unfortunately the current grant
@@ -343,20 +295,14 @@
                        grant_rx_op[0].domid, gop - grant_rx_op); 
        }
        gop = grant_rx_op;
-#else
-       mmuext = rx_mmuext;
-#endif
+
        while ((skb = __skb_dequeue(&rxq)) != NULL) {
                netif   = netdev_priv(skb->dev);
                size    = skb->tail - skb->data;
 
                /* Rederive the machine addresses. */
                new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
-#ifdef CONFIG_XEN_NETDEV_GRANT
                old_mfn = 0; /* XXX Fix this so we can free_mfn() on error! */
-#else
-               old_mfn = mmuext[0].arg1.mfn;
-#endif
                atomic_set(&(skb_shinfo(skb)->dataref), 1);
                skb_shinfo(skb)->nr_frags = 0;
                skb_shinfo(skb)->frag_list = NULL;
@@ -369,27 +315,17 @@
 
                /* Check the reassignment error code. */
                status = NETIF_RSP_OKAY;
-#ifdef CONFIG_XEN_NETDEV_GRANT
                if(gop->status != 0) { 
                        DPRINTK("Bad status %d from grant transfer to DOM%u\n",
                                gop->status, netif->domid);
                        /* XXX SMH: should free 'old_mfn' here */
                        status = NETIF_RSP_ERROR; 
                } 
-#else
-               if (unlikely(mcl[1].result != 0)) {
-                       DPRINTK("Failed MMU update transferring to DOM%u\n",
-                               netif->domid);
-                       free_mfn(old_mfn);
-                       status = NETIF_RSP_ERROR;
-               }
-#endif
                evtchn = netif->evtchn;
                id = netif->rx->ring[
                        MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
                if (make_rx_response(netif, id, status,
-                                    (old_mfn << PAGE_SHIFT) | /* XXX */
-                                    ((unsigned long)skb->data & ~PAGE_MASK),
+                                    (unsigned long)skb->data & ~PAGE_MASK,
                                     size, skb->proto_csum_valid) &&
                    (rx_notify[evtchn] == 0)) {
                        rx_notify[evtchn] = 1;
@@ -398,13 +334,8 @@
 
                netif_put(netif);
                dev_kfree_skb(skb);
-#ifdef CONFIG_XEN_NETDEV_GRANT
                mcl++;
                gop++;
-#else
-               mcl += 2;
-               mmuext += 1;
-#endif
        }
 
        while (notify_nr != 0) {
@@ -486,11 +417,7 @@
 
 inline static void net_tx_action_dealloc(void)
 {
-#ifdef CONFIG_XEN_NETDEV_GRANT
        gnttab_unmap_grant_ref_t *gop;
-#else
-       multicall_entry_t *mcl;
-#endif
        u16 pending_idx;
        PEND_RING_IDX dc, dp;
        netif_t *netif;
@@ -498,7 +425,6 @@
        dc = dealloc_cons;
        dp = dealloc_prod;
 
-#ifdef CONFIG_XEN_NETDEV_GRANT
        /*
         * Free up any grants we have finished using
         */
@@ -513,26 +439,8 @@
        }
        BUG_ON(HYPERVISOR_grant_table_op(
                GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops));
-#else
-       mcl = tx_mcl;
-       while (dc != dp) {
-               pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
-               MULTI_update_va_mapping(mcl, MMAP_VADDR(pending_idx),
-                                       __pte(0), 0);
-               mcl++;     
-       }
-
-       mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-       BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
-
-       mcl = tx_mcl;
-#endif
+
        while (dealloc_cons != dp) {
-#ifndef CONFIG_XEN_NETDEV_GRANT
-               /* The update_va_mapping() must not fail. */
-               BUG_ON(mcl[0].result != 0);
-#endif
-
                pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
 
                netif = pending_tx_info[pending_idx].netif;
@@ -556,10 +464,6 @@
                        add_to_net_schedule_list_tail(netif);
         
                netif_put(netif);
-
-#ifndef CONFIG_XEN_NETDEV_GRANT
-               mcl++;
-#endif
        }
 }
 
@@ -572,21 +476,13 @@
        netif_tx_request_t txreq;
        u16 pending_idx;
        NETIF_RING_IDX i;
-#ifdef CONFIG_XEN_NETDEV_GRANT
        gnttab_map_grant_ref_t *mop;
-#else
-       multicall_entry_t *mcl;
-#endif
        unsigned int data_len;
 
        if (dealloc_cons != dealloc_prod)
                net_tx_action_dealloc();
 
-#ifdef CONFIG_XEN_NETDEV_GRANT
        mop = tx_map_ops;
-#else
-       mcl = tx_mcl;
-#endif
        while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
                !list_empty(&net_schedule_list)) {
                /* Get a netif from the list with work to do. */
@@ -657,8 +553,7 @@
                }
 
                /* No crossing a page as the payload mustn't fragment. */
-               if (unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >=
-                            PAGE_SIZE)) {
+               if (unlikely((txreq.offset + txreq.size) >= PAGE_SIZE)) {
                        DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n", 
                                txreq.addr, txreq.size, 
                                (txreq.addr &~PAGE_MASK) + txreq.size);
@@ -682,20 +577,12 @@
 
                /* Packets passed to netif_rx() must have some headroom. */
                skb_reserve(skb, 16);
-#ifdef CONFIG_XEN_NETDEV_GRANT
+
                mop->host_addr = MMAP_VADDR(pending_idx);
                mop->dom       = netif->domid;
-               mop->ref       = txreq.addr >> PAGE_SHIFT;
+               mop->ref       = txreq.gref;
                mop->flags     = GNTMAP_host_map | GNTMAP_readonly;
                mop++;
-#else
-               MULTI_update_va_mapping_otherdomain(
-                       mcl, MMAP_VADDR(pending_idx),
-                       pfn_pte_ma(txreq.addr >> PAGE_SHIFT, PAGE_KERNEL),
-                       0, netif->domid);
-
-               mcl++;
-#endif
 
                memcpy(&pending_tx_info[pending_idx].req,
                       &txreq, sizeof(txreq));
@@ -706,17 +593,10 @@
 
                pending_cons++;
 
-#ifdef CONFIG_XEN_NETDEV_GRANT
                if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
                        break;
-#else
-               /* Filled the batch queue? */
-               if ((mcl - tx_mcl) == ARRAY_SIZE(tx_mcl))
-                       break;
-#endif
-       }
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
+       }
+
        if (mop == tx_map_ops)
                return;
 
@@ -724,14 +604,6 @@
                GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops));
 
        mop = tx_map_ops;
-#else
-       if (mcl == tx_mcl)
-               return;
-
-       BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
-
-       mcl = tx_mcl;
-#endif
        while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
                pending_idx = *((u16 *)skb->data);
                netif       = pending_tx_info[pending_idx].netif;
@@ -739,7 +611,6 @@
                       sizeof(txreq));
 
                /* Check the remap error code. */
-#ifdef CONFIG_XEN_NETDEV_GRANT
                if (unlikely(mop->handle < 0)) {
                        printk(KERN_ALERT "#### netback grant fails\n");
                        make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
@@ -754,30 +625,13 @@
                        __pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
                        FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT);
                grant_tx_ref[pending_idx] = mop->handle;
-#else
-               if (unlikely(mcl[0].result != 0)) {
-                       DPRINTK("Bad page frame\n");
-                       make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-                       netif_put(netif);
-                       kfree_skb(skb);
-                       mcl++;
-                       pending_ring[MASK_PEND_IDX(pending_prod++)] =
-                               pending_idx;
-                       continue;
-               }
-
-               phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >>
-                                      PAGE_SHIFT] =
-                       FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
-#endif
 
                data_len = (txreq.size > PKT_PROT_LEN) ?
                        PKT_PROT_LEN : txreq.size;
 
                __skb_put(skb, data_len);
                memcpy(skb->data, 
-                      (void *)(MMAP_VADDR(pending_idx)|
-                               (txreq.addr&~PAGE_MASK)),
+                      (void *)(MMAP_VADDR(pending_idx)|txreq.offset),
                       data_len);
                if (data_len < txreq.size) {
                        /* Append the packet payload as a fragment. */
@@ -786,7 +640,7 @@
                        skb_shinfo(skb)->frags[0].size        =
                                txreq.size - data_len;
                        skb_shinfo(skb)->frags[0].page_offset = 
-                               (txreq.addr + data_len) & ~PAGE_MASK;
+                               txreq.offset + data_len;
                        skb_shinfo(skb)->nr_frags = 1;
                } else {
                        /* Schedule a response immediately. */
@@ -813,11 +667,7 @@
                netif_rx(skb);
                netif->dev->last_rx = jiffies;
 
-#ifdef CONFIG_XEN_NETDEV_GRANT
                mop++;
-#else
-               mcl++;
-#endif
        }
 }
 
@@ -874,7 +724,7 @@
 static int make_rx_response(netif_t *netif, 
                             u16      id, 
                             s8       st,
-                            unsigned long addr,
+                            u16      offset,
                             u16      size,
                             u16      csum_valid)
 {
@@ -882,7 +732,7 @@
        netif_rx_response_t *resp;
 
        resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
-       resp->addr       = addr;
+       resp->offset     = offset;
        resp->csum_valid = csum_valid;
        resp->id         = id;
        resp->status     = (s16)size;
@@ -937,9 +787,6 @@
                return 0;
 
        IPRINTK("Initialising Xen netif backend.\n");
-#ifdef CONFIG_XEN_NETDEV_GRANT
-       IPRINTK("Using grant tables.\n");
-#endif
 
        /* We can increase reservation by this much in net_rx_action(). */
        balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
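
The netback rework above drops the pre-grant-table code paths entirely and, in
the same stroke, changes the rx/tx ring requests from a packed machine address
(mfn << PAGE_SHIFT plus in-page offset) to an explicit grant reference and
offset pair. A small sketch of the new request shape and the page-crossing
check from net_tx_action(); struct tx_req and its values are illustrative
stand-ins, not the real netif_tx_request_t:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

struct tx_req {
        uint16_t gref;                  /* names the granted page */
        uint16_t offset;                /* locates the payload within it */
        uint16_t size;
};

static char page[PAGE_SIZE] __attribute__((aligned(4096)));

int main(void)
{
        char *data = page + 100;        /* pretend skb->data */
        struct tx_req tx = {
                .gref   = 42,           /* hypothetical grant reference */
                .offset = (uint16_t)((uintptr_t)data & ~PAGE_MASK),
                .size   = 1400,
        };

        /* The payload must not cross a page, as checked above. */
        if (tx.offset + tx.size >= PAGE_SIZE)
                puts("reject: payload crosses page boundary");
        else
                printf("gref=%u offset=%u size=%u\n",
                       (unsigned int)tx.gref, (unsigned int)tx.offset,
                       (unsigned int)tx.size);
        return 0;
}
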
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Thu Sep 22 15:12:14 2005
@@ -256,8 +256,8 @@
                for (i = np->tx_resp_cons; i != prod; i++) {
                        id  = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
                        skb = np->tx_skbs[id];
-#ifdef CONFIG_XEN_NETDEV_GRANT
-                       if (unlikely(gnttab_query_foreign_access(np->grant_tx_ref[id]) != 0)) {
+                       if (unlikely(gnttab_query_foreign_access(
+                               np->grant_tx_ref[id]) != 0)) {
                                printk(KERN_ALERT "network_tx_buf_gc: warning "
                                       "-- grant still in use by backend "
                                       "domain.\n");
@@ -268,7 +268,6 @@
                        gnttab_release_grant_reference(
                                &np->gref_tx_head, np->grant_tx_ref[id]);
                        np->grant_tx_ref[id] = GRANT_INVALID_REF;
-#endif
                        ADD_ID_TO_FREELIST(np->tx_skbs, id);
                        dev_kfree_skb_irq(skb);
                }
@@ -287,10 +286,7 @@
                mb();
        } while (prod != np->tx->resp_prod);
 
-#ifdef CONFIG_XEN_NETDEV_GRANT
  out: 
-#endif
-
        if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
                np->tx_full = 0;
                if (np->user_state == UST_OPEN)
@@ -307,9 +303,7 @@
        int i, batch_target;
        NETIF_RING_IDX req_prod = np->rx->req_prod;
        struct xen_memory_reservation reservation;
-#ifdef CONFIG_XEN_NETDEV_GRANT
        grant_ref_t ref;
-#endif
 
        if (unlikely(np->backend_state != BEST_CONNECTED))
                return;
@@ -343,13 +337,11 @@
                np->rx_skbs[id] = skb;
         
                np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
-#ifdef CONFIG_XEN_NETDEV_GRANT
                ref = gnttab_claim_grant_reference(&np->gref_rx_head);
                BUG_ON((signed short)ref < 0);
                np->grant_rx_ref[id] = ref;
                gnttab_grant_foreign_transfer_ref(ref, np->backend_id);
                np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref;
-#endif
                rx_pfn_array[i] = virt_to_mfn(skb->head);
 
                /* Remove this page from map before passing back to Xen. */
@@ -400,10 +392,8 @@
        struct net_private *np = netdev_priv(dev);
        netif_tx_request_t *tx;
        NETIF_RING_IDX i;
-#ifdef CONFIG_XEN_NETDEV_GRANT
        grant_ref_t ref;
        unsigned long mfn;
-#endif
 
        if (unlikely(np->tx_full)) {
                printk(KERN_ALERT "%s: full queue wasn't stopped!\n",
@@ -439,18 +429,13 @@
        tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
 
        tx->id   = id;
-#ifdef CONFIG_XEN_NETDEV_GRANT
        ref = gnttab_claim_grant_reference(&np->gref_tx_head);
        BUG_ON((signed short)ref < 0);
        mfn = virt_to_mfn(skb->data);
        gnttab_grant_foreign_access_ref(
                ref, np->backend_id, mfn, GNTMAP_readonly);
-       tx->addr = ref << PAGE_SHIFT;
-       np->grant_tx_ref[id] = ref;
-#else
-       tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
-#endif
-       tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
+       tx->gref = np->grant_tx_ref[id] = ref;
+       tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
        tx->size = skb->len;
        tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
 
@@ -511,10 +496,8 @@
        int work_done, budget, more_to_do = 1;
        struct sk_buff_head rxq;
        unsigned long flags;
-#ifdef CONFIG_XEN_NETDEV_GRANT
        unsigned long mfn;
        grant_ref_t ref;
-#endif
 
        spin_lock(&np->rx_lock);
 
@@ -550,7 +533,6 @@
                        continue;
                }
 
-#ifdef CONFIG_XEN_NETDEV_GRANT
                ref = np->grant_rx_ref[rx->id]; 
 
                if(ref == GRANT_INVALID_REF) { 
@@ -568,17 +550,12 @@
                np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
                mfn = gnttab_end_foreign_transfer_ref(ref);
                gnttab_release_grant_reference(&np->gref_rx_head, ref);
-#endif
 
                skb = np->rx_skbs[rx->id];
                ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
 
                /* NB. We handle skb overflow later. */
-#ifdef CONFIG_XEN_NETDEV_GRANT
-               skb->data = skb->head + rx->addr;
-#else
-               skb->data = skb->head + (rx->addr & ~PAGE_MASK);
-#endif
+               skb->data = skb->head + rx->offset;
                skb->len  = rx->status;
                skb->tail = skb->data + skb->len;
 
@@ -589,30 +566,14 @@
                np->stats.rx_bytes += rx->status;
 
                /* Remap the page. */
-#ifdef CONFIG_XEN_NETDEV_GRANT
                mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-#else
-               mmu->ptr  = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
-#endif
                mmu->val  = __pa(skb->head) >> PAGE_SHIFT;
                mmu++;
-#ifdef CONFIG_XEN_NETDEV_GRANT
                MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
                                        pfn_pte_ma(mfn, PAGE_KERNEL), 0);
-#else
-               MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
-                                       pfn_pte_ma(rx->addr >> PAGE_SHIFT, 
-                                                  PAGE_KERNEL), 0);
-#endif
                mcl++;
 
-#ifdef CONFIG_XEN_NETDEV_GRANT
                phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = mfn;
-#else
-               phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 
-                       rx->addr >> PAGE_SHIFT;
-#endif 
-
 
                __skb_queue_tail(&rxq, skb);
        }
@@ -773,16 +734,12 @@
                        tx = &np->tx->ring[requeue_idx++].req;
 
                        tx->id   = i;
-#ifdef CONFIG_XEN_NETDEV_GRANT
                        gnttab_grant_foreign_access_ref(
                                np->grant_tx_ref[i], np->backend_id, 
                                virt_to_mfn(np->tx_skbs[i]->data),
                                GNTMAP_readonly); 
-                       tx->addr = np->grant_tx_ref[i] << PAGE_SHIFT; 
-#else
-                       tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
-#endif
-                       tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
+                       tx->gref = np->grant_tx_ref[i];
+                       tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
                        tx->size = skb->len;
 
                        np->stats.tx_bytes += skb->len;
@@ -795,12 +752,10 @@
        /* Rebuild the RX buffer freelist and the RX ring itself. */
        for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) { 
                if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET) {
-#ifdef CONFIG_XEN_NETDEV_GRANT 
                        gnttab_grant_foreign_transfer_ref(
                                np->grant_rx_ref[i], np->backend_id);
                        np->rx->ring[requeue_idx].req.gref =
                                np->grant_rx_ref[i];
-#endif
                        np->rx->ring[requeue_idx].req.id = i;
                        requeue_idx++; 
                }
@@ -862,11 +817,9 @@
 
 static void netif_uninit(struct net_device *dev)
 {
-#ifdef CONFIG_XEN_NETDEV_GRANT
        struct net_private *np = netdev_priv(dev);
        gnttab_free_grant_references(np->gref_tx_head);
        gnttab_free_grant_references(np->gref_rx_head);
-#endif
 }
 
 static struct ethtool_ops network_ethtool_ops =
@@ -911,19 +864,14 @@
        /* Initialise {tx,rx}_skbs as a free chain containing every entry. */
        for (i = 0; i <= NETIF_TX_RING_SIZE; i++) {
                np->tx_skbs[i] = (void *)((unsigned long) i+1);
-#ifdef CONFIG_XEN_NETDEV_GRANT
                np->grant_tx_ref[i] = GRANT_INVALID_REF;
-#endif
        }
 
        for (i = 0; i <= NETIF_RX_RING_SIZE; i++) {
                np->rx_skbs[i] = (void *)((unsigned long) i+1);
-#ifdef CONFIG_XEN_NETDEV_GRANT
                np->grant_rx_ref[i] = GRANT_INVALID_REF;
-#endif
-       }
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
+       }
+
        /* A grant for every tx ring slot */
        if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
                                          &np->gref_tx_head) < 0) {
@@ -937,7 +885,6 @@
                gnttab_free_grant_references(np->gref_tx_head);
                goto exit;
        }
-#endif
 
        netdev->open            = network_open;
        netdev->hard_start_xmit = network_start_xmit;
@@ -971,10 +918,8 @@
        return err;
 
  exit_free_grefs:
-#ifdef CONFIG_XEN_NETDEV_GRANT
        gnttab_free_grant_references(np->gref_tx_head);
        gnttab_free_grant_references(np->gref_rx_head);
-#endif
        goto exit;
 }
 
@@ -1024,10 +969,8 @@
        evtchn_op_t op = { .cmd = EVTCHNOP_alloc_unbound };
        int err;
 
-#ifdef CONFIG_XEN_NETDEV_GRANT
        info->tx_ring_ref = GRANT_INVALID_REF;
        info->rx_ring_ref = GRANT_INVALID_REF;
-#endif
 
        info->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
        if (info->tx == 0) {
@@ -1045,7 +988,6 @@
        memset(info->rx, 0, PAGE_SIZE);
        info->backend_state = BEST_DISCONNECTED;
 
-#ifdef CONFIG_XEN_NETDEV_GRANT
        err = gnttab_grant_foreign_access(info->backend_id,
                                          virt_to_mfn(info->tx), 0);
        if (err < 0) {
@@ -1061,11 +1003,6 @@
                goto out;
        }
        info->rx_ring_ref = err;
-
-#else
-       info->tx_ring_ref = virt_to_mfn(info->tx);
-       info->rx_ring_ref = virt_to_mfn(info->rx);
-#endif
 
        op.u.alloc_unbound.dom = info->backend_id;
        err = HYPERVISOR_event_channel_op(&op);
@@ -1084,7 +1021,6 @@
                free_page((unsigned long)info->rx);
        info->rx = 0;
 
-#ifdef CONFIG_XEN_NETDEV_GRANT
        if (info->tx_ring_ref != GRANT_INVALID_REF)
                gnttab_end_foreign_access(info->tx_ring_ref, 0);
        info->tx_ring_ref = GRANT_INVALID_REF;
@@ -1092,7 +1028,6 @@
        if (info->rx_ring_ref != GRANT_INVALID_REF)
                gnttab_end_foreign_access(info->rx_ring_ref, 0);
        info->rx_ring_ref = GRANT_INVALID_REF;
-#endif
 
        return err;
 }
@@ -1106,7 +1041,6 @@
                free_page((unsigned long)info->rx);
        info->rx = 0;
 
-#ifdef CONFIG_XEN_NETDEV_GRANT
        if (info->tx_ring_ref != GRANT_INVALID_REF)
                gnttab_end_foreign_access(info->tx_ring_ref, 0);
        info->tx_ring_ref = GRANT_INVALID_REF;
@@ -1114,7 +1048,6 @@
        if (info->rx_ring_ref != GRANT_INVALID_REF)
                gnttab_end_foreign_access(info->rx_ring_ref, 0);
        info->rx_ring_ref = GRANT_INVALID_REF;
-#endif
 
        unbind_evtchn_from_irqhandler(info->evtchn, info->netdev);
        info->evtchn = 0;
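
With the CONFIG_XEN_NETDEV_GRANT conditionals removed, netfront now relies
on grant tables unconditionally.  Condensed from the hunks above, the
transmit path boils down to the following (a sketch only; error handling
and the end-of-access step are elided):

        /* Claim a reference from the per-device pool, grant the
         * backend read-only access to the page holding the frame, and
         * describe the frame as (gref, offset, size) rather than as a
         * machine address. */
        ref = gnttab_claim_grant_reference(&np->gref_tx_head);
        gnttab_grant_foreign_access_ref(
                ref, np->backend_id, virt_to_mfn(skb->data), GNTMAP_readonly);
        tx->gref   = np->grant_tx_ref[id] = ref;
        tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
        tx->size   = skb->len;

        /* On completion, network_tx_buf_gc() returns the reference to
         * the pool once the backend no longer holds it. */
        gnttab_release_grant_reference(&np->gref_tx_head, np->grant_tx_ref[id]);
        np->grant_tx_ref[id] = GRANT_INVALID_REF;
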
diff -r eba5afe9aa37 -r 10759a44ce3b 
linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c
--- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c        Thu Sep 22 
15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c        Thu Sep 22 
15:12:14 2005
@@ -41,232 +41,253 @@
 static int privcmd_ioctl(struct inode *inode, struct file *file,
                          unsigned int cmd, unsigned long data)
 {
-    int ret = -ENOSYS;
-
-    switch ( cmd )
-    {
-    case IOCTL_PRIVCMD_HYPERCALL:
-    {
-        privcmd_hypercall_t hypercall;
+       int ret = -ENOSYS;
+
+       switch (cmd) {
+       case IOCTL_PRIVCMD_HYPERCALL: {
+               privcmd_hypercall_t hypercall;
   
-        if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
-            return -EFAULT;
+               if (copy_from_user(&hypercall, (void *)data,
+                                  sizeof(hypercall)))
+                       return -EFAULT;
 
 #if defined(__i386__)
-        __asm__ __volatile__ (
-            "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
-            "movl  4(%%eax),%%ebx ;"
-            "movl  8(%%eax),%%ecx ;"
-            "movl 12(%%eax),%%edx ;"
-            "movl 16(%%eax),%%esi ;"
-            "movl 20(%%eax),%%edi ;"
-            "movl   (%%eax),%%eax ;"
-            TRAP_INSTR "; "
-            "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
-            : "=a" (ret) : "0" (&hypercall) : "memory" );
+               __asm__ __volatile__ (
+                       "pushl %%ebx; pushl %%ecx; pushl %%edx; "
+                       "pushl %%esi; pushl %%edi; "
+                       "movl  4(%%eax),%%ebx ;"
+                       "movl  8(%%eax),%%ecx ;"
+                       "movl 12(%%eax),%%edx ;"
+                       "movl 16(%%eax),%%esi ;"
+                       "movl 20(%%eax),%%edi ;"
+                       "movl   (%%eax),%%eax ;"
+                       TRAP_INSTR "; "
+                       "popl %%edi; popl %%esi; popl %%edx; "
+                       "popl %%ecx; popl %%ebx"
+                       : "=a" (ret) : "0" (&hypercall) : "memory" );
 #elif defined (__x86_64__)
-        {
-            long ign1, ign2, ign3;
-            __asm__ __volatile__ (
-                "movq %8,%%r10; movq %9,%%r8;" TRAP_INSTR
-                : "=a" (ret), "=D" (ign1), "=S" (ign2), "=d" (ign3)
-                : "0" ((unsigned long)hypercall.op), 
-                "1" ((unsigned long)hypercall.arg[0]), 
-                "2" ((unsigned long)hypercall.arg[1]),
-                "3" ((unsigned long)hypercall.arg[2]), 
-                "g" ((unsigned long)hypercall.arg[3]),
-                "g" ((unsigned long)hypercall.arg[4])
-                : "r11","rcx","r8","r10","memory");
-        }
+               {
+                       long ign1, ign2, ign3;
+                       __asm__ __volatile__ (
+                               "movq %8,%%r10; movq %9,%%r8;" TRAP_INSTR
+                               : "=a" (ret), "=D" (ign1),
+                                 "=S" (ign2), "=d" (ign3)
+                               : "0" ((unsigned long)hypercall.op), 
+                               "1" ((unsigned long)hypercall.arg[0]), 
+                               "2" ((unsigned long)hypercall.arg[1]),
+                               "3" ((unsigned long)hypercall.arg[2]), 
+                               "g" ((unsigned long)hypercall.arg[3]),
+                               "g" ((unsigned long)hypercall.arg[4])
+                               : "r11","rcx","r8","r10","memory");
+               }
 #elif defined (__ia64__)
-       __asm__ __volatile__ (
-           ";; mov r14=%2; mov r15=%3; mov r16=%4; mov r17=%5; mov r18=%6;"
-           "mov r2=%1; break 0x1000;; mov %0=r8 ;;"
-           : "=r" (ret)
-           : "r" (hypercall.op),
-             "r" (hypercall.arg[0]),
-             "r" (hypercall.arg[1]),
-             "r" (hypercall.arg[2]),
-             "r" (hypercall.arg[3]),
-             "r" (hypercall.arg[4])
-           : "r14","r15","r16","r17","r18","r2","r8","memory");
+               __asm__ __volatile__ (
+                       ";; mov r14=%2; mov r15=%3; "
+                       "mov r16=%4; mov r17=%5; mov r18=%6;"
+                       "mov r2=%1; break 0x1000;; mov %0=r8 ;;"
+                       : "=r" (ret)
+                       : "r" (hypercall.op),
+                       "r" (hypercall.arg[0]),
+                       "r" (hypercall.arg[1]),
+                       "r" (hypercall.arg[2]),
+                       "r" (hypercall.arg[3]),
+                       "r" (hypercall.arg[4])
+                       : "r14","r15","r16","r17","r18","r2","r8","memory");
 #endif
-    }
-    break;
+       }
+       break;
 
 #if defined(CONFIG_XEN_PRIVILEGED_GUEST)
-    case IOCTL_PRIVCMD_MMAP:
-    {
+       case IOCTL_PRIVCMD_MMAP: {
 #define PRIVCMD_MMAP_SZ 32
-        privcmd_mmap_t mmapcmd;
-        privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
-        int i, rc;
-
-        if ( copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)) )
-            return -EFAULT;
-
-        p = mmapcmd.entry;
-
-        for (i=0; i<mmapcmd.num; i+=PRIVCMD_MMAP_SZ, p+=PRIVCMD_MMAP_SZ)
-        {
-            int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
-                PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
-
-
-            if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
-                return -EFAULT;
+               privcmd_mmap_t mmapcmd;
+               privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
+               int i, rc;
+
+               if (copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)))
+                       return -EFAULT;
+
+               p = mmapcmd.entry;
+
+               for (i = 0; i < mmapcmd.num;
+                    i += PRIVCMD_MMAP_SZ, p += PRIVCMD_MMAP_SZ) {
+                       int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
+                               PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
+
+                       if (copy_from_user(&msg, p,
+                                          n*sizeof(privcmd_mmap_entry_t)))
+                               return -EFAULT;
      
-            for ( j = 0; j < n; j++ )
-            {
-                struct vm_area_struct *vma = 
-                    find_vma( current->mm, msg[j].va );
-
-                if ( !vma )
-                    return -EINVAL;
-
-                if ( msg[j].va > PAGE_OFFSET )
-                    return -EINVAL;
-
-                if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
-                    return -EINVAL;
-
-                if ( (rc = direct_remap_pfn_range(vma,
-                                                  msg[j].va&PAGE_MASK, 
-                                                  msg[j].mfn, 
-                                                  msg[j].npages<<PAGE_SHIFT, 
-                                                  vma->vm_page_prot,
-                                                  mmapcmd.dom)) < 0 )
-                    return rc;
-            }
-        }
-        ret = 0;
-    }
-    break;
-
-    case IOCTL_PRIVCMD_MMAPBATCH:
-    {
-        mmu_update_t u;
-        privcmd_mmapbatch_t m;
-        struct vm_area_struct *vma = NULL;
-        unsigned long *p, addr;
-        unsigned long mfn, ptep;
-        int i;
-
-        if ( copy_from_user(&m, (void *)data, sizeof(m)) )
-        { ret = -EFAULT; goto batch_err; }
-
-        vma = find_vma( current->mm, m.addr );
-
-        if ( !vma )
-        { ret = -EINVAL; goto batch_err; }
-
-        if ( m.addr > PAGE_OFFSET )
-        { ret = -EFAULT; goto batch_err; }
-
-        if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
-        { ret = -EFAULT; goto batch_err; }
-
-        p = m.arr;
-        addr = m.addr;
-        for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
-        {
-            if ( get_user(mfn, p) )
-                return -EFAULT;
-
-            ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
-            if (ret)
-                goto batch_err;
-
-            u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot));
-            u.ptr = ptep;
-
-            if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0) )
-                put_user(0xF0000000 | mfn, p);
-        }
-
-        ret = 0;
-        break;
-
-    batch_err:
-        printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n", 
-               ret, vma, m.addr, m.num, m.arr,
-               vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
-        break;
-    }
-    break;
+                       for (j = 0; j < n; j++) {
+                               struct vm_area_struct *vma = 
+                                       find_vma( current->mm, msg[j].va );
+
+                               if (!vma)
+                                       return -EINVAL;
+
+                               if (msg[j].va > PAGE_OFFSET)
+                                       return -EINVAL;
+
+                               if ((msg[j].va + (msg[j].npages << PAGE_SHIFT))
+                                   > vma->vm_end )
+                                       return -EINVAL;
+
+                               if ((rc = direct_remap_pfn_range(
+                                       vma,
+                                       msg[j].va&PAGE_MASK, 
+                                       msg[j].mfn, 
+                                       msg[j].npages<<PAGE_SHIFT, 
+                                       vma->vm_page_prot,
+                                       mmapcmd.dom)) < 0)
+                                       return rc;
+                       }
+               }
+               ret = 0;
+       }
+       break;
+
+       case IOCTL_PRIVCMD_MMAPBATCH: {
+               mmu_update_t u;
+               privcmd_mmapbatch_t m;
+               struct vm_area_struct *vma = NULL;
+               unsigned long *p, addr;
+               unsigned long mfn, ptep;
+               int i;
+
+               if (copy_from_user(&m, (void *)data, sizeof(m))) {
+                       ret = -EFAULT;
+                       goto batch_err;
+               }
+
+               vma = find_vma( current->mm, m.addr );
+               if (!vma) {
+                       ret = -EINVAL;
+                       goto batch_err;
+               }
+
+               if (m.addr > PAGE_OFFSET) {
+                       ret = -EFAULT;
+                       goto batch_err;
+               }
+
+               if ((m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end) {
+                       ret = -EFAULT;
+                       goto batch_err;
+               }
+
+               p = m.arr;
+               addr = m.addr;
+               for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) {
+                       if (get_user(mfn, p))
+                               return -EFAULT;
+
+                       ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
+                       if (ret)
+                               goto batch_err;
+
+                       u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot));
+                       u.ptr = ptep;
+
+                       if (HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0)
+                               put_user(0xF0000000 | mfn, p);
+               }
+
+               ret = 0;
+               break;
+
+       batch_err:
+               printk("batch_err ret=%d vma=%p addr=%lx "
+                      "num=%d arr=%p %lx-%lx\n", 
+                      ret, vma, m.addr, m.num, m.arr,
+                      vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
+               break;
+       }
+       break;
 #endif
 
-    case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
-    {
-        unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
-        pgd_t *pgd = pgd_offset_k(m2pv);
-        pud_t *pud = pud_offset(pgd, m2pv);
-        pmd_t *pmd = pmd_offset(pud, m2pv);
-        unsigned long m2p_start_mfn = (*(unsigned long *)pmd) >> PAGE_SHIFT; 
-        ret = put_user(m2p_start_mfn, (unsigned long *)data) ? -EFAULT: 0;
-    }
-    break;
-
-    case IOCTL_PRIVCMD_INITDOMAIN_STORE:
-    {
-        extern int do_xenbus_probe(void*);
-        unsigned long page;
-
-        if (xen_start_info->store_evtchn != 0) {
-            ret = xen_start_info->store_mfn;
-            break;
-        }
-
-        /* Allocate page. */
-        page = get_zeroed_page(GFP_KERNEL);
-        if (!page) {
-            ret = -ENOMEM;
-            break;
-        }
-
-        /* We don't refcnt properly, so set reserved on page.
-         * (this allocation is permanent) */
-        SetPageReserved(virt_to_page(page));
-
-        /* Initial connect. Setup channel and page. */
-        xen_start_info->store_evtchn = data;
-        xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >>
-                                              PAGE_SHIFT);
-        ret = xen_start_info->store_mfn;
-
-        /* We'll return then this will wait for daemon to answer */
-        kthread_run(do_xenbus_probe, NULL, "xenbus_probe");
-    }
-    break;
-
-    default:
-        ret = -EINVAL;
-        break;
-    }
-    return ret;
+       case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN: {
+               unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
+               pgd_t *pgd = pgd_offset_k(m2pv);
+               pud_t *pud = pud_offset(pgd, m2pv);
+               pmd_t *pmd = pmd_offset(pud, m2pv);
+               unsigned long m2p_start_mfn =
+                       (*(unsigned long *)pmd) >> PAGE_SHIFT; 
+               ret = put_user(m2p_start_mfn, (unsigned long *)data) ?
+                       -EFAULT: 0;
+       }
+       break;
+
+       case IOCTL_PRIVCMD_INITDOMAIN_STORE: {
+               extern int do_xenbus_probe(void*);
+               unsigned long page;
+
+               if (xen_start_info->store_evtchn != 0) {
+                       ret = xen_start_info->store_mfn;
+                       break;
+               }
+
+               /* Allocate page. */
+               page = get_zeroed_page(GFP_KERNEL);
+               if (!page) {
+                       ret = -ENOMEM;
+                       break;
+               }
+
+               /* We don't refcnt properly, so set reserved on page.
+                * (this allocation is permanent) */
+               SetPageReserved(virt_to_page(page));
+
+               /* Initial connect. Setup channel and page. */
+               xen_start_info->store_evtchn = data;
+               xen_start_info->store_mfn =
+                       pfn_to_mfn(virt_to_phys((void *)page) >>
+                                  PAGE_SHIFT);
+               ret = xen_start_info->store_mfn;
+
+               /* We'll return then this will wait for daemon to answer */
+               kthread_run(do_xenbus_probe, NULL, "xenbus_probe");
+       }
+       break;
+
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
 }
 
 static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
 {
-    /* DONTCOPY is essential for Xen as copy_page_range is broken. */
-    vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
-
-    return 0;
+       /* DONTCOPY is essential for Xen as copy_page_range is broken. */
+       vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
+
+       return 0;
 }
 
 static struct file_operations privcmd_file_ops = {
-    .ioctl = privcmd_ioctl,
-    .mmap  = privcmd_mmap,
+       .ioctl = privcmd_ioctl,
+       .mmap  = privcmd_mmap,
 };
 
 
 static int __init privcmd_init(void)
 {
-    privcmd_intf = create_xen_proc_entry("privcmd", 0400);
-    if ( privcmd_intf != NULL )
-        privcmd_intf->proc_fops = &privcmd_file_ops;
-
-    return 0;
+       privcmd_intf = create_xen_proc_entry("privcmd", 0400);
+       if (privcmd_intf != NULL)
+               privcmd_intf->proc_fops = &privcmd_file_ops;
+
+       return 0;
 }
 
 __initcall(privcmd_init);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
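
Apart from the Emacs footer, the privcmd.c changes above are a mechanical
reindent from the old four-space style to Linux kernel style; the only
behavioural nit is the dropped unlikely() around the HYPERVISOR_mmu_update
check in the MMAPBATCH loop.  For orientation, a hypothetical userspace
caller drives the hypercall path roughly as follows (a sketch assuming the
proc node registered by privcmd_init() above and the privcmd_hypercall_t
layout from the Xen public headers; not part of the patch):

        #include <fcntl.h>
        #include <string.h>
        #include <sys/ioctl.h>

        privcmd_hypercall_t call;
        memset(&call, 0, sizeof(call));
        call.op = __HYPERVISOR_xen_version;  /* any hypercall number */
        int fd = open("/proc/xen/privcmd", O_RDWR);
        long ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, (unsigned long)&call);
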
diff -r eba5afe9aa37 -r 10759a44ce3b 
linux-2.6-xen-sparse/drivers/xen/tpmback/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h Thu Sep 22 15:12:14 2005
@@ -84,3 +84,13 @@
 #define MMAP_VADDR(t,_req) ((t)->mmap_vstart + ((_req) * PAGE_SIZE))
 
 #endif /* __TPMIF__BACKEND__COMMON_H__ */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b 
linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c
--- a/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c        Thu Sep 22 
15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c        Thu Sep 22 
15:12:14 2005
@@ -566,7 +566,7 @@
                                 * the more time we give the TPM to process the request.
                                 */
                                mod_timer(&pak->processing_timer,
-                                         jiffies + (num_frontends * 10 * HZ));
+                                         jiffies + (num_frontends * 60 * HZ));
                                dataex.copied_so_far = 0;
                        }
                }
@@ -850,7 +850,7 @@
                write_lock_irqsave(&dataex.pak_lock, flags);
                list_add_tail(&pak->next, &dataex.pending_pak);
                /* give the TPM some time to pick up the request */
-               mod_timer(&pak->processing_timer, jiffies + (10 * HZ));
+               mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
                write_unlock_irqrestore(&dataex.pak_lock,
                                        flags);
 
@@ -1075,3 +1075,13 @@
 }
 
 __initcall(tpmback_init);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
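
The two functional hunks in tpmback.c above only lengthen timeouts: the
rearm taken while a response is still being copied grows from 10 to 60
seconds per connected frontend, and the initial window for the TPM to pick
up a queued request grows from 10 to 30 seconds.  Since HZ jiffies equal
one second, the new pickup window reads:

        /* 30-second pickup window (was 10 * HZ): */
        mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
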
diff -r eba5afe9aa37 -r 10759a44ce3b 
linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c
--- a/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c Thu Sep 22 15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c Thu Sep 22 15:12:14 2005
@@ -268,3 +268,13 @@
 {
        xenbus_register_backend(&tpmback);
 }
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b 
linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c      Thu Sep 22 
15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c      Thu Sep 22 
15:12:14 2005
@@ -741,3 +741,13 @@
 }
 
 __initcall(tpmif_init);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b 
linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h
--- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h      Thu Sep 22 
15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h      Thu Sep 22 
15:12:14 2005
@@ -38,3 +38,13 @@
 };
 
 #endif
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b 
linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c    Thu Sep 22 
15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c    Thu Sep 22 
15:12:14 2005
@@ -231,3 +231,13 @@
 
        unbind_evtchn_from_irqhandler(xen_start_info->store_evtchn, &xb_waitq);
 }
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b 
linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h    Thu Sep 22 
15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h    Thu Sep 22 
15:12:14 2005
@@ -39,3 +39,13 @@
 extern wait_queue_head_t xb_waitq;
 
 #endif /* _XENBUS_COMMS_H */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b 
linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c      Thu Sep 22 
15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c      Thu Sep 22 
15:12:14 2005
@@ -186,3 +186,13 @@
 }
 
 __initcall(xenbus_dev_init);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b 
linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c    Thu Sep 22 
15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c    Thu Sep 22 
15:12:14 2005
@@ -687,3 +687,13 @@
 }
 
 postcore_initcall(xenbus_probe_init);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b 
linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c       Thu Sep 22 
15:05:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c       Thu Sep 22 
15:12:14 2005
@@ -566,3 +566,13 @@
                return PTR_ERR(watcher);
        return 0;
 }
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r eba5afe9aa37 -r 10759a44ce3b tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py       Thu Sep 22 15:05:44 2005
+++ b/tools/python/xen/xend/XendDomain.py       Thu Sep 22 15:12:14 2005
@@ -305,6 +305,13 @@
 
         @param vmconfig: vm configuration
         """
+        # We accept our configuration specified as ['config' [...]], which
+        # some tools or configuration files may be using.  For save-restore,
+        # we use the value of XendDomainInfo.sxpr() directly, which has no
+        # such item.
+        nested = sxp.child_value(config, 'config')
+        if nested:
+            config = nested
         return XendDomainInfo.restore(self.dbmap.getPath(), config)
 
     def domain_restore(self, src, progress=False):
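
The hunk above makes the restore path accept a configuration either as the
bare sxpr that XendDomainInfo.sxpr() emits or as the same data wrapped in a
['config', ...] element, unwrapping the latter before handing it to
XendDomainInfo.restore().
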
diff -r eba5afe9aa37 -r 10759a44ce3b xen/include/public/io/netif.h
--- a/xen/include/public/io/netif.h     Thu Sep 22 15:05:44 2005
+++ b/xen/include/public/io/netif.h     Thu Sep 22 15:12:14 2005
@@ -10,10 +10,11 @@
 #define __XEN_PUBLIC_IO_NETIF_H__
 
 typedef struct netif_tx_request {
-    unsigned long addr;   /* Machine address of packet.  */
+    grant_ref_t gref;      /* Reference to buffer page */
+    u16      offset:15;    /* Offset within buffer page */
     u16      csum_blank:1; /* Proto csum field blank?   */
-    u16      id:15;  /* Echoed in response message. */
-    u16      size;   /* Packet size in bytes.       */
+    u16      id;           /* Echoed in response message. */
+    u16      size;         /* Packet size in bytes.       */
 } netif_tx_request_t;
 
 typedef struct netif_tx_response {
@@ -22,21 +23,15 @@
 } netif_tx_response_t;
 
 typedef struct {
-    u16       id;    /* Echoed in response message.        */
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    grant_ref_t gref;  /* 2: Reference to incoming granted frame */
-#endif
+    u16       id;       /* Echoed in response message.        */
+    grant_ref_t gref;  /* Reference to incoming granted frame */
 } netif_rx_request_t;
 
 typedef struct {
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    u32      addr;   /*  0: Offset in page of start of received packet  */
-#else
-    unsigned long addr; /* Machine address of packet.              */
-#endif
-    u16      csum_valid:1; /* Protocol checksum is validated?       */
-    u16      id:15;
-    s16      status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
+    u16      offset;     /* Offset in page of start of received packet  */
+    u16      csum_valid; /* Protocol checksum is validated?       */
+    u16      id;
+    s16      status;     /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
 } netif_rx_response_t;
 
 /*
@@ -53,18 +48,8 @@
 #define MASK_NETIF_RX_IDX(_i) ((_i)&(NETIF_RX_RING_SIZE-1))
 #define MASK_NETIF_TX_IDX(_i) ((_i)&(NETIF_TX_RING_SIZE-1))
 
-#ifdef __x86_64__
-/*
- * This restriction can be lifted when we move netfront/netback to use
- * grant tables. This will remove memory_t fields from the above structures
- * and thus relax natural alignment restrictions.
- */
-#define NETIF_TX_RING_SIZE 128
-#define NETIF_RX_RING_SIZE 128
-#else
 #define NETIF_TX_RING_SIZE 256
 #define NETIF_RX_RING_SIZE 256
-#endif
 
 /* This structure must fit in a memory page. */
 typedef struct netif_tx_interface {
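
Two details of the new wire format above are worth noting.  In
netif_tx_request the 16-bit word is now split as offset:15 plus
csum_blank:1, which comfortably covers any byte offset within a 4 KiB page
(offset = addr & ~PAGE_MASK < 4096 < 2^15), while id and size regain full
16-bit fields of their own.  And with the machine-address fields gone, the
natural-alignment restriction cited in the removed comment no longer
applies, so x86_64 joins the other architectures at 256-entry rings.
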
diff -r eba5afe9aa37 -r 10759a44ce3b 
linux-2.6-xen-sparse/drivers/xen/usbback/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/usbback/common.h Thu Sep 22 15:05:44 2005
+++ /dev/null   Thu Sep 22 15:12:14 2005
@@ -1,84 +0,0 @@
-
-#ifndef __USBIF__BACKEND__COMMON_H__
-#define __USBIF__BACKEND__COMMON_H__
-
-#include <linux/config.h>
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/rbtree.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <asm/io.h>
-#include <asm/setup.h>
-#include <asm/pgalloc.h>
-#include <asm/hypervisor.h>
-#include <asm-xen/driver_util.h>
-#include <asm-xen/xen-public/io/usbif.h>
-
-#if 0
-#define ASSERT(_p) \
-    if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
-    __LINE__, __FILE__); *(int*)0=0; }
-#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
-                           __FILE__ , __LINE__ , ## _a )
-#else
-#define ASSERT(_p) ((void)0)
-#define DPRINTK(_f, _a...) ((void)0)
-#endif
-
-typedef struct usbif_priv_st usbif_priv_t;
-
-struct usbif_priv_st {
-    /* Unique identifier for this interface. */
-    domid_t          domid;
-    unsigned int     handle;
-    /* Physical parameters of the comms window. */
-    unsigned long    shmem_frame;
-    unsigned int     evtchn;
-    /* Comms Information */
-    usbif_back_ring_t usb_ring;
-    struct vm_struct *usb_ring_area;
-    /* Private fields. */
-    enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
-    /*
-     * DISCONNECT response is deferred until pending requests are ack'ed.
-     * We therefore need to store the id from the original request.
-     */
-    u8                   disconnect_rspid;
-    usbif_priv_t        *hash_next;
-    struct list_head     usbif_list;
-    spinlock_t           usb_ring_lock;
-    atomic_t             refcnt;
-
-    struct work_struct work;
-};
-
-void usbif_create(usbif_be_create_t *create);
-void usbif_destroy(usbif_be_destroy_t *destroy);
-void usbif_connect(usbif_be_connect_t *connect);
-int  usbif_disconnect(usbif_be_disconnect_t *disconnect, u8 rsp_id);
-void usbif_disconnect_complete(usbif_priv_t *up);
-
-void usbif_release_port(usbif_be_release_port_t *msg);
-int usbif_claim_port(usbif_be_claim_port_t *msg);
-void usbif_release_ports(usbif_priv_t *up);
-
-usbif_priv_t *usbif_find(domid_t domid);
-#define usbif_get(_b) (atomic_inc(&(_b)->refcnt))
-#define usbif_put(_b)                             \
-    do {                                          \
-        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
-            usbif_disconnect_complete(_b);        \
-    } while (0)
-
-
-void usbif_interface_init(void);
-void usbif_ctrlif_init(void);
-
-void usbif_deschedule(usbif_priv_t *up);
-void remove_from_usbif_list(usbif_priv_t *up);
-
-irqreturn_t usbif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-
-#endif /* __USBIF__BACKEND__COMMON_H__ */
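
The remaining hunks delete the old control-message-based USB backend
outright: common.h above, then control.c, interface.c and usbback.c all go
to /dev/null.  (Note in passing that the deleted usbif_connect() referred
to area->addr where only up->usb_ring_area was in scope, so this code
appears to have already bit-rotted.)
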
diff -r eba5afe9aa37 -r 10759a44ce3b 
linux-2.6-xen-sparse/drivers/xen/usbback/control.c
--- a/linux-2.6-xen-sparse/drivers/xen/usbback/control.c        Thu Sep 22 
15:05:44 2005
+++ /dev/null   Thu Sep 22 15:12:14 2005
@@ -1,61 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/usbif/backend/control.c
- * 
- * Routines for interfacing with the control plane.
- * 
- * Copyright (c) 2004, Keir Fraser
- */
-
-#include "common.h"
-
-static void usbif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
-{
-    DPRINTK("Received usbif backend message, subtype=%d\n", msg->subtype);
-    
-    switch ( msg->subtype )
-    {
-    case CMSG_USBIF_BE_CREATE:
-        usbif_create((usbif_be_create_t *)&msg->msg[0]);
-        break;        
-    case CMSG_USBIF_BE_DESTROY:
-        usbif_destroy((usbif_be_destroy_t *)&msg->msg[0]);
-        break;        
-    case CMSG_USBIF_BE_CONNECT:
-        usbif_connect((usbif_be_connect_t *)&msg->msg[0]);
-        break;        
-    case CMSG_USBIF_BE_DISCONNECT:
-        if ( !usbif_disconnect((usbif_be_disconnect_t *)&msg->msg[0],msg->id) )
-            return; /* Sending the response is deferred until later. */
-        break;        
-    case CMSG_USBIF_BE_CLAIM_PORT:
-       usbif_claim_port((usbif_be_claim_port_t *)&msg->msg[0]);
-        break;
-    case CMSG_USBIF_BE_RELEASE_PORT:
-        usbif_release_port((usbif_be_release_port_t *)&msg->msg[0]);
-        break;
-    default:
-        DPRINTK("Parse error while reading message subtype %d, len %d\n",
-                msg->subtype, msg->length);
-        msg->length = 0;
-        break;
-    }
-
-    ctrl_if_send_response(msg);
-}
-
-void usbif_ctrlif_init(void)
-{
-    ctrl_msg_t                       cmsg;
-    usbif_be_driver_status_changed_t st;
-
-    (void)ctrl_if_register_receiver(CMSG_USBIF_BE, usbif_ctrlif_rx, 
-                                    CALLBACK_IN_BLOCKING_CONTEXT);
-
-    /* Send a driver-UP notification to the domain controller. */
-    cmsg.type      = CMSG_USBIF_BE;
-    cmsg.subtype   = CMSG_USBIF_BE_DRIVER_STATUS_CHANGED;
-    cmsg.length    = sizeof(usbif_be_driver_status_changed_t);
-    st.status      = USBIF_DRIVER_STATUS_UP;
-    memcpy(cmsg.msg, &st, sizeof(st));
-    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
-}
diff -r eba5afe9aa37 -r 10759a44ce3b 
linux-2.6-xen-sparse/drivers/xen/usbback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/usbback/interface.c      Thu Sep 22 
15:05:44 2005
+++ /dev/null   Thu Sep 22 15:12:14 2005
@@ -1,241 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/usbif/backend/interface.c
- * 
- * USB device interface management.
- * 
- * by Mark Williamson, Copyright (c) 2004
- */
-
-#include "common.h"
-
-#define USBIF_HASHSZ 1024
-#define USBIF_HASH(_d) (((int)(_d))&(USBIF_HASHSZ-1))
-
-static kmem_cache_t      *usbif_priv_cachep;
-static usbif_priv_t      *usbif_priv_hash[USBIF_HASHSZ];
-
-usbif_priv_t *usbif_find(domid_t domid)
-{
-    usbif_priv_t *up = usbif_priv_hash[USBIF_HASH(domid)];
-    while ( (up != NULL ) && ( up->domid != domid ) )
-        up = up->hash_next;
-    return up;
-}
-
-static void __usbif_disconnect_complete(void *arg)
-{
-    usbif_priv_t         *usbif = (usbif_priv_t *)arg;
-    ctrl_msg_t            cmsg;
-    usbif_be_disconnect_t disc;
-
-    /*
-     * These can't be done in usbif_disconnect() because at that point there
-     * may be outstanding requests at the device whose asynchronous responses
-     * must still be notified to the remote driver.
-     */
-    free_vm_area(usbif->usb_ring_area);
-
-    /* Construct the deferred response message. */
-    cmsg.type         = CMSG_USBIF_BE;
-    cmsg.subtype      = CMSG_USBIF_BE_DISCONNECT;
-    cmsg.id           = usbif->disconnect_rspid;
-    cmsg.length       = sizeof(usbif_be_disconnect_t);
-    disc.domid        = usbif->domid;
-    disc.status       = USBIF_BE_STATUS_OKAY;
-    memcpy(cmsg.msg, &disc, sizeof(disc));
-
-    /*
-     * Make sure message is constructed /before/ status change, because
-     * after the status change the 'usbif' structure could be deallocated at
-     * any time. Also make sure we send the response /after/ status change,
-     * as otherwise a subsequent CONNECT request could spuriously fail if
-     * another CPU doesn't see the status change yet.
-     */
-    mb();
-    if ( usbif->status != DISCONNECTING )
-        BUG();
-    usbif->status = DISCONNECTED;
-    mb();
-
-    /* Send the successful response. */
-    ctrl_if_send_response(&cmsg);
-}
-
-void usbif_disconnect_complete(usbif_priv_t *up)
-{
-    INIT_WORK(&up->work, __usbif_disconnect_complete, (void *)up);
-    schedule_work(&up->work);
-}
-
-void usbif_create(usbif_be_create_t *create)
-{
-    domid_t       domid  = create->domid;
-    usbif_priv_t **pup, *up;
-
-    if ( (up = kmem_cache_alloc(usbif_priv_cachep, GFP_KERNEL)) == NULL )
-    {
-        DPRINTK("Could not create usbif: out of memory\n");
-        create->status = USBIF_BE_STATUS_OUT_OF_MEMORY;
-        return;
-    }
-
-    memset(up, 0, sizeof(*up));
-    up->domid  = domid;
-    up->status = DISCONNECTED;
-    spin_lock_init(&up->usb_ring_lock);
-    atomic_set(&up->refcnt, 0);
-
-    pup = &usbif_priv_hash[USBIF_HASH(domid)];
-    while ( *pup != NULL )
-    {
-        if ( (*pup)->domid == domid )
-        {
-            create->status = USBIF_BE_STATUS_INTERFACE_EXISTS;
-            kmem_cache_free(usbif_priv_cachep, up);
-            return;
-        }
-        pup = &(*pup)->hash_next;
-    }
-
-    up->hash_next = *pup;
-    *pup = up;
-
-    create->status = USBIF_BE_STATUS_OKAY;
-}
-
-void usbif_destroy(usbif_be_destroy_t *destroy)
-{
-    domid_t       domid  = destroy->domid;
-    usbif_priv_t  **pup, *up;
-
-    pup = &usbif_priv_hash[USBIF_HASH(domid)];
-    while ( (up = *pup) != NULL )
-    {
-        if ( up->domid == domid )
-        {
-            if ( up->status != DISCONNECTED )
-                goto still_connected;
-            goto destroy;
-        }
-        pup = &up->hash_next;
-    }
-
-    destroy->status = USBIF_BE_STATUS_INTERFACE_NOT_FOUND;
-    return;
-
- still_connected:
-    destroy->status = USBIF_BE_STATUS_INTERFACE_CONNECTED;
-    return;
-
- destroy:
-    *pup = up->hash_next;
-    usbif_release_ports(up);
-    kmem_cache_free(usbif_priv_cachep, up);
-    destroy->status = USBIF_BE_STATUS_OKAY;
-}
-
-void usbif_connect(usbif_be_connect_t *connect)
-{
-    domid_t       domid  = connect->domid;
-    unsigned int  evtchn = connect->evtchn;
-    unsigned long shmem_frame = connect->shmem_frame;
-    pgprot_t      prot;
-    int           error;
-    usbif_priv_t *up;
-    usbif_sring_t *sring;
-
-    up = usbif_find(domid);
-    if ( unlikely(up == NULL) )
-    {
-        DPRINTK("usbif_connect attempted for non-existent usbif (%u)\n", 
-                connect->domid); 
-        connect->status = USBIF_BE_STATUS_INTERFACE_NOT_FOUND;
-        return;
-    }
-
-    if ( (up->usb_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
-    {
-        connect->status = USBIF_BE_STATUS_OUT_OF_MEMORY;
-        return;
-    }
-
-    prot = __pgprot(_KERNPG_TABLE);
-    error = direct_remap_pfn_range(&init_mm, AREALLOC_AREADDR(area->addr),
-                                    shmem_frame, PAGE_SIZE,
-                                    prot, domid);
-    if ( error != 0 )
-    {
-        if ( error == -ENOMEM )
-            connect->status = USBIF_BE_STATUS_OUT_OF_MEMORY;
-        else if ( error == -EFAULT )
-            connect->status = USBIF_BE_STATUS_MAPPING_ERROR;
-        else
-            connect->status = USBIF_BE_STATUS_ERROR;
-        free_vm_area(up->usb_ring_area);
-        return;
-    }
-
-    if ( up->status != DISCONNECTED )
-    {
-        connect->status = USBIF_BE_STATUS_INTERFACE_CONNECTED;
-        free_vm_area(up->usb_ring_area);
-        return;
-    }
-
-    sring = (usbif_sring_t *)area->addr;
-    SHARED_RING_INIT(sring);
-    BACK_RING_INIT(&up->usb_ring, sring, PAGE_SIZE);
-
-    up->evtchn        = evtchn;
-    up->shmem_frame   = shmem_frame;
-    up->status        = CONNECTED;
-    usbif_get(up);
-
-    (void)bind_evtchn_to_irqhandler(
-        evtchn, usbif_be_int, 0, "usbif-backend", up);
-
-    connect->status = USBIF_BE_STATUS_OKAY;
-}
-
-/* Remove URBs for this interface before destroying it. */
-void usbif_deschedule(usbif_priv_t *up)
-{
-    remove_from_usbif_list(up);
-}
-
-int usbif_disconnect(usbif_be_disconnect_t *disconnect, u8 rsp_id)
-{
-    domid_t       domid  = disconnect->domid;
-    usbif_priv_t *up;
-
-    up = usbif_find(domid);
-    if ( unlikely(up == NULL) )
-    {
-        DPRINTK("usbif_disconnect attempted for non-existent usbif"
-                " (%u)\n", disconnect->domid); 
-        disconnect->status = USBIF_BE_STATUS_INTERFACE_NOT_FOUND;
-        return 1; /* Caller will send response error message. */
-    }
-
-    if ( up->status == CONNECTED )
-    {
-        up->status = DISCONNECTING;
-        up->disconnect_rspid = rsp_id;
-        wmb(); /* Let other CPUs see the status change. */
-        unbind_evtchn_from_irqhandler(up->evtchn, up);
-       usbif_deschedule(up);
-        usbif_put(up);
-        return 0; /* Caller should not send response message. */
-    }
-
-    disconnect->status = USBIF_BE_STATUS_OKAY;
-    return 1;
-}
-
-void __init usbif_interface_init(void)
-{
-    usbif_priv_cachep = kmem_cache_create("usbif_priv_cache",
-                                         sizeof(usbif_priv_t), 
-                                         0, 0, NULL, NULL);
-    memset(usbif_priv_hash, 0, sizeof(usbif_priv_hash));
-}
diff -r eba5afe9aa37 -r 10759a44ce3b 
linux-2.6-xen-sparse/drivers/xen/usbback/usbback.c
--- a/linux-2.6-xen-sparse/drivers/xen/usbback/usbback.c        Thu Sep 22 
15:05:44 2005
+++ /dev/null   Thu Sep 22 15:12:14 2005
@@ -1,1068 +0,0 @@
-/******************************************************************************
- * arch/xen/drivers/usbif/backend/main.c
- * 
- * Backend for the Xen virtual USB driver - provides an abstraction of a
- * USB host controller to the corresponding frontend driver.
- *
- * by Mark Williamson
- * Copyright (c) 2004 Intel Research Cambridge
- * Copyright (c) 2004, 2005 Mark Williamson
- *
- * Based on arch/xen/drivers/blkif/backend/main.c
- * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
- */
-
-#include "common.h"
-
-
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/tqueue.h>
-
-/*
- * This is rather arbitrary.
- */
-#define MAX_PENDING_REQS 4
-#define BATCH_PER_DOMAIN 1
-
-static unsigned long mmap_vstart;
-
-/* Needs to be sufficiently large that we can map the (large) buffers
- * the USB mass storage driver wants. */
-#define MMAP_PAGES_PER_REQUEST \
-    (128)
-#define MMAP_PAGES             \
-    (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
-
-#define MMAP_VADDR(_req,_seg)                        \
-    (mmap_vstart +                                   \
-     ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
-     ((_seg) * PAGE_SIZE))
-
-
-static spinlock_t owned_ports_lock;
-LIST_HEAD(owned_ports);
-
-/* A list of these structures is used to track ownership of physical USB
- * ports. */
-typedef struct 
-{
-    usbif_priv_t     *usbif_priv;
-    char             path[16];
-    int               guest_port;
-    int enabled;
-    struct list_head  list;
-    unsigned long guest_address; /* The USB device address that has been
-                                  * assigned by the guest. */
-    int               dev_present; /* Is there a device present? */
-    struct usb_device * dev;
-    unsigned long ifaces;  /* What interfaces are present on this device? */
-} owned_port_t;
-
-
-/*
- * Each outstanding request that we've passed to the lower device layers has a
- * 'pending_req' allocated to it.  The request is complete, the specified
- * domain has a response queued for it, with the saved 'id' passed back.
- */
-typedef struct {
-    usbif_priv_t       *usbif_priv;
-    unsigned long      id;
-    int                nr_pages;
-    unsigned short     operation;
-    int                status;
-} pending_req_t;
-
-/*
- * We can't allocate pending_req's in order, since they may complete out of 
- * order. We therefore maintain an allocation ring. This ring also indicates 
- * when enough work has been passed down -- at that point the allocation ring 
- * will be empty.
- */
-static pending_req_t pending_reqs[MAX_PENDING_REQS];
-static unsigned char pending_ring[MAX_PENDING_REQS];
-static spinlock_t pend_prod_lock;
-
-/* NB. We use a different index type to differentiate from shared usb rings. */
-typedef unsigned int PEND_RING_IDX;
-#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
-static PEND_RING_IDX pending_prod, pending_cons;
-#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
-
-static int do_usb_io_op(usbif_priv_t *usbif, int max_to_do);
-static void make_response(usbif_priv_t *usbif, unsigned long id, 
-                          unsigned short op, int st, int inband,
-                         unsigned long actual_length);
-static void dispatch_usb_probe(usbif_priv_t *up, unsigned long id, unsigned long port);
-static void dispatch_usb_io(usbif_priv_t *up, usbif_request_t *req);    
-static void dispatch_usb_reset(usbif_priv_t *up, unsigned long portid);
-static owned_port_t *usbif_find_port(char *);
-
-/******************************************************************
- * PRIVATE DEBUG FUNCTIONS
- */
-
-#undef DEBUG
-#ifdef DEBUG
-
-static void dump_port(owned_port_t *p)
-{
-    printk(KERN_DEBUG "owned_port_t @ %p\n"
-          "  usbif_priv @ %p\n"
-          "  path: %s\n"
-          "  guest_port: %d\n"
-          "  guest_address: %ld\n"
-          "  dev_present: %d\n"
-          "  dev @ %p\n"
-          "  ifaces: 0x%lx\n",
-          p, p->usbif_priv, p->path, p->guest_port, p->guest_address,
-          p->dev_present, p->dev, p->ifaces);
-}
-
-
-static void dump_request(usbif_request_t *req)
-{    
-    printk(KERN_DEBUG "id = 0x%lx\n"
-          "devnum %d\n"
-          "endpoint 0x%x\n"
-          "direction %d\n"
-          "speed %d\n"
-          "pipe_type 0x%x\n"
-          "transfer_buffer 0x%lx\n"
-          "length 0x%lx\n"
-          "transfer_flags 0x%lx\n"
-          "setup = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n"
-          "iso_schedule = 0x%lx\n"
-          "num_iso %ld\n",
-          req->id, req->devnum, req->endpoint, req->direction, req->speed,
-          req->pipe_type, req->transfer_buffer, req->length,
-          req->transfer_flags, req->setup[0], req->setup[1], req->setup[2],
-          req->setup[3], req->setup[4], req->setup[5], req->setup[6],
-          req->setup[7], req->iso_schedule, req->num_iso);
-}
-
-static void dump_urb(struct urb *urb)
-{
-    printk(KERN_DEBUG "dumping urb @ %p\n", urb);
-
-#define DUMP_URB_FIELD(name, format) \
-    printk(KERN_DEBUG "  " # name " " format "\n", urb-> name)
-    
-    DUMP_URB_FIELD(pipe, "0x%x");
-    DUMP_URB_FIELD(status, "%d");
-    DUMP_URB_FIELD(transfer_flags, "0x%x");    
-    DUMP_URB_FIELD(transfer_buffer, "%p");
-    DUMP_URB_FIELD(transfer_buffer_length, "%d");
-    DUMP_URB_FIELD(actual_length, "%d");
-}
-
-static void dump_response(usbif_response_t *resp)
-{
-    printk(KERN_DEBUG "usbback: Sending response:\n"
-          "         id = 0x%x\n"
-          "         op = %d\n"
-          "         status = %d\n"
-          "         data = %d\n"
-          "         length = %d\n",
-          resp->id, resp->op, resp->status, resp->data, resp->length);
-}
-
-#else /* DEBUG */
-
-#define dump_port(blah)     ((void)0)
-#define dump_request(blah)   ((void)0)
-#define dump_urb(blah)      ((void)0)
-#define dump_response(blah) ((void)0)
-
-#endif /* DEBUG */
-
-/******************************************************************
- * MEMORY MANAGEMENT
- */
-
-static void fast_flush_area(int idx, int nr_pages)
-{
-    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
-    int               i;
-
-    for ( i = 0; i < nr_pages; i++ )
-    {
-       MULTI_update_va_mapping(mcl+i, MMAP_VADDR(idx, i),
-                               __pte(0), 0);
-    }
-
-    mcl[nr_pages-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-    if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
-        BUG();
-}
-
-
-/******************************************************************
- * USB INTERFACE SCHEDULER LIST MAINTENANCE
- */
-
-static struct list_head usbio_schedule_list;
-static spinlock_t usbio_schedule_list_lock;
-
-static int __on_usbif_list(usbif_priv_t *up)
-{
-    return up->usbif_list.next != NULL;
-}
-
-void remove_from_usbif_list(usbif_priv_t *up)
-{
-    unsigned long flags;
-    if ( !__on_usbif_list(up) ) return;
-    spin_lock_irqsave(&usbio_schedule_list_lock, flags);
-    if ( __on_usbif_list(up) )
-    {
-        list_del(&up->usbif_list);
-        up->usbif_list.next = NULL;
-        usbif_put(up);
-    }
-    spin_unlock_irqrestore(&usbio_schedule_list_lock, flags);
-}
-
-static void add_to_usbif_list_tail(usbif_priv_t *up)
-{
-    unsigned long flags;
-    if ( __on_usbif_list(up) ) return;
-    spin_lock_irqsave(&usbio_schedule_list_lock, flags);
-    if ( !__on_usbif_list(up) && (up->status == CONNECTED) )
-    {
-        list_add_tail(&up->usbif_list, &usbio_schedule_list);
-        usbif_get(up);
-    }
-    spin_unlock_irqrestore(&usbio_schedule_list_lock, flags);
-}
-
-void free_pending(int pending_idx)
-{
-    unsigned long flags;
-
-    /* Free the pending request. */
-    spin_lock_irqsave(&pend_prod_lock, flags);
-    pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-    spin_unlock_irqrestore(&pend_prod_lock, flags);
-}
-
-/******************************************************************
- * COMPLETION CALLBACK -- Called as urb->complete()
- */
-
-static void maybe_trigger_usbio_schedule(void);
-
-static void __end_usb_io_op(struct urb *purb)
-{
-    pending_req_t *pending_req;
-    int pending_idx;
-
-    pending_req = purb->context;
-
-    pending_idx = pending_req - pending_reqs;
-
-    ASSERT(purb->actual_length <= purb->transfer_buffer_length);
-    ASSERT(purb->actual_length <= pending_req->nr_pages * PAGE_SIZE);
-    
-    /* An error fails the entire request. */
-    if ( purb->status )
-    {
-        printk(KERN_WARNING "URB @ %p failed. Status %d\n", purb, purb->status);
-    }
-
-    if ( usb_pipetype(purb->pipe) == 0 )
-    {
-        int i;
-        usbif_iso_t *sched = (usbif_iso_t *)MMAP_VADDR(pending_idx, pending_req->nr_pages - 1);
-
-        /* If we're dealing with an iso pipe, we need to copy back the schedule. */
-        for ( i = 0; i < purb->number_of_packets; i++ )
-        {
-            sched[i].length = purb->iso_frame_desc[i].actual_length;
-            ASSERT(sched[i].buffer_offset ==
-                   purb->iso_frame_desc[i].offset);
-            sched[i].status = purb->iso_frame_desc[i].status;
-        }
-    }
-    
-    fast_flush_area(pending_req - pending_reqs, pending_req->nr_pages);
-
-    kfree(purb->setup_packet);
-
-    make_response(pending_req->usbif_priv, pending_req->id,
-                 pending_req->operation, pending_req->status, 0, purb->actual_length);
-    usbif_put(pending_req->usbif_priv);
-
-    usb_free_urb(purb);
-
-    free_pending(pending_idx);
-    
-    rmb();
-
-    /* Check for anything still waiting in the rings, having freed a request... */
-    maybe_trigger_usbio_schedule();
-}
-
-/******************************************************************
- * SCHEDULER FUNCTIONS
- */
-
-static DECLARE_WAIT_QUEUE_HEAD(usbio_schedule_wait);
-
-static int usbio_schedule(void *arg)
-{
-    DECLARE_WAITQUEUE(wq, current);
-
-    usbif_priv_t          *up;
-    struct list_head *ent;
-
-    daemonize();
-
-    for ( ; ; )
-    {
-        /* Wait for work to do. */
-        add_wait_queue(&usbio_schedule_wait, &wq);
-        set_current_state(TASK_INTERRUPTIBLE);
-        if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 
-             list_empty(&usbio_schedule_list) )
-            schedule();
-        __set_current_state(TASK_RUNNING);
-        remove_wait_queue(&usbio_schedule_wait, &wq);
-
-        /* Queue up a batch of requests. */
-        while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
-                !list_empty(&usbio_schedule_list) )
-        {
-            ent = usbio_schedule_list.next;
-            up = list_entry(ent, usbif_priv_t, usbif_list);
-            usbif_get(up);
-            remove_from_usbif_list(up);
-            if ( do_usb_io_op(up, BATCH_PER_DOMAIN) )
-                add_to_usbif_list_tail(up);
-            usbif_put(up);
-        }
-    }
-}
-
-static void maybe_trigger_usbio_schedule(void)
-{
-    /*
-     * Needed so that two processes, who together make the following predicate
-     * true, don't both read stale values and evaluate the predicate
-     * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
-     */
-    smp_mb();
-
-    if ( !list_empty(&usbio_schedule_list) )
-        wake_up(&usbio_schedule_wait);
-}
-
-
-/******************************************************************************
- * NOTIFICATION FROM GUEST OS.
- */
-
-irqreturn_t usbif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-{
-    usbif_priv_t *up = dev_id;
-
-    smp_mb();
-
-    add_to_usbif_list_tail(up); 
-
-    /* Will in fact /always/ trigger an io schedule in this case. */
-    maybe_trigger_usbio_schedule();
-
-    return IRQ_HANDLED;
-}
-
-
-
-/******************************************************************
- * DOWNWARD CALLS -- These interface with the usb-device layer proper.
- */
-
-static int do_usb_io_op(usbif_priv_t *up, int max_to_do)
-{
-    usbif_back_ring_t *usb_ring = &up->usb_ring;
-    usbif_request_t *req;
-    RING_IDX i, rp;
-    int more_to_do = 0;
-
-    rp = usb_ring->sring->req_prod;
-    rmb(); /* Ensure we see queued requests up to 'rp'. */
-    
-    /* Take items off the comms ring, taking care not to overflow. */
-    for ( i = usb_ring->req_cons; 
-          (i != rp) && !RING_REQUEST_CONS_OVERFLOW(usb_ring, i);
-          i++ )
-    {
-        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
-        {
-            more_to_do = 1;
-            break;
-        }
-
-        req = RING_GET_REQUEST(usb_ring, i);
-        
-        switch ( req->operation )
-        {
-        case USBIF_OP_PROBE:
-            dispatch_usb_probe(up, req->id, req->port);
-            break;
-
-        case USBIF_OP_IO:
-         /* Assemble an appropriate URB. */
-         dispatch_usb_io(up, req);
-          break;
-
-       case USBIF_OP_RESET:
-         dispatch_usb_reset(up, req->port);
-          break;
-
-        default:
-            DPRINTK("error: unknown USB io operation [%d]\n",
-                    req->operation);
-            make_response(up, req->id, req->operation, -EINVAL, 0, 0);
-            break;
-        }
-    }
-
-    usb_ring->req_cons = i;
-
-    return more_to_do;
-}
-
-static owned_port_t *find_guest_port(usbif_priv_t *up, int port)
-{
-    unsigned long flags;
-    struct list_head *l;
-
-    spin_lock_irqsave(&owned_ports_lock, flags);
-    list_for_each(l, &owned_ports)
-    {
-        owned_port_t *p = list_entry(l, owned_port_t, list);
-        if(p->usbif_priv == up && p->guest_port == port)
-        {
-            spin_unlock_irqrestore(&owned_ports_lock, flags);
-            return p;
-        }
-    }
-    spin_unlock_irqrestore(&owned_ports_lock, flags);
-
-    return NULL;
-}
-
-static void dispatch_usb_reset(usbif_priv_t *up, unsigned long portid)
-{
-    owned_port_t *port = find_guest_port(up, portid);
-    int ret = 0;
-
-
-    /* Allowing the guest to actually reset the device causes more problems
-     * than it's worth.  We just fake it out in software but we will do a real
-     * reset when the interface is destroyed. */
-
-    dump_port(port);
-
-    port->guest_address = 0;
-    /* If there's an attached device then the port is now enabled. */
-    if ( port->dev_present )
-        port->enabled = 1;
-    else
-        port->enabled = 0;
-
-    make_response(up, 0, USBIF_OP_RESET, ret, 0, 0);
-}
-
-static void dispatch_usb_probe(usbif_priv_t *up, unsigned long id, unsigned long portid)
-{
-    owned_port_t *port = find_guest_port(up, portid);
-    int ret;
- 
-    if ( port != NULL )
-        ret = port->dev_present;
-    else
-    {
-        ret = -EINVAL;
-        printk(KERN_INFO "dispatch_usb_probe(): invalid port probe request "
-              "(port %ld)\n", portid);
-    }
-
-    /* Probe result is sent back in-band.  Probes don't have an associated id
-     * right now... */
-    make_response(up, id, USBIF_OP_PROBE, ret, portid, 0);
-}
-
-/**
- * check_iso_schedule - safety check the isochronous schedule for an URB
- * @purb : the URB in question
- */
-static int check_iso_schedule(struct urb *purb)
-{
-    int i;
-    unsigned long total_length = 0;
-    
-    for ( i = 0; i < purb->number_of_packets; i++ )
-    {
-        struct usb_iso_packet_descriptor *desc = &purb->iso_frame_desc[i];
-        
-        if ( desc->offset >= purb->transfer_buffer_length
-            || ( desc->offset + desc->length) > purb->transfer_buffer_length )
-            return -EINVAL;
-
-        total_length += desc->length;
-
-        if ( total_length > purb->transfer_buffer_length )
-            return -EINVAL;
-    }
-    
-    return 0;
-}
-
-owned_port_t *find_port_for_request(usbif_priv_t *up, usbif_request_t *req);
-
-static void dispatch_usb_io(usbif_priv_t *up, usbif_request_t *req)
-{
-    unsigned long buffer_mach;
-    int i = 0, offset = 0,
-        pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
-    pending_req_t *pending_req;
-    unsigned long  remap_prot;
-    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
-    struct urb *purb = NULL;
-    owned_port_t *port;
-    unsigned char *setup;    
-
-    dump_request(req);
-
-    if ( NR_PENDING_REQS == MAX_PENDING_REQS )
-    {
-        printk(KERN_WARNING "usbback: Max requests already queued. "
-              "Giving up!\n");
-        
-        return;
-    }
-
-    port = find_port_for_request(up, req);
-
-    if ( port == NULL )
-    {
-       printk(KERN_WARNING "No such device! (%d)\n", req->devnum);
-       dump_request(req);
-
-        make_response(up, req->id, req->operation, -ENODEV, 0, 0);
-       return;
-    }
-    else if ( !port->dev_present )
-    {
-        /* In normal operation, we'll only get here if a device is unplugged
-         * and the frontend hasn't noticed yet. */
-        make_response(up, req->id, req->operation, -ENODEV, 0, 0);
-       return;
-    }
-        
-
-    setup = kmalloc(8, GFP_KERNEL);
-
-    if ( setup == NULL )
-        goto no_mem;
-   
-    /* Copy request out for safety. */
-    memcpy(setup, req->setup, 8);
-
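-    /* setup[] now holds the raw 8-byte SETUP packet: setup[0] = bmRequestType,
-     * setup[1] = bRequest, setup[2..3] = wValue, setup[4..5] = wIndex,
-     * setup[6..7] = wLength, all little-endian on the wire. */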
-    if( setup[0] == 0x0 && setup[1] == 0x5)
-    {
-        /* To virtualise the USB address space, we need to intercept
-         * set_address messages and emulate.  From the USB specification:
-         * bmRequestType = 0x0;
-         * bRequest = SET_ADDRESS (i.e. 0x5)
-         * wValue = device address
-         * wIndex = 0
-         * wLength = 0
-         * data = None
-         */
-        /* Extract wValue (the new device address) from the little-endian setup packet. */
-        port->guest_address = le16_to_cpu(*(u16 *)(setup + 2));
-        /* Make a successful response.  That was easy! */
-
-        make_response(up, req->id, req->operation, 0, 0, 0);
-
-       kfree(setup);
-        return;
-    }
-    else if ( setup[0] == 0x0 && setup[1] == 0x9 )
-    {
-        /* The host kernel needs to know what device configuration is in use
-         * because various error checks get confused otherwise.  We just do
-         * configuration settings here, under controlled conditions.
-         */
-
-        /* Ignore the configuration setting and hope that the host kernel
-         * did it right. */
-        /* usb_set_configuration(port->dev, setup[2]); */
-
-        make_response(up, req->id, req->operation, 0, 0, 0);
-
-        kfree(setup);
-        return;
-    }
-    else if ( setup[0] == 0x1 && setup[1] == 0xB )
-    {
-        /* The host kernel needs to know what device interface is in use
-         * because various error checks get confused otherwise.  We just do
-         * configuration settings here, under controlled conditions.
-         */
-        usb_set_interface(port->dev, (setup[4] | setup[5] << 8),
-                          (setup[2] | setup[3] << 8) );
-
-        make_response(up, req->id, req->operation, 0, 0, 0);
-
-        kfree(setup);
-        return;
-    }
-
-    if ( ( req->transfer_buffer - (req->transfer_buffer & PAGE_MASK)
-          + req->length )
-        > MMAP_PAGES_PER_REQUEST * PAGE_SIZE )
-    {
-        printk(KERN_WARNING "usbback: request of %lu bytes too large\n",
-              req->length);
-        make_response(up, req->id, req->operation, -EINVAL, 0, 0);
-        kfree(setup);
-        return;
-    }
-    
-    buffer_mach = req->transfer_buffer;
-
-    if( buffer_mach == 0 )
-       goto no_remap;
-
-    ASSERT((req->length >> PAGE_SHIFT) <= MMAP_PAGES_PER_REQUEST);
-    ASSERT(buffer_mach);
-
-    /* Always map writeable for now. */
-    remap_prot = _KERNPG_TABLE;
-
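-    /* Map each page of the guest's transfer buffer into our MMAP_VADDR
-     * region; the page-table updates are batched into one multicall. */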
-    for ( i = 0, offset = 0; offset < req->length;
-          i++, offset += PAGE_SIZE )
-    {
-       MULTI_update_va_mapping_otherdomain(
-           mcl+i, MMAP_VADDR(pending_idx, i),
-           pfn_pte_ma((buffer_mach + offset) >> PAGE_SHIFT, remap_prot),
-           0, up->domid);
-        
-        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
-            FOREIGN_FRAME((buffer_mach + offset) >> PAGE_SHIFT);
-
-        ASSERT(virt_to_mfn(MMAP_VADDR(pending_idx, i))
-               == ((buffer_mach >> PAGE_SHIFT) + i));
-    }
-
-    if ( req->pipe_type == 0 && req->num_iso > 0 ) /* Maybe schedule ISO... */
-    {
-        /* Map in ISO schedule, if necessary. */
-       MULTI_update_va_mapping_otherdomain(
-           mcl+i, MMAP_VADDR(pending_idx, i),
-           pfn_pte_ma(req->iso_schedule >> PAGE_SHIFT, remap_prot),
-           0, up->domid);
-
-        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
-            FOREIGN_FRAME(req->iso_schedule >> PAGE_SHIFT);
-    
-        i++;
-    }
-
-    if ( unlikely(HYPERVISOR_multicall(mcl, i) != 0) )
-        BUG();
-    
-    {
-        int j;
-        for ( j = 0; j < i; j++ )
-        {
-            if ( unlikely(mcl[j].result != 0) )
-            {
-                printk(KERN_WARNING
-                      "invalid buffer %d -- could not remap it\n", j);
-                fast_flush_area(pending_idx, i);
-                goto bad_descriptor;
-            }
-       }
-    }
-    
- no_remap:
-
-    ASSERT(i <= MMAP_PAGES_PER_REQUEST);
-    ASSERT(i * PAGE_SIZE >= req->length);
-
-    /* We have to do this because some things might complete out of order. */
-    pending_req = &pending_reqs[pending_idx];
-    pending_req->usbif_priv= up;
-    pending_req->id        = req->id;
-    pending_req->operation = req->operation;
-    pending_req->nr_pages  = i;
-
-    pending_cons++;
-
-    usbif_get(up);
-    
-    /* Fill out an actual request for the USB layer. */
-    purb = usb_alloc_urb(req->num_iso);
-
-    if ( purb == NULL )
-    {
-        usbif_put(up);
-        free_pending(pending_idx);
-        goto no_mem;
-    }
-
-    purb->dev = port->dev;
-    purb->context = pending_req;
-    purb->transfer_buffer =
-        (void *)(MMAP_VADDR(pending_idx, 0) + (buffer_mach & ~PAGE_MASK));
-    if(buffer_mach == 0)
-      purb->transfer_buffer = NULL;
-    purb->complete = __end_usb_io_op;
-    purb->transfer_buffer_length = req->length;
-    purb->transfer_flags = req->transfer_flags;
-
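-    /* Rebuild a Linux-style pipe word from the request fields:
-     * bit 7 = direction, bits 8-14 = device number, bits 15-18 = endpoint,
-     * bit 26 = low-speed flag, bits 30-31 = pipe type. */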
-    purb->pipe = 0;
-    purb->pipe |= req->direction << 7;
-    purb->pipe |= port->dev->devnum << 8;
-    purb->pipe |= req->speed << 26;
-    purb->pipe |= req->pipe_type << 30;
-    purb->pipe |= req->endpoint << 15;
-
-    purb->number_of_packets = req->num_iso;
-
-    if ( purb->number_of_packets * sizeof(usbif_iso_t) > PAGE_SIZE )
-        goto urb_error;
-
-    /* Make sure there's always some kind of timeout. */
-    purb->timeout = ( req->timeout > 0 ) ? (req->timeout * HZ) / 1000
-                    :  1000;
-
-    purb->setup_packet = setup;
-
-    if ( req->pipe_type == 0 ) /* ISO */
-    {
-        int j;
-        usbif_iso_t *iso_sched = (usbif_iso_t *)MMAP_VADDR(pending_idx, i - 1);
-
-        /* If we're dealing with an iso pipe, we need to copy in a schedule. */
-        for ( j = 0; j < purb->number_of_packets; j++ )
-        {
-            purb->iso_frame_desc[j].length = iso_sched[j].length;
-            purb->iso_frame_desc[j].offset = iso_sched[j].buffer_offset;
-            iso_sched[j].status = 0;
-        }
-    }
-
-    if ( check_iso_schedule(purb) != 0 )
-        goto urb_error;
-
-    if ( usb_submit_urb(purb) != 0 )
-        goto urb_error;
-
-    return;
-
- urb_error:
-    dump_urb(purb);    
-    usbif_put(up);
-    free_pending(pending_idx);
-
- bad_descriptor:
-    kfree ( setup );
-    if ( purb != NULL )
-        usb_free_urb(purb);
-    make_response(up, req->id, req->operation, -EINVAL, 0, 0);
-    return;
-    
- no_mem:
-    if ( setup != NULL )
-        kfree(setup);
-    make_response(up, req->id, req->operation, -ENOMEM, 0, 0);
-    return;
-} 
-
-
-
-/******************************************************************
- * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
- */
-
-
-static void make_response(usbif_priv_t *up, unsigned long id,
-                          unsigned short op, int st, int inband,
-                         unsigned long length)
-{
-    usbif_response_t *resp;
-    unsigned long     flags;
-    usbif_back_ring_t *usb_ring = &up->usb_ring;
-
-    /* Place on the response ring for the relevant domain. */ 
-    spin_lock_irqsave(&up->usb_ring_lock, flags);
-    resp = RING_GET_RESPONSE(usb_ring, usb_ring->rsp_prod_pvt);
-    resp->id        = id;
-    resp->operation = op;
-    resp->status    = st;
-    resp->data      = inband;
-    resp->length = length;
-    wmb(); /* Ensure other side can see the response fields. */
-
-    dump_response(resp);
-
-    usb_ring->rsp_prod_pvt++;
-    RING_PUSH_RESPONSES(usb_ring);
-    spin_unlock_irqrestore(&up->usb_ring_lock, flags);
-
-    /* Kick the relevant domain. */
-    notify_via_evtchn(up->evtchn);
-}
-
-/**
- * usbif_claim_port - claim devices on a port on behalf of guest
- *
- * Once completed, this will ensure that any device attached to that
- * port is claimed by this driver for use by the guest.
- */
-int usbif_claim_port(usbif_be_claim_port_t *msg)
-{
-    owned_port_t *o_p;
-    
-    /* Sanity... */
-    if ( usbif_find_port(msg->path) != NULL )
-    {
-        printk(KERN_WARNING "usbback: Attempted to claim USB port "
-               "we already own!\n");
-        return -EINVAL;
-    }
-
-    /* No need for a slab cache - this should be infrequent. */
-    o_p = kmalloc(sizeof(owned_port_t), GFP_KERNEL);
-
-    if ( o_p == NULL )
-        return -ENOMEM;
-
-    o_p->enabled = 0;
-    o_p->usbif_priv = usbif_find(msg->domid);
-    o_p->guest_port = msg->usbif_port;
-    o_p->dev_present = 0;
-    o_p->guest_address = 0; /* Default address. */
-
-    strcpy(o_p->path, msg->path);
-
-    spin_lock_irq(&owned_ports_lock);
-    
-    list_add(&o_p->list, &owned_ports);
-
-    spin_unlock_irq(&owned_ports_lock);
-
-    printk(KERN_INFO "usbback: Claimed USB port (%s) for %d.%d\n", o_p->path,
-          msg->domid, msg->usbif_port);
-
-    /* Force a reprobe for unclaimed devices. */
-    usb_scan_devices();
-
-    return 0;
-}
-
-owned_port_t *find_port_for_request(usbif_priv_t *up, usbif_request_t *req)
-{
-    unsigned long flags;
-    struct list_head *port;
-
-    /* I'm assuming this is not called from IRQ context - correct?  I think
-     * it's probably only called in response to control messages or plug events
-     * in the USB hub kernel thread, so should be OK. */
-    spin_lock_irqsave(&owned_ports_lock, flags);
-    list_for_each(port, &owned_ports)
-    {
-        owned_port_t *p = list_entry(port, owned_port_t, list);
-        if(p->usbif_priv == up && p->guest_address == req->devnum && p->enabled )
-         {
-              dump_port(p);
-
-             spin_unlock_irqrestore(&owned_ports_lock, flags);
-              return p;
-         }
-    }
-    spin_unlock_irqrestore(&owned_ports_lock, flags);
-
-    return NULL;    
-}
-
-owned_port_t *__usbif_find_port(char *path)
-{
-    struct list_head *port;
-
-    list_for_each(port, &owned_ports)
-    {
-        owned_port_t *p = list_entry(port, owned_port_t, list);
-        if(!strcmp(path, p->path))
-        {
-            return p;
-        }
-    }
-
-    return NULL;
-}
-
-owned_port_t *usbif_find_port(char *path)
-{
-    owned_port_t *ret;
-    unsigned long flags;
-
-    spin_lock_irqsave(&owned_ports_lock, flags);
-    ret = __usbif_find_port(path);    
-    spin_unlock_irqrestore(&owned_ports_lock, flags);
-
-    return ret;
-}
-
-
-static void *probe(struct usb_device *dev, unsigned iface,
-                   const struct usb_device_id *id)
-{
-    owned_port_t *p;
-
-    /* We don't care what the device is - if we own the port, we want it.  We
-     * don't deal with device-specifics in this driver, so we don't care what
-     * the device actually is ;-) */
-    if ( ( p = usbif_find_port(dev->devpath) ) != NULL )
-    {
-        printk(KERN_INFO "usbback: claimed device attached to owned port\n");
-
-        p->dev_present = 1;
-        p->dev = dev;
-        set_bit(iface, &p->ifaces);
-        
-        return p->usbif_priv;
-    }
-    else
-        printk(KERN_INFO "usbback: hotplug for non-owned port (%s), 
ignoring\n",
-              dev->devpath);
-   
-
-    return NULL;
-}
-
-static void disconnect(struct usb_device *dev, void *usbif)
-{
-    /* Note the device is removed so we can tell the guest when it probes. */
-    owned_port_t *port = usbif_find_port(dev->devpath);
-
-    /* The port may not be one we own. */
-    if ( port == NULL )
-        return;
-    port->dev_present = 0;
-    port->dev = NULL;
-    port->ifaces = 0;
-}
-
-
-struct usb_driver driver =
-{
-    .owner      = THIS_MODULE,
-    .name       = "Xen USB Backend",
-    .probe      = probe,
-    .disconnect = disconnect,
-    .id_table   = NULL,
-};
-
-/* __usbif_release_port - internal mechanics for releasing a port */
-void __usbif_release_port(owned_port_t *p)
-{
-    int i;
-
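-    /* Release every interface we claimed on this device; the loop runs
-     * until the ifaces bitmask is fully cleared. */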
-    for ( i = 0; p->ifaces != 0; i++)
-        if ( p->ifaces & 1 << i )
-        {
-            usb_driver_release_interface(&driver, usb_ifnum_to_if(p->dev, i));
-            clear_bit(i, &p->ifaces);
-        }
-    list_del(&p->list);
-
-    /* Reset the real device.  We don't simulate disconnect / probe for other
-     * drivers in this kernel because we assume the device is completely under
-     * the control of ourselves (i.e. the guest!).  This should ensure that the
-     * device is in a sane state for the next customer ;-) */
-
-    /* MAW NB: we're not resetting the real device here.  This looks perfectly
-     * valid to me but it causes memory corruption.  We seem to get away with not
-     * resetting for now, although it'd be nice to have this tracked down. */
-/*     if ( p->dev != NULL) */
-/*         usb_reset_device(p->dev); */
-
-    kfree(p);
-}
-
-
-/**
- * usbif_release_port - stop claiming devices on a port on behalf of guest
- */
-void usbif_release_port(usbif_be_release_port_t *msg)
-{
-    owned_port_t *p;
-
-    spin_lock_irq(&owned_ports_lock);
-    p = __usbif_find_port(msg->path);
-    if ( p != NULL )
-        __usbif_release_port(p);
-    spin_unlock_irq(&owned_ports_lock);
-}
-
-void usbif_release_ports(usbif_priv_t *up)
-{
-    struct list_head *port, *tmp;
-    unsigned long flags;
-    
-    spin_lock_irqsave(&owned_ports_lock, flags);
-    list_for_each_safe(port, tmp, &owned_ports)
-    {
-        owned_port_t *p = list_entry(port, owned_port_t, list);
-        if ( p->usbif_priv == up )
-            __usbif_release_port(p);
-    }
-    spin_unlock_irqrestore(&owned_ports_lock, flags);
-}
-
-static int __init usbif_init(void)
-{
-    int i;
-    struct page *page;
-
-    if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
-         !(xen_start_info->flags & SIF_USB_BE_DOMAIN) )
-        return 0;
-
-    page = balloon_alloc_empty_page_range(MMAP_PAGES);
-    BUG_ON(page == NULL);
-    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-
-    pending_cons = 0;
-    pending_prod = MAX_PENDING_REQS;
-    memset(pending_reqs, 0, sizeof(pending_reqs));
-    for ( i = 0; i < MAX_PENDING_REQS; i++ )
-        pending_ring[i] = i;
-
-    spin_lock_init(&pend_prod_lock);
-
-    spin_lock_init(&owned_ports_lock);
-    INIT_LIST_HEAD(&owned_ports);
-
-    spin_lock_init(&usbio_schedule_list_lock);
-    INIT_LIST_HEAD(&usbio_schedule_list);
-
-    if ( kernel_thread(usbio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
-        BUG();
-    
-    usbif_interface_init();
-
-    usbif_ctrlif_init();
-
-    usb_register(&driver);
-
-    printk(KERN_INFO "Xen USB Backend Initialised");
-
-    return 0;
-}
-
-__initcall(usbif_init);
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/usbfront/usbfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/usbfront/usbfront.c      Thu Sep 22 15:05:44 2005
+++ /dev/null   Thu Sep 22 15:12:14 2005
@@ -1,1735 +0,0 @@
-/*
- * Xen Virtual USB Frontend Driver 
- *
- * This file contains the first version of the Xen virtual USB hub
- * that I've managed not to delete by mistake (3rd time lucky!).
- *
- * Based on Linux's uhci.c, original copyright notices are displayed
- * below.  Portions also (c) 2004 Intel Research Cambridge
- * and (c) 2004, 2005 Mark Williamson
- *
- * Contact <mark.williamson@xxxxxxxxxxxx> or
- * <xen-devel@xxxxxxxxxxxxxxxxxxxxx> regarding this code.
- *
- * Still to be (maybe) implemented:
- * - migration / backend restart support?
- * - support for building / using as a module
- */
-
-/*
- * Universal Host Controller Interface driver for USB.
- *
- * Maintainer: Johannes Erdfelt <johannes@xxxxxxxxxxx>
- *
- * (C) Copyright 1999 Linus Torvalds
- * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@xxxxxxxxxxx
- * (C) Copyright 1999 Randy Dunlap
- * (C) Copyright 1999 Georg Acher, acher@xxxxxxxxx
- * (C) Copyright 1999 Deti Fliegl, deti@xxxxxxxxx
- * (C) Copyright 1999 Thomas Sailer, sailer@xxxxxxxxxxxxxx
- * (C) Copyright 1999 Roman Weissgaerber, weissg@xxxxxxxxx
- * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
- *               support from usb-ohci.c by Adam Richter, adam@xxxxxxxxxxxxx).
- * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
- *
- * Intel documents this fairly well, and as far as I know there
- * are no royalties or anything like that, but even so there are
- * people who decided that they want to do the same thing in a
- * completely different way.
- *
- * WARNING! The USB documentation is downright evil. Most of it
- * is just crap, written by a committee. You're better off ignoring
- * most of it, the important stuff is:
- *  - the low-level protocol (fairly simple but lots of small details)
- *  - working around the horridness of the rest
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/smp_lock.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#ifdef CONFIG_USB_DEBUG
-#define DEBUG
-#else
-#undef DEBUG
-#endif
-#include <linux/usb.h>
-
-#include <asm/irq.h>
-#include <asm/system.h>
-
-#include "xhci.h"
-
-#include "../../../../../drivers/usb/hcd.h"
-
-#include <asm-xen/xen-public/io/usbif.h>
-#include <asm/xen-public/io/domain_controller.h>
-
-/*
- * Version Information
- */
-#define DRIVER_VERSION "v1.0"
-#define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, " \
-                      "Randy Dunlap, Georg Acher, Deti Fliegl, " \
-                      "Thomas Sailer, Roman Weissgaerber, Mark Williamson"
-#define DRIVER_DESC "Xen Virtual USB Host Controller Interface"
-
-/*
- * debug = 0, no debugging messages
- * debug = 1, dump failed URB's except for stalls
- * debug = 2, dump all failed URB's (including stalls)
- */
-#ifdef DEBUG
-static int debug = 1;
-#else
-static int debug = 0;
-#endif
-MODULE_PARM(debug, "i");
-MODULE_PARM_DESC(debug, "Debug level");
-static char *errbuf;
-#define ERRBUF_LEN    (PAGE_SIZE * 8)
-
-static int rh_submit_urb(struct urb *urb);
-static int rh_unlink_urb(struct urb *urb);
-static int xhci_unlink_urb(struct urb *urb);
-static void xhci_call_completion(struct urb *urb);
-static void xhci_drain_ring(void);
-static void xhci_transfer_result(struct xhci *xhci, struct urb *urb);
-static void xhci_finish_completion(void);
-
-#define MAX_URB_LOOP   2048            /* Maximum number of linked URB's */
-
-static kmem_cache_t *xhci_up_cachep;   /* urb_priv cache */
-static struct xhci *xhci;               /* XHCI structure for the interface */
-
-/******************************************************************************
- * DEBUGGING
- */
-
-#ifdef DEBUG
-
-static void dump_urb(struct urb *urb)
-{
-    printk(KERN_DEBUG "dumping urb @ %p\n"
-           "  hcpriv = %p\n"
-           "  next = %p\n"
-           "  dev = %p\n"
-           "  pipe = 0x%lx\n"
-           "  status = %d\n"
-           "  transfer_flags = 0x%lx\n"
-           "  transfer_buffer = %p\n"
-           "  transfer_buffer_length = %d\n"
-           "  actual_length = %d\n"
-           "  bandwidth = %d\n"
-           "  setup_packet = %p\n",
-           urb, urb->hcpriv, urb->next, urb->dev, urb->pipe, urb->status,
-           urb->transfer_flags, urb->transfer_buffer,
-           urb->transfer_buffer_length, urb->actual_length, urb->bandwidth,
-           urb->setup_packet);
-    if ( urb->setup_packet != NULL )
-        printk(KERN_DEBUG
-               "setup = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n",
-               urb->setup_packet[0], urb->setup_packet[1],
-               urb->setup_packet[2], urb->setup_packet[3],
-               urb->setup_packet[4], urb->setup_packet[5],
-               urb->setup_packet[6], urb->setup_packet[7]);
-    printk(KERN_DEBUG "complete = %p\n"
-           "interval = %d\n", urb->complete, urb->interval);
-        
-}
-
-static void xhci_show_resp(usbif_response_t *r)
-{
-        printk(KERN_DEBUG "dumping response @ %p\n"
-               "  id=0x%lx\n"
-               "  op=0x%x\n"
-               "  data=0x%x\n"
-               "  status=0x%x\n"
-               "  length=0x%lx\n",
-               r->id, r->operation, r->data, r->status, r->length);
-}
-
-#define DPRINTK(...) printk(KERN_DEBUG __VA_ARGS__)
-
-#else /* DEBUG */
-
-#define dump_urb(blah) ((void)0)
-#define xhci_show_resp(blah) ((void)0)
-#define DPRINTK(blah,...) ((void)0)
-
-#endif /* DEBUG */
-
-/******************************************************************************
- * RING REQUEST HANDLING
- */
-
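-/* The ring counts as plugged when it is full or the interface is recovering. */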
-#define RING_PLUGGED(_hc) ( RING_FULL(&_hc->usb_ring) || _hc->recovery )
-
-/**
- * xhci_construct_isoc - add isochronous information to a request
- */
-static int xhci_construct_isoc(usbif_request_t *req, struct urb *urb)
-{
-        usbif_iso_t *schedule;
-        int i;
-        struct urb_priv *urb_priv = urb->hcpriv;
-        
-        req->num_iso = urb->number_of_packets;
-        schedule = (usbif_iso_t *)__get_free_page(GFP_KERNEL);
-
-        if ( schedule == NULL )
-            return -ENOMEM;
-
-        for ( i = 0; i < req->num_iso; i++ )
-        {
-                schedule[i].buffer_offset = urb->iso_frame_desc[i].offset;
-                schedule[i].length = urb->iso_frame_desc[i].length;
-        }
-
-        urb_priv->schedule = schedule;
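-        /* The backend is given the machine address of the schedule page. */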
-       req->iso_schedule = virt_to_mfn(schedule) << PAGE_SHIFT;
-
-        return 0;
-}
-
-/**
- * xhci_queue_req - construct and queue request for an URB
- */
-static int xhci_queue_req(struct urb *urb)
-{
-        unsigned long flags;
-        usbif_request_t *req;
-        usbif_front_ring_t *usb_ring = &xhci->usb_ring;
-
-#ifdef DEBUG
-        printk(KERN_DEBUG
-               "usbif = %p, req_prod = %d (@ 0x%lx), resp_prod = %d, resp_cons 
= %d\n",
-               usbif, usbif->req_prod, virt_to_mfn(&usbif->req_prod),
-               usbif->resp_prod, xhci->usb_resp_cons);
-#endif
-        
-        spin_lock_irqsave(&xhci->ring_lock, flags);
-
-        if ( RING_PLUGGED(xhci) )
-        {
-                printk(KERN_WARNING
-                       "xhci_queue_req(): USB ring plugged, not queuing 
request\n");
-                spin_unlock_irqrestore(&xhci->ring_lock, flags);
-                return -ENOBUFS;
-        }
-
-        /* Stick something in the shared communications ring. */
-       req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
-
-        req->operation       = USBIF_OP_IO;
-        req->port            = 0; /* We don't care what the port is. */
-        req->id              = (unsigned long) urb->hcpriv;
-        req->transfer_buffer = virt_to_mfn(urb->transfer_buffer) << PAGE_SHIFT;
-       req->devnum          = usb_pipedevice(urb->pipe);
-        req->direction       = usb_pipein(urb->pipe);
-       req->speed           = usb_pipeslow(urb->pipe);
-        req->pipe_type       = usb_pipetype(urb->pipe);
-        req->length          = urb->transfer_buffer_length;
-        req->transfer_flags  = urb->transfer_flags;
-       req->endpoint        = usb_pipeendpoint(urb->pipe);
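-       /* urb->timeout is in jiffies; the ring request carries milliseconds. */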
-       req->timeout         = urb->timeout * (1000 / HZ);
-
-        if ( usb_pipetype(urb->pipe) == 0 ) /* ISO */
-        {
-            int ret = xhci_construct_isoc(req, urb);
-            if ( ret != 0 )
-            {
-                /* Don't return with the ring lock still held. */
-                spin_unlock_irqrestore(&xhci->ring_lock, flags);
-                return ret;
-            }
-        }
-
-       if(urb->setup_packet != NULL)
-                memcpy(req->setup, urb->setup_packet, 8);
-        else
-                memset(req->setup, 0, 8);
-        
-        usb_ring->req_prod_pvt++;
-        RING_PUSH_REQUESTS(usb_ring);
-
-        spin_unlock_irqrestore(&xhci->ring_lock, flags);
-
-       notify_via_evtchn(xhci->evtchn);
-
-        DPRINTK("Queued request for an URB.\n");
-        dump_urb(urb);
-
-        return -EINPROGRESS;
-}
-
-/**
- * xhci_queue_probe - queue a probe request for a particular port
- */
-static inline usbif_request_t *xhci_queue_probe(usbif_vdev_t port)
-{
-        usbif_request_t *req;
-        usbif_front_ring_t *usb_ring = &xhci->usb_ring;
-
-#ifdef DEBUG
-       printk(KERN_DEBUG
-               "queuing probe: req_prod = %d (@ 0x%lx), resp_prod = %d, "
-               "resp_cons = %d\n", usbif->req_prod,
-               virt_to_mfn(&usbif->req_prod),
-              usbif->resp_prod, xhci->usb_resp_cons);
-#endif
- 
-        /* This is always called from the timer interrupt. */
-        spin_lock(&xhci->ring_lock);
-       
-        if ( RING_PLUGGED(xhci) )
-        {
-                printk(KERN_WARNING
-                       "xhci_queue_probe(): ring full, not queuing request\n");
-                spin_unlock(&xhci->ring_lock);
-                return NULL;
-        }
-
-        /* Stick something in the shared communications ring. */
-        req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
-
-        memset(req, 0, sizeof(*req));
-
-        req->operation       = USBIF_OP_PROBE;
-        req->port            = port;
-
-        usb_ring->req_prod_pvt++;
-        RING_PUSH_REQUESTS(usb_ring);
-
-        spin_unlock(&xhci->ring_lock);
-
-       notify_via_evtchn(xhci->evtchn);
-
-        return req;
-}
-
-/**
- * xhci_port_reset - queue a reset request for a particular port
- */
-static int xhci_port_reset(usbif_vdev_t port)
-{
-        usbif_request_t *req;
-        usbif_front_ring_t *usb_ring = &xhci->usb_ring;
-
-        /* Only ever happens from process context (hub thread). */
-        spin_lock_irq(&xhci->ring_lock);
-
-        if ( RING_PLUGGED(xhci) )
-        {
-                printk(KERN_WARNING
-                       "xhci_port_reset(): ring plugged, not queuing 
request\n");
-                spin_unlock_irq(&xhci->ring_lock);
-                return -ENOBUFS;
-        }
-
-        /* We only reset one port at a time, so we only need one variable per
-         * hub. */
-        xhci->awaiting_reset = 1;
-        
-        /* Stick something in the shared communications ring. */
-       req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
-
-        memset(req, 0, sizeof(*req));
-
-        req->operation       = USBIF_OP_RESET;
-        req->port            = port;
-        
-        usb_ring->req_prod_pvt++;
-       RING_PUSH_REQUESTS(usb_ring);
-
-        spin_unlock_irq(&xhci->ring_lock);
-
-       notify_via_evtchn(xhci->evtchn);
-
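-        /* Busy-wait, draining the ring by hand, until the backend's reset
-         * response arrives: receive_usb_reset() stores resp->status in
-         * awaiting_reset, ending the loop. */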
-        while ( xhci->awaiting_reset > 0 )
-        {
-                mdelay(1);
-                xhci_drain_ring();
-        }
-
-       xhci->rh.ports[port].pe = 1;
-       xhci->rh.ports[port].pe_chg = 1;
-
-        return xhci->awaiting_reset;
-}
-
-
-/******************************************************************************
- * RING RESPONSE HANDLING
- */
-
-static void receive_usb_reset(usbif_response_t *resp)
-{
-    xhci->awaiting_reset = resp->status;
-    rmb();
-    
-}
-
-static void receive_usb_probe(usbif_response_t *resp)
-{
-    spin_lock(&xhci->rh.port_state_lock);
-
-    if ( resp->status >= 0 )
-    {
-        if ( resp->status == 1 )
-        {
-            /* If there's a device there and there wasn't one before, there must
-             * have been a connection status change. */
-            if( xhci->rh.ports[resp->data].cs == 0 )
-           {
-                xhci->rh.ports[resp->data].cs = 1;
-                xhci->rh.ports[resp->data].cs_chg = 1;
-           }
-        }
-        else if ( resp->status == 0 )
-        {
-            if(xhci->rh.ports[resp->data].cs == 1 )
-            {
-                xhci->rh.ports[resp->data].cs  = 0;
-                xhci->rh.ports[resp->data].cs_chg = 1;
-               xhci->rh.ports[resp->data].pe = 0;
-               /* According to USB Spec v2.0, 11.24.2.7.2.2, we don't need
-                * to set pe_chg since an error has not occurred. */
-            }
-        }
-        else
-            printk(KERN_WARNING "receive_usb_probe(): unexpected status %d "
-                   "for port %d\n", resp->status, resp->data);
-    }
-    else if ( resp->status < 0)
-        printk(KERN_WARNING "receive_usb_probe(): got error status %d\n",
-               resp->status);
-
-    spin_unlock(&xhci->rh.port_state_lock);
-}
-
-static void receive_usb_io(usbif_response_t *resp)
-{
-        struct urb_priv *urbp = (struct urb_priv *)resp->id;
-        struct urb *urb = urbp->urb;
-
-        urb->actual_length = resp->length;
-        urbp->in_progress = 0;
-
-        if( usb_pipetype(urb->pipe) == 0 ) /* ISO */
-        {
-                int i;
-              
-                /* Copy ISO schedule results back in. */
-                for ( i = 0; i < urb->number_of_packets; i++ )
-                {
-                        urb->iso_frame_desc[i].status
-                                = urbp->schedule[i].status;
-                        urb->iso_frame_desc[i].actual_length
-                                = urbp->schedule[i].length;
-                }
-                free_page((unsigned long)urbp->schedule);
-        }
-
-        /* Only set status if it's not been changed since submission.  It might
-         * have been changed if the URB has been unlinked asynchronously, for
-         * instance. */
-       if ( urb->status == -EINPROGRESS )
-                urbp->status = urb->status = resp->status;
-}
-
-/**
- * xhci_drain_ring - drain responses from the ring, calling handlers
- *
- * This may be called from interrupt context when an event is received from the
- * backend domain, or sometimes in process context whilst waiting for a port
- * reset or URB completion.
- */
-static void xhci_drain_ring(void)
-{
-       struct list_head *tmp, *head;
-       usbif_front_ring_t *usb_ring = &xhci->usb_ring;
-       usbif_response_t *resp;
-        RING_IDX i, rp;
-
-        /* Walk the ring here to get responses, updating URBs to show what
-         * completed. */
-        
-        rp = usb_ring->sring->rsp_prod;
-        rmb(); /* Ensure we see queued responses up to 'rp'. */
-
-        /* Take items off the comms ring, taking care not to overflow. */
-        for ( i = usb_ring->rsp_cons; i != rp; i++ )
-        {
-            resp = RING_GET_RESPONSE(usb_ring, i);
-            
-            /* May need to deal with batching and with putting a ceiling on
-               the number dispatched for performance and anti-dos reasons */
-
-            xhci_show_resp(resp);
-
-            switch ( resp->operation )
-            {
-            case USBIF_OP_PROBE:
-                receive_usb_probe(resp);
-                break;
-                
-            case USBIF_OP_IO:
-                receive_usb_io(resp);
-                break;
-
-            case USBIF_OP_RESET:
-                receive_usb_reset(resp);
-                break;
-
-            default:
-                printk(KERN_WARNING
-                       "error: unknown USB io operation response [%d]\n",
-                       resp->operation);
-                break;
-            }
-        }
-
-        usb_ring->rsp_cons = i;
-
-       /* Walk the list of pending URB's to see which ones completed and do
-         * callbacks, etc. */
-       spin_lock(&xhci->urb_list_lock);
-       head = &xhci->urb_list;
-       tmp = head->next;
-       while (tmp != head) {
-               struct urb *urb = list_entry(tmp, struct urb, urb_list);
-
-               tmp = tmp->next;
-
-               /* Checks the status and does all of the magic necessary */
-               xhci_transfer_result(xhci, urb);
-       }
-       spin_unlock(&xhci->urb_list_lock);
-
-       xhci_finish_completion();
-}
-
-
-static void xhci_interrupt(int irq, void *__xhci, struct pt_regs *regs)
-{
-        xhci_drain_ring();
-}
-
-/******************************************************************************
- * HOST CONTROLLER FUNCTIONALITY
- */
-
-/**
- * no-op implementation of private device alloc / free routines
- */
-static int xhci_do_nothing_dev(struct usb_device *dev)
-{
-       return 0;
-}
-
-static inline void xhci_add_complete(struct urb *urb)
-{
-       struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
-       unsigned long flags;
-
-       spin_lock_irqsave(&xhci->complete_list_lock, flags);
-       list_add_tail(&urbp->complete_list, &xhci->complete_list);
-       spin_unlock_irqrestore(&xhci->complete_list_lock, flags);
-}
-
-/* When this returns, the owner of the URB may free its
- * storage.
- *
- * We spin and wait for the URB to complete before returning.
- *
- * Call with urb->lock acquired.
- */
-static void xhci_delete_urb(struct urb *urb)
-{
-        struct urb_priv *urbp;
-
-       urbp = urb->hcpriv;
-
-        /* If there's no urb_priv structure for this URB then it can't have
-         * been submitted at all. */
-       if ( urbp == NULL )
-               return;
-
-       /* For now we just spin until the URB completes.  It shouldn't take too
-         * long and we don't expect to have to do this very often. */
-       while ( urb->status == -EINPROGRESS )
-        {
-            xhci_drain_ring();
-            mdelay(1);
-        }
-
-       /* Now we know that further transfers to the buffer won't
-        * occur, so we can safely return. */
-}
-
-static struct urb_priv *xhci_alloc_urb_priv(struct urb *urb)
-{
-       struct urb_priv *urbp;
-
-       urbp = kmem_cache_alloc(xhci_up_cachep, SLAB_ATOMIC);
-       if (!urbp) {
-               err("xhci_alloc_urb_priv: couldn't allocate memory for 
urb_priv\n");
-               return NULL;
-       }
-
-       memset((void *)urbp, 0, sizeof(*urbp));
-
-       urbp->inserttime = jiffies;
-       urbp->urb = urb;
-       urbp->dev = urb->dev;
-       
-       INIT_LIST_HEAD(&urbp->complete_list);
-
-       urb->hcpriv = urbp;
-
-       return urbp;
-}
-
-/*
- * MUST be called with urb->lock acquired
- */
-/* When is this called?  Do we need to stop the transfer (as we
- * currently do)? */
-static void xhci_destroy_urb_priv(struct urb *urb)
-{
-    struct urb_priv *urbp;
-    
-    urbp = (struct urb_priv *)urb->hcpriv;
-    if (!urbp)
-        return;
-
-    if (!list_empty(&urb->urb_list))
-        warn("xhci_destroy_urb_priv: urb %p still on xhci->urb_list", urb);
-    
-    if (!list_empty(&urbp->complete_list))
-        warn("xhci_destroy_urb_priv: urb %p still on xhci->complete_list", 
urb);
-    
-    kmem_cache_free(xhci_up_cachep, urb->hcpriv);
-
-    urb->hcpriv = NULL;
-}
-
-/**
- * Try to find URBs in progress on the same pipe to the same device.
- *
- * MUST be called with xhci->urb_list_lock acquired
- */
-static struct urb *xhci_find_urb_ep(struct xhci *xhci, struct urb *urb)
-{
-       struct list_head *tmp, *head;
-
-       /* We don't match Isoc transfers since they are special */
-       if (usb_pipeisoc(urb->pipe))
-               return NULL;
-
-       head = &xhci->urb_list;
-       tmp = head->next;
-       while (tmp != head) {
-               struct urb *u = list_entry(tmp, struct urb, urb_list);
-
-               tmp = tmp->next;
-
-               if (u->dev == urb->dev && u->pipe == urb->pipe &&
-                   u->status == -EINPROGRESS)
-                       return u;
-       }
-
-       return NULL;
-}
-
-static int xhci_submit_urb(struct urb *urb)
-{
-       int ret = -EINVAL;
-       unsigned long flags;
-       struct urb *eurb;
-       int bustime;
-
-        DPRINTK("URB submitted to XHCI driver.\n");
-        dump_urb(urb);
-
-       if (!urb)
-               return -EINVAL;
-
-       if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv) {
-               warn("xhci_submit_urb: urb %p belongs to disconnected device or 
bus?", urb);
-               return -ENODEV;
-       }
-
-        if ( urb->dev->devpath == NULL )
-                BUG();
-
-       usb_inc_dev_use(urb->dev);
-
-       spin_lock_irqsave(&xhci->urb_list_lock, flags);
-       spin_lock(&urb->lock);
-
-       if (urb->status == -EINPROGRESS || urb->status == -ECONNRESET ||
-           urb->status == -ECONNABORTED) {
-               dbg("xhci_submit_urb: urb not available to submit (status = 
%d)", urb->status);
-               /* Since we can have problems on the out path */
-               spin_unlock(&urb->lock);
-               spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
-               usb_dec_dev_use(urb->dev);
-
-               return ret;
-       }
-
-       INIT_LIST_HEAD(&urb->urb_list);
-       if (!xhci_alloc_urb_priv(urb)) {
-               ret = -ENOMEM;
-
-               goto out;
-       }
-
-        ( (struct urb_priv *)urb->hcpriv )->in_progress = 1;
-
-       eurb = xhci_find_urb_ep(xhci, urb);
-       if (eurb && !(urb->transfer_flags & USB_QUEUE_BULK)) {
-               ret = -ENXIO;
-
-               goto out;
-       }
-
-       /* Short circuit the virtual root hub */
-       if (urb->dev == xhci->rh.dev) {
-               ret = rh_submit_urb(urb);
-
-               goto out;
-       }
-
-       switch (usb_pipetype(urb->pipe)) {
-       case PIPE_CONTROL:
-       case PIPE_BULK:
-               ret = xhci_queue_req(urb);
-               break;
-
-       case PIPE_INTERRUPT:
-               if (urb->bandwidth == 0) {      /* not yet checked/allocated */
-                       bustime = usb_check_bandwidth(urb->dev, urb);
-                       if (bustime < 0)
-                               ret = bustime;
-                       else {
-                               ret = xhci_queue_req(urb);
-                               if (ret == -EINPROGRESS)
-                                       usb_claim_bandwidth(urb->dev, urb,
-                                                            bustime, 0);
-                       }
-               } else          /* bandwidth is already set */
-                       ret = xhci_queue_req(urb);
-               break;
-
-       case PIPE_ISOCHRONOUS:
-               if (urb->bandwidth == 0) {      /* not yet checked/allocated */
-                       if (urb->number_of_packets <= 0) {
-                               ret = -EINVAL;
-                               break;
-                       }
-                       bustime = usb_check_bandwidth(urb->dev, urb);
-                       if (bustime < 0) {
-                               ret = bustime;
-                               break;
-                       }
-
-                       ret = xhci_queue_req(urb);
-                       if (ret == -EINPROGRESS)
-                               usb_claim_bandwidth(urb->dev, urb, bustime, 1);
-               } else          /* bandwidth is already set */
-                       ret = xhci_queue_req(urb);
-               break;
-       }
-out:
-       urb->status = ret;
-
-       if (ret == -EINPROGRESS) {
-               /* We use _tail to make find_urb_ep more efficient */
-               list_add_tail(&urb->urb_list, &xhci->urb_list);
-
-               spin_unlock(&urb->lock);
-               spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
-
-               return 0;
-       }
-
-       xhci_delete_urb(urb);
-
-       spin_unlock(&urb->lock);
-       spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
-
-       /* Only call completion if it was successful */
-       if (!ret)
-               xhci_call_completion(urb);
-
-       return ret;
-}
-
-/*
- * Return the result of a transfer
- *
- * MUST be called with urb_list_lock acquired
- */
-static void xhci_transfer_result(struct xhci *xhci, struct urb *urb)
-{
-       int ret = 0;
-       unsigned long flags;
-       struct urb_priv *urbp;
-
-       /* The root hub is special */
-       if (urb->dev == xhci->rh.dev)
-               return;
-
-       spin_lock_irqsave(&urb->lock, flags);
-
-       urbp = (struct urb_priv *)urb->hcpriv;
-
-        if ( ( (struct urb_priv *)urb->hcpriv )->in_progress )
-                ret = -EINPROGRESS;
-
-        if (urb->actual_length < urb->transfer_buffer_length) {
-                if (urb->transfer_flags & USB_DISABLE_SPD) {
-                        ret = -EREMOTEIO;
-                }
-        }
-
-       if (urb->status == -EPIPE)
-        {
-                ret = urb->status;
-               /* endpoint has stalled - mark it halted */
-               usb_endpoint_halt(urb->dev, usb_pipeendpoint(urb->pipe),
-                                  usb_pipeout(urb->pipe));
-        }
-
-       if ((debug == 1 && ret != 0 && ret != -EPIPE) ||
-            (ret != 0 && debug > 1)) {
-               /* Some debugging code */
-               dbg("xhci_result_interrupt/bulk() failed with status %x",
-                       status);
-       }
-
-       if (ret == -EINPROGRESS)
-               goto out;
-
-       switch (usb_pipetype(urb->pipe)) {
-       case PIPE_CONTROL:
-       case PIPE_BULK:
-       case PIPE_ISOCHRONOUS:
-               /* Release bandwidth for Interrupt or Isoc. transfers */
-               /* Spinlock needed ? */
-               if (urb->bandwidth)
-                       usb_release_bandwidth(urb->dev, urb, 1);
-               xhci_delete_urb(urb);
-               break;
-       case PIPE_INTERRUPT:
-               /* Interrupts are an exception */
-               if (urb->interval)
-                       goto out_complete;
-
-               /* Release bandwidth for Interrupt or Isoc. transfers */
-               /* Spinlock needed ? */
-               if (urb->bandwidth)
-                       usb_release_bandwidth(urb->dev, urb, 0);
-               xhci_delete_urb(urb);
-               break;
-       default:
-               info("xhci_transfer_result: unknown pipe type %d for urb %p\n",
-                     usb_pipetype(urb->pipe), urb);
-       }
-
-       /* Remove it from xhci->urb_list */
-       list_del_init(&urb->urb_list);
-
-out_complete:
-       xhci_add_complete(urb);
-
-out:
-       spin_unlock_irqrestore(&urb->lock, flags);
-}
-
-static int xhci_unlink_urb(struct urb *urb)
-{
-       unsigned long flags;
-       struct urb_priv *urbp;
-
-       if (!urb)
-               return -EINVAL;
-
-       if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv)
-               return -ENODEV;
-
-       /* Only dereference the URB once we know it is valid. */
-       urbp = urb->hcpriv;
-
-       spin_lock_irqsave(&xhci->urb_list_lock, flags);
-       spin_lock(&urb->lock);
-
-       /* Release bandwidth for Interrupt or Isoc. transfers */
-       /* Spinlock needed ? */
-       if (urb->bandwidth) {
-               switch (usb_pipetype(urb->pipe)) {
-               case PIPE_INTERRUPT:
-                       usb_release_bandwidth(urb->dev, urb, 0);
-                       break;
-               case PIPE_ISOCHRONOUS:
-                       usb_release_bandwidth(urb->dev, urb, 1);
-                       break;
-               default:
-                       break;
-               }
-       }
-
-       if (urb->status != -EINPROGRESS) {
-               spin_unlock(&urb->lock);
-               spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
-               return 0;
-       }
-
-       list_del_init(&urb->urb_list);
-
-       /* Short circuit the virtual root hub */
-       if (urb->dev == xhci->rh.dev) {
-               rh_unlink_urb(urb);
-
-               spin_unlock(&urb->lock);
-               spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
-
-               xhci_call_completion(urb);
-       } else {
-               if (urb->transfer_flags & USB_ASYNC_UNLINK) {
-                        /* We don't currently attempt to cancel URBs
-                         * that have been queued in the ring.  We handle async
-                         * unlinked URBs when they complete. */
-                       urbp->status = urb->status = -ECONNABORTED;
-                       spin_unlock(&urb->lock);
-                       spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
-               } else {
-                       urb->status = -ENOENT;
-
-                       spin_unlock(&urb->lock);
-                       spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
-
-                       if (in_interrupt()) {   /* wait at least 1 frame */
-                               static int errorcount = 10;
-
-                               if (errorcount--)
-                                       dbg("xhci_unlink_urb called from 
interrupt for urb %p", urb);
-                               udelay(1000);
-                       } else
-                               schedule_timeout(1+1*HZ/1000); 
-
-                        xhci_delete_urb(urb);
-
-                       xhci_call_completion(urb);
-               }
-       }
-
-       return 0;
-}
-
-static void xhci_call_completion(struct urb *urb)
-{
-       struct urb_priv *urbp;
-       struct usb_device *dev = urb->dev;
-       int is_ring = 0, killed, resubmit_interrupt, status;
-       struct urb *nurb;
-       unsigned long flags;
-
-       spin_lock_irqsave(&urb->lock, flags);
-
-       urbp = (struct urb_priv *)urb->hcpriv;
-       if (!urbp || !urb->dev) {
-               spin_unlock_irqrestore(&urb->lock, flags);
-               return;
-       }
-
-       killed = (urb->status == -ENOENT || urb->status == -ECONNABORTED ||
-                       urb->status == -ECONNRESET);
-       resubmit_interrupt = (usb_pipetype(urb->pipe) == PIPE_INTERRUPT &&
-                       urb->interval);
-
-       nurb = urb->next;
-       if (nurb && !killed) {
-               int count = 0;
-
-               while (nurb && nurb != urb && count < MAX_URB_LOOP) {
-                       if (nurb->status == -ENOENT ||
-                           nurb->status == -ECONNABORTED ||
-                           nurb->status == -ECONNRESET) {
-                               killed = 1;
-                               break;
-                       }
-
-                       nurb = nurb->next;
-                       count++;
-               }
-
-               if (count == MAX_URB_LOOP)
-                       err("xhci_call_completion: too many linked URB's, loop? 
(first loop)");
-
-               /* Check to see if chain is a ring */
-               is_ring = (nurb == urb);
-       }
-
-       status = urbp->status;
-       if (!resubmit_interrupt || killed)
-               /* We don't need urb_priv anymore */
-               xhci_destroy_urb_priv(urb);
-
-       if (!killed)
-               urb->status = status;
-
-       spin_unlock_irqrestore(&urb->lock, flags);
-
-       if (urb->complete)
-               urb->complete(urb);
-
-       if (resubmit_interrupt)
-               /* Recheck the status. The completion handler may have */
-               /*  unlinked the resubmitting interrupt URB */
-               killed = (urb->status == -ENOENT ||
-                         urb->status == -ECONNABORTED ||
-                         urb->status == -ECONNRESET);
-
-       if (resubmit_interrupt && !killed) {
-                if ( urb->dev != xhci->rh.dev )
-                        xhci_queue_req(urb); /* XXX What if this fails? */
-                /* Don't need to resubmit URBs for the virtual root dev. */
-       } else {
-               if (is_ring && !killed) {
-                       urb->dev = dev;
-                       xhci_submit_urb(urb);
-               } else {
-                       /* We decrement the usage count after we're done */
-                       /*  with everything */
-                       usb_dec_dev_use(dev);
-               }
-       }
-}
-
-static void xhci_finish_completion(void)
-{
-       struct list_head *tmp, *head;
-       unsigned long flags;
-
-       spin_lock_irqsave(&xhci->complete_list_lock, flags);
-       head = &xhci->complete_list;
-       tmp = head->next;
-       while (tmp != head) {
-               struct urb_priv *urbp = list_entry(tmp, struct urb_priv,
-                                                   complete_list);
-               struct urb *urb = urbp->urb;
-
-               list_del_init(&urbp->complete_list);
-               spin_unlock_irqrestore(&xhci->complete_list_lock, flags);
-
-               xhci_call_completion(urb);
-
-               spin_lock_irqsave(&xhci->complete_list_lock, flags);
-               head = &xhci->complete_list;
-               tmp = head->next;
-       }
-       spin_unlock_irqrestore(&xhci->complete_list_lock, flags);
-}
-
-static struct usb_operations xhci_device_operations = {
-       .allocate = xhci_do_nothing_dev,
-       .deallocate = xhci_do_nothing_dev,
-        /* It doesn't look like any drivers actually care what the frame number
-        * is at the moment!  If necessary, we could approximate the current
-        * frame number by passing it from the backend in response messages. */
-       .get_frame_number = NULL,
-       .submit_urb = xhci_submit_urb,
-       .unlink_urb = xhci_unlink_urb
-};
-
-/******************************************************************************
- * VIRTUAL ROOT HUB EMULATION
- */
-
-static __u8 root_hub_dev_des[] =
-{
-       0x12,                   /*  __u8  bLength; */
-       0x01,                   /*  __u8  bDescriptorType; Device */
-       0x00,                   /*  __u16 bcdUSB; v1.0 */
-       0x01,
-       0x09,                   /*  __u8  bDeviceClass; HUB_CLASSCODE */
-       0x00,                   /*  __u8  bDeviceSubClass; */
-       0x00,                   /*  __u8  bDeviceProtocol; */
-       0x08,                   /*  __u8  bMaxPacketSize0; 8 Bytes */
-       0x00,                   /*  __u16 idVendor; */
-       0x00,
-       0x00,                   /*  __u16 idProduct; */
-       0x00,
-       0x00,                   /*  __u16 bcdDevice; */
-       0x00,
-       0x00,                   /*  __u8  iManufacturer; */
-       0x02,                   /*  __u8  iProduct; */
-       0x01,                   /*  __u8  iSerialNumber; */
-       0x01                    /*  __u8  bNumConfigurations; */
-};
-
-
-/* Configuration descriptor */
-static __u8 root_hub_config_des[] =
-{
-       0x09,                   /*  __u8  bLength; */
-       0x02,                   /*  __u8  bDescriptorType; Configuration */
-       0x19,                   /*  __u16 wTotalLength; */
-       0x00,
-       0x01,                   /*  __u8  bNumInterfaces; */
-       0x01,                   /*  __u8  bConfigurationValue; */
-       0x00,                   /*  __u8  iConfiguration; */
-       0x40,                   /*  __u8  bmAttributes;
-                                       Bit 7: Bus-powered, 6: Self-powered,
-                                       Bit 5 Remote-wakeup, 4..0: resvd */
-       0x00,                   /*  __u8  MaxPower; */
-
-       /* interface */
-       0x09,                   /*  __u8  if_bLength; */
-       0x04,                   /*  __u8  if_bDescriptorType; Interface */
-       0x00,                   /*  __u8  if_bInterfaceNumber; */
-       0x00,                   /*  __u8  if_bAlternateSetting; */
-       0x01,                   /*  __u8  if_bNumEndpoints; */
-       0x09,                   /*  __u8  if_bInterfaceClass; HUB_CLASSCODE */
-       0x00,                   /*  __u8  if_bInterfaceSubClass; */
-       0x00,                   /*  __u8  if_bInterfaceProtocol; */
-       0x00,                   /*  __u8  if_iInterface; */
-
-       /* endpoint */
-       0x07,                   /*  __u8  ep_bLength; */
-       0x05,                   /*  __u8  ep_bDescriptorType; Endpoint */
-       0x81,                   /*  __u8  ep_bEndpointAddress; IN Endpoint 1 */
-       0x03,                   /*  __u8  ep_bmAttributes; Interrupt */
-       0x08,                   /*  __u16 ep_wMaxPacketSize; 8 Bytes */
-       0x00,
-       0xff                    /*  __u8  ep_bInterval; 255 ms */
-};
-
-static __u8 root_hub_hub_des[] =
-{
-       0x09,                   /*  __u8  bLength; */
-       0x29,                   /*  __u8  bDescriptorType; Hub-descriptor */
-       0x02,                   /*  __u8  bNbrPorts; */
-       0x00,                   /* __u16  wHubCharacteristics; */
-       0x00,
-       0x01,                   /*  __u8  bPwrOn2pwrGood; 2ms */
-       0x00,                   /*  __u8  bHubContrCurrent; 0 mA */
-       0x00,                   /*  __u8  DeviceRemovable; *** 7 Ports max *** */
-       0xff                    /*  __u8  PortPwrCtrlMask; *** 7 ports max *** */
-};
-
-/* prepare Interrupt pipe transaction data; HUB INTERRUPT ENDPOINT */
-static int rh_send_irq(struct urb *urb)
-{
-       struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
-        xhci_port_t *ports = xhci->rh.ports;
-       unsigned long flags;
-       int i, len = 1;
-       __u16 data = 0;
-
-       spin_lock_irqsave(&urb->lock, flags);
-       for (i = 0; i < xhci->rh.numports; i++) {
-                /* Set a bit if anything at all has changed on the port, as per
-                * USB spec 11.12 */
-               data |= (ports[i].cs_chg || ports[i].pe_chg )
-                        ? (1 << (i + 1))
-                        : 0;
-
-               len = (i + 1) / 8 + 1;
-       }
-
-       *(__u16 *) urb->transfer_buffer = cpu_to_le16(data);
-       urb->actual_length = len;
-       urbp->status = 0;
-
-       spin_unlock_irqrestore(&urb->lock, flags);
-
-       if ((data > 0) && (xhci->rh.send != 0)) {
-               dbg("root-hub INT complete: data: %x", data);
-               xhci_call_completion(urb);
-       }
-
-       return 0;
-}
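
The interrupt transfer built here follows hub-class convention (USB spec 11.12): bit 0 of the bitmap reports the hub itself, so port i sets bit i+1, and len grows one byte per eight ports. A small userspace illustration of the packing, assuming a hypothetical two-port hub:

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical two-port hub: port 0 unchanged, port 1 changed. */
            int changed[2] = { 0, 1 };
            unsigned short data = 0;
            int i, len = 1;

            for (i = 0; i < 2; i++) {
                    if (changed[i])
                            data |= 1 << (i + 1);   /* bit 0 is the hub itself */
                    len = (i + 1) / 8 + 1;          /* one byte per 8 ports */
            }

            /* Prints data=0x0004 len=1: only port 1 (bit 2) changed. */
            printf("data=0x%04x len=%d\n", (unsigned)data, len);
            return 0;
    }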
-
-/* Virtual Root Hub INTs are polled by this timer every "interval" ms */
-static int rh_init_int_timer(struct urb *urb);
-
-static void rh_int_timer_do(unsigned long ptr)
-{
-       struct urb *urb = (struct urb *)ptr;
-       struct list_head list, *tmp, *head;
-       unsigned long flags;
-       int i;
-
-       for ( i = 0; i < xhci->rh.numports; i++)
-                xhci_queue_probe(i);
-
-       if (xhci->rh.send)
-               rh_send_irq(urb);
-
-       INIT_LIST_HEAD(&list);
-
-       spin_lock_irqsave(&xhci->urb_list_lock, flags);
-       head = &xhci->urb_list;
-       tmp = head->next;
-       while (tmp != head) {
-               struct urb *u = list_entry(tmp, struct urb, urb_list);
-               struct urb_priv *up = (struct urb_priv *)u->hcpriv;
-
-               tmp = tmp->next;
-
-               spin_lock(&u->lock);
-
-               /* Check if the URB timed out */
-               if (u->timeout && time_after_eq(jiffies,
-                                                up->inserttime + u->timeout)) {
-                       list_del(&u->urb_list);
-                       list_add_tail(&u->urb_list, &list);
-               }
-
-               spin_unlock(&u->lock);
-       }
-       spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
-
-       head = &list;
-       tmp = head->next;
-       while (tmp != head) {
-               struct urb *u = list_entry(tmp, struct urb, urb_list);
-
-               tmp = tmp->next;
-
-               u->transfer_flags |= USB_ASYNC_UNLINK | USB_TIMEOUT_KILLED;
-               xhci_unlink_urb(u);
-       }
-
-       rh_init_int_timer(urb);
-}
-
-/* Root Hub INTs are polled by this timer */
-static int rh_init_int_timer(struct urb *urb)
-{
-       xhci->rh.interval = urb->interval;
-       init_timer(&xhci->rh.rh_int_timer);
-       xhci->rh.rh_int_timer.function = rh_int_timer_do;
-       xhci->rh.rh_int_timer.data = (unsigned long)urb;
-       xhci->rh.rh_int_timer.expires = jiffies
-                + (HZ * (urb->interval < 30 ? 30 : urb->interval)) / 1000;
-       add_timer(&xhci->rh.rh_int_timer);
-
-       return 0;
-}
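
The expiry line converts urb->interval (milliseconds) into jiffies with a 30 ms floor. A small illustration of the arithmetic, assuming HZ=100 for concreteness:

    #include <stdio.h>

    #define HZ 100                  /* assumed tick rate, for illustration */

    int main(void)
    {
            int intervals[] = { 8, 30, 255 };   /* sample urb->interval values, in ms */
            int i;

            for (i = 0; i < 3; i++) {
                    int ms = intervals[i] < 30 ? 30 : intervals[i];
                    /* Same conversion rh_init_int_timer() applies: ms -> jiffies. */
                    printf("%3d ms -> %2d jiffies\n", intervals[i], (HZ * ms) / 1000);
            }
            return 0;
    }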
-
-#define OK(x)                  len = (x); break
-
-/* Root Hub Control Pipe */
-static int rh_submit_urb(struct urb *urb)
-{
-       unsigned int pipe = urb->pipe;
-       struct usb_ctrlrequest *cmd =
-                (struct usb_ctrlrequest *)urb->setup_packet;
-       void *data = urb->transfer_buffer;
-       int leni = urb->transfer_buffer_length;
-       int len = 0;
-       xhci_port_t *status;
-       int stat = 0;
-       int i;
-       int retstatus;
-        unsigned long flags;
-        
-       __u16 cstatus;
-       __u16 bmRType_bReq;
-       __u16 wValue;
-       __u16 wIndex;
-       __u16 wLength;
-
-       if (usb_pipetype(pipe) == PIPE_INTERRUPT) {
-               xhci->rh.urb = urb;
-               xhci->rh.send = 1;
-               xhci->rh.interval = urb->interval;
-               rh_init_int_timer(urb);
-
-               return -EINPROGRESS;
-       }
-
-       bmRType_bReq = cmd->bRequestType | cmd->bRequest << 8;
-       wValue = le16_to_cpu(cmd->wValue);
-       wIndex = le16_to_cpu(cmd->wIndex);
-       wLength = le16_to_cpu(cmd->wLength);
-
-       for (i = 0; i < 8; i++)
-               xhci->rh.c_p_r[i] = 0;
-
-        status = &xhci->rh.ports[wIndex - 1];
-
-        spin_lock_irqsave(&xhci->rh.port_state_lock, flags);
-
-       switch (bmRType_bReq) {
-               /* Request Destination:
-                  without flags: Device,
-                  RH_INTERFACE: interface,
-                  RH_ENDPOINT: endpoint,
-                  RH_CLASS means HUB here,
-                  RH_OTHER | RH_CLASS  almost always means HUB_PORT here
-               */
-
-       case RH_GET_STATUS:
-               *(__u16 *)data = cpu_to_le16(1);
-               OK(2);
-       case RH_GET_STATUS | RH_INTERFACE:
-               *(__u16 *)data = cpu_to_le16(0);
-               OK(2);
-       case RH_GET_STATUS | RH_ENDPOINT:
-               *(__u16 *)data = cpu_to_le16(0);
-               OK(2);
-       case RH_GET_STATUS | RH_CLASS:
-               *(__u32 *)data = cpu_to_le32(0);
-               OK(4);          /* hub power */
-       case RH_GET_STATUS | RH_OTHER | RH_CLASS:
-               cstatus = (status->cs_chg) |
-                       (status->pe_chg << 1) |
-                       (xhci->rh.c_p_r[wIndex - 1] << 4);
-               retstatus = (status->cs) |
-                       (status->pe << 1) |
-                       (status->susp << 2) |
-                       (1 << 8) |      /* power on */
-                       (status->lsda << 9);
-               *(__u16 *)data = cpu_to_le16(retstatus);
-               *(__u16 *)(data + 2) = cpu_to_le16(cstatus);
-               OK(4);
-       case RH_CLEAR_FEATURE | RH_ENDPOINT:
-               switch (wValue) {
-               case RH_ENDPOINT_STALL:
-                       OK(0);
-               }
-               break;
-       case RH_CLEAR_FEATURE | RH_CLASS:
-               switch (wValue) {
-               case RH_C_HUB_OVER_CURRENT:
-                       OK(0);  /* hub power over current */
-               }
-               break;
-       case RH_CLEAR_FEATURE | RH_OTHER | RH_CLASS:
-               switch (wValue) {
-               case RH_PORT_ENABLE:
-                        status->pe     = 0;
-                       OK(0);
-               case RH_PORT_SUSPEND:
-                        status->susp   = 0;
-                       OK(0);
-               case RH_PORT_POWER:
-                       OK(0);  /* port power */
-               case RH_C_PORT_CONNECTION:
-                        status->cs_chg = 0;
-                       OK(0);
-               case RH_C_PORT_ENABLE:
-                        status->pe_chg = 0;
-                       OK(0);
-               case RH_C_PORT_SUSPEND:
-                       /*** WR_RH_PORTSTAT(RH_PS_PSSC); */
-                       OK(0);
-               case RH_C_PORT_OVER_CURRENT:
-                       OK(0);  /* port power over current */
-               case RH_C_PORT_RESET:
-                       xhci->rh.c_p_r[wIndex - 1] = 0;
-                       OK(0);
-               }
-               break;
-       case RH_SET_FEATURE | RH_OTHER | RH_CLASS:
-               switch (wValue) {
-               case RH_PORT_SUSPEND:
-                        status->susp = 1;      
-                       OK(0);
-               case RH_PORT_RESET:
-                {
-                        int ret;
-                        xhci->rh.c_p_r[wIndex - 1] = 1;
-                        status->pr = 0;
-                        status->pe = 1;
-                        ret = xhci_port_reset(wIndex - 1);
-                        /* XXX MAW: should probably cancel queued transfers during reset... */
-                        if ( ret == 0 ) { OK(0); }
-                        else { return ret; }
-                }
-                break;
-               case RH_PORT_POWER:
-                       OK(0); /* port power ** */
-               case RH_PORT_ENABLE:
-                        status->pe = 1;
-                       OK(0);
-               }
-               break;
-       case RH_SET_ADDRESS:
-               xhci->rh.devnum = wValue;
-               OK(0);
-       case RH_GET_DESCRIPTOR:
-               switch ((wValue & 0xff00) >> 8) {
-               case 0x01:      /* device descriptor */
-                       len = min_t(unsigned int, leni,
-                                 min_t(unsigned int,
-                                     sizeof(root_hub_dev_des), wLength));
-                       memcpy(data, root_hub_dev_des, len);
-                       OK(len);
-               case 0x02:      /* configuration descriptor */
-                       len = min_t(unsigned int, leni,
-                                 min_t(unsigned int,
-                                     sizeof(root_hub_config_des), wLength));
-                       memcpy (data, root_hub_config_des, len);
-                       OK(len);
-               case 0x03:      /* string descriptors */
-                       len = usb_root_hub_string (wValue & 0xff,
-                               0, "XHCI-alt",
-                               data, wLength);
-                       if (len > 0) {
-                               OK(min_t(int, leni, len));
-                       } else 
-                               stat = -EPIPE;
-               }
-               break;
-       case RH_GET_DESCRIPTOR | RH_CLASS:
-               root_hub_hub_des[2] = xhci->rh.numports;
-               len = min_t(unsigned int, leni,
-                         min_t(unsigned int, sizeof(root_hub_hub_des), wLength));
-               memcpy(data, root_hub_hub_des, len);
-               OK(len);
-       case RH_GET_CONFIGURATION:
-               *(__u8 *)data = 0x01;
-               OK(1);
-       case RH_SET_CONFIGURATION:
-               OK(0);
-       case RH_GET_INTERFACE | RH_INTERFACE:
-               *(__u8 *)data = 0x00;
-               OK(1);
-       case RH_SET_INTERFACE | RH_INTERFACE:
-               OK(0);
-       default:
-               stat = -EPIPE;
-       }
-
-        spin_unlock_irqrestore(&xhci->rh.port_state_lock, flags);
-
-       urb->actual_length = len;
-
-       return stat;
-}
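
A detail worth noting in the GET_DESCRIPTOR cases above: the copy length is clamped three ways, by the URB buffer (leni), by the descriptor's own size, and by the host-requested wLength. A standalone illustration:

    #include <stdio.h>

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned int leni = 64;         /* URB transfer buffer size */
            unsigned int desc_size = 18;    /* e.g. sizeof(root_hub_dev_des) */
            unsigned int wLength = 8;       /* host asks for 8 bytes first */

            /* Same three-way clamp as the GET_DESCRIPTOR cases. */
            unsigned int len = min_u(leni, min_u(desc_size, wLength));

            printf("copy %u bytes\n", len); /* prints: copy 8 bytes */
            return 0;
    }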
-
-/*
- * MUST be called with urb->lock acquired
- */
-static int rh_unlink_urb(struct urb *urb)
-{
-       if (xhci->rh.urb == urb) {
-               urb->status = -ENOENT;
-               xhci->rh.send = 0;
-               xhci->rh.urb = NULL;
-               del_timer(&xhci->rh.rh_int_timer);
-       }
-       return 0;
-}
-
-/******************************************************************************
- * CONTROL PLANE FUNCTIONALITY
- */
-
-/**
- * alloc_xhci - initialise a new virtual root hub for a new USB device channel
- */
-static int alloc_xhci(void)
-{
-       int retval;
-       struct usb_bus *bus;
-
-       retval = -EBUSY;
-
-       xhci = kmalloc(sizeof(*xhci), GFP_KERNEL);
-       if (!xhci) {
-               err("couldn't allocate xhci structure");
-               retval = -ENOMEM;
-               goto err_alloc_xhci;
-       }
-
-       xhci->state = USBIF_STATE_CLOSED;
-
-       spin_lock_init(&xhci->urb_list_lock);
-       INIT_LIST_HEAD(&xhci->urb_list);
-
-       spin_lock_init(&xhci->complete_list_lock);
-       INIT_LIST_HEAD(&xhci->complete_list);
-
-       spin_lock_init(&xhci->frame_list_lock);
-
-       bus = usb_alloc_bus(&xhci_device_operations);
-
-       if (!bus) {
-               err("unable to allocate bus");
-               goto err_alloc_bus;
-       }
-
-       xhci->bus = bus;
-       bus->bus_name = "XHCI";
-       bus->hcpriv = xhci;
-
-       usb_register_bus(xhci->bus);
-
-       /* Initialize the root hub */
-
-       xhci->rh.numports = 0;
-
-       xhci->bus->root_hub = xhci->rh.dev = usb_alloc_dev(NULL, xhci->bus);
-       if (!xhci->rh.dev) {
-               err("unable to allocate root hub");
-               goto err_alloc_root_hub;
-       }
-
-       xhci->state = 0;
-
-       return 0;
-
-/*
- * error exits:
- */
-err_alloc_root_hub:
-        usb_deregister_bus(xhci->bus);
-       usb_free_bus(xhci->bus);
-       xhci->bus = NULL;
-
-err_alloc_bus:
-       kfree(xhci);
-
-err_alloc_xhci:
-       return retval;
-}
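
alloc_xhci() uses the usual kernel goto-unwind pattern: each failure label releases exactly what was set up before it, in reverse order. A minimal sketch of the idiom, with hypothetical names:

    #include <stdlib.h>

    struct widget {
            char *buf;
    };

    /* Each failure label undoes exactly the steps completed before it. */
    static struct widget *widget_alloc(void)
    {
            struct widget *w = malloc(sizeof(*w));
            if (w == NULL)
                    goto err_widget;

            w->buf = malloc(64);
            if (w->buf == NULL)
                    goto err_buf;

            return w;               /* success: fully constructed */

    err_buf:
            free(w);
    err_widget:
            return NULL;
    }

    int main(void)
    {
            struct widget *w = widget_alloc();

            if (w != NULL) {
                    free(w->buf);
                    free(w);
            }
            return 0;
    }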
-
-/**
- * usbif_status_change - deal with an incoming USB_INTERFACE_STATUS_ message
- */
-static void usbif_status_change(usbif_fe_interface_status_changed_t *status)
-{
-    ctrl_msg_t                   cmsg;
-    usbif_fe_interface_connect_t up;
-    long rc;
-    usbif_sring_t *sring;
-
-    switch ( status->status )
-    {
-    case USBIF_INTERFACE_STATUS_DESTROYED:
-        printk(KERN_WARNING "Unexpected usbif-DESTROYED message in state %d\n",
-               xhci->state);
-        break;
-
-    case USBIF_INTERFACE_STATUS_DISCONNECTED:
-        if ( xhci->state != USBIF_STATE_CLOSED )
-        {
-            printk(KERN_WARNING "Unexpected usbif-DISCONNECTED message"
-                   " in state %d\n", xhci->state);
-            break;
-            /* Not bothering to do recovery here for now.  Keep things
-             * simple. */
-
-            spin_lock_irq(&xhci->ring_lock);
-            
-            /* Clean up resources. */
-            free_page((unsigned long)xhci->usb_ring.sring);
-            unbind_evtchn_from_irqhandler(xhci->evtchn, xhci);
-
-            /* Plug the ring. */
-            xhci->recovery = 1;
-            wmb();
-            
-            spin_unlock_irq(&xhci->ring_lock);
-        }
-
-        /* Move from CLOSED to DISCONNECTED state. */
-        sring = (usbif_sring_t *)__get_free_page(GFP_KERNEL);
-        SHARED_RING_INIT(sring);
-        FRONT_RING_INIT(&xhci->usb_ring, sring, PAGE_SIZE);
-        xhci->state  = USBIF_STATE_DISCONNECTED;
-
-        /* Construct an interface-CONNECT message for the domain controller. */
-        cmsg.type      = CMSG_USBIF_FE;
-        cmsg.subtype   = CMSG_USBIF_FE_INTERFACE_CONNECT;
-        cmsg.length    = sizeof(usbif_fe_interface_connect_t);
-        up.shmem_frame = virt_to_mfn(sring);
-        memcpy(cmsg.msg, &up, sizeof(up));
-        
-        /* Tell the controller to bring up the interface. */
-        ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
-        break;
-
-    case USBIF_INTERFACE_STATUS_CONNECTED:
-        if ( xhci->state == USBIF_STATE_CLOSED )
-        {
-            printk(KERN_WARNING "Unexpected usbif-CONNECTED message"
-                   " in state %d\n", xhci->state);
-            break;
-        }
-
-        xhci->evtchn = status->evtchn;
-       xhci->bandwidth = status->bandwidth;
-       xhci->rh.numports = status->num_ports;
-
-        xhci->rh.ports = kmalloc (sizeof(xhci_port_t) * xhci->rh.numports, GFP_KERNEL);
-       
-       if ( xhci->rh.ports == NULL )
-            goto alloc_ports_nomem;
-       
-        memset(xhci->rh.ports, 0, sizeof(xhci_port_t) * xhci->rh.numports);
-
-       usb_connect(xhci->rh.dev);
-
-       if (usb_new_device(xhci->rh.dev) != 0) {
-               err("unable to start root hub");
-       }
-
-       /* Allocate the appropriate USB bandwidth here...  Need to
-         * somehow know what the total available is thought to be so we
-         * can calculate the reservation correctly. */
-       usb_claim_bandwidth(xhci->rh.dev, xhci->rh.urb,
-                           1000 - xhci->bandwidth, 0);
-
-        if ( (rc = bind_evtchn_to_irqhandler(xhci->evtchn, xhci_interrupt, 
-                               SA_SAMPLE_RANDOM, "usbif", xhci)) )
-                printk(KERN_ALERT"usbfront request_irq failed (%ld)\n",rc);
-
-       DPRINTK(KERN_INFO __FILE__
-                ": USB XHCI: SHM at %p (0x%lx), EVTCHN %d\n",
-                xhci->usb_ring.sring, virt_to_mfn(xhci->usbif),
-                xhci->evtchn);
-
-        xhci->state = USBIF_STATE_CONNECTED;
-
-        break;
-
-    default:
-        printk(KERN_WARNING "Status change to unknown value %d\n", 
-               status->status);
-        break;
-    }
-
-    return;
-
- alloc_ports_nomem:
-    printk(KERN_WARNING "Failed to allocate port memory, XHCI failed to 
connect.\n");
-    return;
-}
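
The DISCONNECTED arm above is the standard Xen split-ring bring-up: allocate a page, initialise the shared ring, wrap a front ring around it, then pass the page's frame number to the backend in the CONNECT message. A hedged sketch of that sequence using the generic io/ring.h macros and a hypothetical request/response pair (usbif.h defines the real usbif_sring_t and usbif_front_ring_t this same way):

    #include <linux/mm.h>
    #include <linux/errno.h>
    #include <asm-xen/xen-public/io/ring.h>

    /* Hypothetical request/response pair; usbif.h defines the real ones. */
    struct myif_request  { unsigned long id; };
    struct myif_response { unsigned long id; int status; };
    DEFINE_RING_TYPES(myif, struct myif_request, struct myif_response);

    static struct myif_front_ring front;

    static int ring_bring_up(void)
    {
            struct myif_sring *sring =
                    (struct myif_sring *)__get_free_page(GFP_KERNEL);

            if (sring == NULL)
                    return -ENOMEM;

            SHARED_RING_INIT(sring);                /* reset producer/consumer */
            FRONT_RING_INIT(&front, sring, PAGE_SIZE);

            /* virt_to_mfn(sring) is what gets sent to the backend in the
             * interface-CONNECT message so it can map the same page. */
            return 0;
    }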
-
-/**
- * usbif_ctrlif_rx - demux control messages by subtype
- */
-static void usbif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
-{
-    switch ( msg->subtype )
-    {
-    case CMSG_USBIF_FE_INTERFACE_STATUS_CHANGED:
-        usbif_status_change((usbif_fe_interface_status_changed_t *)
-                            &msg->msg[0]);
-        break;
-
-        /* New interface...? */
-    default:
-        msg->length = 0;
-        break;
-    }
-
-    ctrl_if_send_response(msg);
-}
-
-static void send_driver_up(void)
-{
-        control_msg_t cmsg;
-        usbif_fe_interface_status_changed_t st;
-
-        /* Send a driver-UP notification to the domain controller. */
-        cmsg.type      = CMSG_USBIF_FE;
-        cmsg.subtype   = CMSG_USBIF_FE_DRIVER_STATUS_CHANGED;
-        cmsg.length    = sizeof(usbif_fe_driver_status_changed_t);
-        st.status      = USBIF_DRIVER_STATUS_UP;
-        memcpy(cmsg.msg, &st, sizeof(st));
-        ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
-}
-
-void usbif_resume(void)
-{
-        int i;
-        
-        /* Fake disconnection on all virtual USB ports (suspending / migrating
-         * will destroy hard state associated with the USB devices anyhow). */
-        /* No need to lock here. */
-        for ( i = 0; i < xhci->rh.numports; i++ )
-        {
-                xhci->rh.ports[i].cs = 0;
-                xhci->rh.ports[i].cs_chg = 1;
-               xhci->rh.ports[i].pe = 0;
-        }
-        
-        send_driver_up();
-}
-
-static int __init xhci_hcd_init(void)
-{
-       int retval = -ENOMEM, i;
-
-       if ( (xen_start_info->flags & SIF_INITDOMAIN) ||
-            (xen_start_info->flags & SIF_USB_BE_DOMAIN) )
-                return 0;
-
-       info(DRIVER_DESC " " DRIVER_VERSION);
-
-       if (debug) {
-               errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
-               if (!errbuf)
-                       goto errbuf_failed;
-       }
-
-       xhci_up_cachep = kmem_cache_create("xhci_urb_priv",
-               sizeof(struct urb_priv), 0, 0, NULL, NULL);
-       if (!xhci_up_cachep)
-               goto up_failed;
-
-        /* Let the domain controller know we're here.  For now we wait until
-         * connection, as for the block and net drivers.  This is only strictly
-         * necessary if we're going to boot off a USB device. */
-        printk(KERN_INFO "Initialising Xen virtual USB hub\n");
-    
-        (void)ctrl_if_register_receiver(CMSG_USBIF_FE, usbif_ctrlif_rx,
-                                        CALLBACK_IN_BLOCKING_CONTEXT);
-        
-       alloc_xhci();
-
-        send_driver_up();
-
-        /*
-         * We should read 'nr_interfaces' from response message and wait
-         * for notifications before proceeding. For now we assume that we
-         * will be notified of exactly one interface.
-         */
-        for ( i=0; (xhci->state != USBIF_STATE_CONNECTED) && (i < 10*HZ); i++ )
-        {
-            set_current_state(TASK_INTERRUPTIBLE);
-            schedule_timeout(1);
-        }
-        
-        if (xhci->state != USBIF_STATE_CONNECTED)
-            printk(KERN_WARNING "Timeout connecting USB frontend driver!\n");
-       
-       return 0;
-
-up_failed:
-       if (errbuf)
-               kfree(errbuf);
-
-errbuf_failed:
-       return retval;
-}
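
The connect wait near the end of xhci_hcd_init() is a simple poll: sleep one jiffy at a time for up to ten seconds rather than blocking on a waitqueue. The same idiom in isolation, with a hypothetical flag:

    #include <linux/sched.h>
    #include <linux/errno.h>

    /* Hypothetical flag set elsewhere (e.g. by an interrupt handler). */
    static int wait_for_flag(volatile int *flag)
    {
            int i;

            for (i = 0; !*flag && i < 10 * HZ; i++) {
                    set_current_state(TASK_INTERRUPTIBLE);
                    schedule_timeout(1);            /* sleep roughly one jiffy */
            }
            return *flag ? 0 : -ETIMEDOUT;
    }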
-
-module_init(xhci_hcd_init);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-
diff -r eba5afe9aa37 -r 10759a44ce3b linux-2.6-xen-sparse/drivers/xen/usbfront/xhci.h
--- a/linux-2.6-xen-sparse/drivers/xen/usbfront/xhci.h  Thu Sep 22 15:05:44 2005
+++ /dev/null   Thu Sep 22 15:12:14 2005
@@ -1,182 +0,0 @@
-/******************************************************************************
- * xhci.h
- *
- * Private definitions for the Xen Virtual USB Controller.  Based on
- * drivers/usb/host/uhci.h from Linux.  Copyright for the imported content is
- * retained by the original authors.
- *
- * Modifications are:
- * Copyright (C) 2004 Intel Research Cambridge
- * Copyright (C) 2004, 2005 Mark Williamson
- */
-
-#ifndef __LINUX_XHCI_H
-#define __LINUX_XHCI_H
-
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <asm-xen/xen-public/io/usbif.h>
-#include <linux/spinlock.h>
-
-/* xhci_port_t - current known state of a virtual hub port */
-typedef struct {
-        unsigned int cs     :1; /* Connection status.         */
-        unsigned int cs_chg :1; /* Connection status change.  */
-        unsigned int pe     :1; /* Port enable.               */
-        unsigned int pe_chg :1; /* Port enable change.        */
-        unsigned int susp   :1; /* Suspended.                 */
-        unsigned int lsda   :1; /* Low speed device attached. */
-        unsigned int pr     :1; /* Port reset.                */
-} xhci_port_t;
-
-/* struct virt_root_hub - state related to the virtual root hub */
-struct virt_root_hub {
-       struct usb_device *dev;
-       int devnum;             /* Address of Root Hub endpoint */
-       struct urb *urb;
-       void *int_addr;
-       int send;
-       int interval;
-       int numports;
-       int c_p_r[8];
-       struct timer_list rh_int_timer;
-        spinlock_t port_state_lock;
-        xhci_port_t *ports;
-};
-
-/* struct xhci - contains the state associated with a single USB interface */
-struct xhci {
-
-#ifdef CONFIG_PROC_FS
-       /* procfs */
-       int num;
-       struct proc_dir_entry *proc_entry;
-#endif
-
-        int evtchn;                        /* Interdom channel to backend */
-        enum { 
-                USBIF_STATE_CONNECTED    = 2,
-                USBIF_STATE_DISCONNECTED = 1,
-                USBIF_STATE_CLOSED       = 0
-        } state; /* State of this USB interface */
-        unsigned long recovery; /* boolean recovery in progress flag */
-        
-        unsigned long bandwidth;
-
-       struct usb_bus *bus;
-
-       /* Main list of URB's currently controlled by this HC */
-       spinlock_t urb_list_lock;
-       struct list_head urb_list;              /* P: xhci->urb_list_lock */
-
-       /* List of URB's awaiting completion callback */
-       spinlock_t complete_list_lock;
-       struct list_head complete_list;         /* P: xhci->complete_list_lock */
-
-       struct virt_root_hub rh;        /* private data of the virtual root hub */
-
-        spinlock_t ring_lock;
-        usbif_front_ring_t usb_ring;
-
-        int awaiting_reset;
-};
-
-/* per-URB private data structure for the host controller */
-struct urb_priv {
-       struct urb *urb;
-        usbif_iso_t *schedule;
-       struct usb_device *dev;
-
-        int in_progress : 1;           /* QH was queued (not linked in) */
-       int short_control_packet : 1;   /* If we get a short packet during */
-                                       /*  a control transfer, retrigger */
-                                       /*  the status phase */
-
-       int status;                     /* Final status */
-
-       unsigned long inserttime;       /* In jiffies */
-
-       struct list_head complete_list; /* P: xhci->complete_list_lock */
-};
-
-/*
- * Locking in xhci.c
- *
- * spinlocks are used extensively to protect the many lists and data
- * structures we have. It's not that pretty, but it's necessary. We
- * need to be done with all of the locks (except complete_list_lock) when
- * we call urb->complete. I've tried to make it simple enough so I don't
- * have to spend hours racking my brain trying to figure out if the
- * locking is safe.
- *
- * Here's the safe locking order to prevent deadlocks:
- *
- * #1 xhci->urb_list_lock
- * #2 urb->lock
- * #3 xhci->urb_remove_list_lock
- * #4 xhci->complete_list_lock
- *
- * If you're going to grab 2 or more locks at once, ALWAYS grab the lock
- * at the lowest level FIRST and NEVER grab locks at the same level at the
- * same time.
- * 
- * So, if you need xhci->urb_list_lock, grab it before you grab urb->lock
- */
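
Applied to the structures in this header, the ordering rule means urb_list_lock is always taken before any urb->lock nested under it, exactly as rh_int_timer_do() does in usbfront.c. An illustrative fragment (types as declared above):

    /* Illustrative only: the same nesting rh_int_timer_do() uses. */
    static void scan_urbs(struct xhci *hc)
    {
            struct list_head *tmp;
            unsigned long flags;

            spin_lock_irqsave(&hc->urb_list_lock, flags);        /* lock #1 */
            for (tmp = hc->urb_list.next; tmp != &hc->urb_list; tmp = tmp->next) {
                    struct urb *u = list_entry(tmp, struct urb, urb_list);

                    spin_lock(&u->lock);                         /* then #2 */
                    /* ... examine or modify *u ... */
                    spin_unlock(&u->lock);
            }
            spin_unlock_irqrestore(&hc->urb_list_lock, flags);
    }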
-
-/* -------------------------------------------------------------------------
-   Virtual Root HUB
-   ------------------------------------------------------------------------- */
-/* destination of request */
-#define RH_DEVICE              0x00
-#define RH_INTERFACE           0x01
-#define RH_ENDPOINT            0x02
-#define RH_OTHER               0x03
-
-#define RH_CLASS               0x20
-#define RH_VENDOR              0x40
-
-/* Requests: bRequest << 8 | bmRequestType */
-#define RH_GET_STATUS          0x0080
-#define RH_CLEAR_FEATURE       0x0100
-#define RH_SET_FEATURE         0x0300
-#define RH_SET_ADDRESS         0x0500
-#define RH_GET_DESCRIPTOR      0x0680
-#define RH_SET_DESCRIPTOR      0x0700
-#define RH_GET_CONFIGURATION   0x0880
-#define RH_SET_CONFIGURATION   0x0900
-#define RH_GET_STATE           0x0280
-#define RH_GET_INTERFACE       0x0A80
-#define RH_SET_INTERFACE       0x0B00
-#define RH_SYNC_FRAME          0x0C80
-/* Our Vendor Specific Request */
-#define RH_SET_EP              0x2000
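
These request constants encode a whole setup packet as a single key, bRequest << 8 | bmRequestType, matching the composition rh_submit_urb() performs on cmd. For example, GET_DESCRIPTOR (bRequest 0x06) with a device-to-host bmRequestType of 0x80 yields 0x0680, i.e. RH_GET_DESCRIPTOR. A standalone check:

    #include <stdio.h>

    #define RH_GET_DESCRIPTOR 0x0680

    int main(void)
    {
            /* Standard USB setup-packet fields for GET_DESCRIPTOR. */
            unsigned char bRequestType = 0x80;      /* device-to-host */
            unsigned char bRequest     = 0x06;      /* GET_DESCRIPTOR */

            /* Same composition rh_submit_urb() uses for its switch key. */
            unsigned short key = bRequestType | bRequest << 8;

            printf("key=0x%04x matches RH_GET_DESCRIPTOR: %d\n",
                   key, key == RH_GET_DESCRIPTOR);
            return 0;
    }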
-
-/* Hub port features */
-#define RH_PORT_CONNECTION     0x00
-#define RH_PORT_ENABLE         0x01
-#define RH_PORT_SUSPEND                0x02
-#define RH_PORT_OVER_CURRENT   0x03
-#define RH_PORT_RESET          0x04
-#define RH_PORT_POWER          0x08
-#define RH_PORT_LOW_SPEED      0x09
-#define RH_C_PORT_CONNECTION   0x10
-#define RH_C_PORT_ENABLE       0x11
-#define RH_C_PORT_SUSPEND      0x12
-#define RH_C_PORT_OVER_CURRENT 0x13
-#define RH_C_PORT_RESET                0x14
-
-/* Hub features */
-#define RH_C_HUB_LOCAL_POWER   0x00
-#define RH_C_HUB_OVER_CURRENT  0x01
-#define RH_DEVICE_REMOTE_WAKEUP        0x00
-#define RH_ENDPOINT_STALL      0x01
-
-/* Our Vendor Specific feature */
-#define RH_REMOVE_EP           0x00
-
-#define RH_ACK                 0x01
-#define RH_REQ_ERR             -1
-#define RH_NACK                        0x00
-
-#endif
-

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
