
[Xen-changelog] [xen-unstable] merge with xen-unstable.hg staging

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] merge with xen-unstable.hg staging
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 23 Jan 2008 01:11:25 -0800
Delivery-date: Wed, 23 Jan 2008 01:19:04 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1200689388 25200
# Node ID a868bd4236e6da06dd8dad88b66ad4f55dcc4b50
# Parent  7643472d6b436a7433b98e8708979e2674fa9047
# Parent  1e6455d608bd06a3cc4ec83fd2cf5943d0dac20f
merge with xen-unstable.hg staging
---
 docs/man/xm.pod.1                         |   13 
 extras/mini-os/Makefile                   |    2 
 extras/mini-os/arch/ia64/mm.c             |    9 
 extras/mini-os/arch/x86/minios-x86_32.lds |    2 
 extras/mini-os/arch/x86/minios-x86_64.lds |    2 
 extras/mini-os/arch/x86/mm.c              |  163 +++-
 extras/mini-os/arch/x86/traps.c           |   44 +
 extras/mini-os/blkfront.c                 |  392 ++++++++++
 extras/mini-os/fs-front.c                 | 1126 ++++++++++++++++++++++++++++++
 extras/mini-os/include/blkfront.h         |   26 
 extras/mini-os/include/fs.h               |   51 +
 extras/mini-os/include/ia64/arch_mm.h     |    4 
 extras/mini-os/include/mm.h               |    5 
 extras/mini-os/include/types.h            |   10 
 extras/mini-os/include/x86/arch_mm.h      |    7 
 extras/mini-os/include/x86/traps.h        |    4 
 extras/mini-os/kernel.c                   |    7 
 tools/examples/network-nat                |    6 
 tools/fs-back/Makefile                    |   40 +
 tools/fs-back/fs-backend.c                |  346 +++++++++
 tools/fs-back/fs-backend.h                |   86 ++
 tools/fs-back/fs-ops.c                    |  658 +++++++++++++++++
 tools/fs-back/fs-xenbus.c                 |  180 ++++
 tools/ioemu/target-i386-dm/helper2.c      |   12 
 tools/python/xen/xend/image.py            |    2 
 tools/xentrace/xenctx.c                   |  104 +-
 unmodified_drivers/linux-2.6/mkbuildtree  |   59 -
 xen/arch/ia64/xen/hypercall.c             |    5 
 xen/arch/x86/hvm/vmx/vtd/dmar.c           |  223 +++--
 xen/arch/x86/hvm/vmx/vtd/dmar.h           |   31 
 xen/arch/x86/hvm/vmx/vtd/utils.c          |    9 
 xen/arch/x86/traps.c                      |    2 
 xen/arch/x86/x86_32/traps.c               |    2 
 xen/arch/x86/x86_64/traps.c               |    2 
 xen/include/asm-x86/hvm/vmx/intel-iommu.h |   93 +-
 xen/include/asm-x86/hypercall.h           |    2 
 xen/include/public/arch-ia64.h            |   15 
 xen/include/public/arch-powerpc.h         |   13 
 xen/include/public/arch-x86/xen.h         |   13 
 xen/include/public/io/fsif.h              |  181 ++++
 xen/include/public/xen.h                  |   13 
 41 files changed, 3692 insertions(+), 272 deletions(-)

diff -r 7643472d6b43 -r a868bd4236e6 docs/man/xm.pod.1
--- a/docs/man/xm.pod.1 Thu Jan 17 12:17:14 2008 -0700
+++ b/docs/man/xm.pod.1 Fri Jan 18 13:49:48 2008 -0700
@@ -254,9 +254,12 @@ domain, as it may balloon down its memor
 
 =item B<mem-set> I<domain-id> I<mem>
 
-Set the domain's used memory using the balloon driver.  Because this
-operation requires cooperation from the domain operating system, there
-is no guarantee that it will succeed.
+Set the domain's used memory using the balloon driver.
+
+Because this operation requires cooperation from the domain operating
+system, there is no guarantee that it will succeed.  This command will
+definitely not work unless the domain has the required paravirt
+driver.
 
 B<Warning:> There is no good way to know in advance how small of a
 mem-set will make a domain unstable and cause it to crash.  Be very
@@ -391,6 +394,10 @@ Attempting to set the VCPUs to a number 
 Attempting to set the VCPUs to a number larger than the initially
 configured VCPU count is an error.  Trying to set VCPUs to < 1 will be
 quietly ignored.
+
+Because this operation requires cooperation from the domain operating
+system, there is no guarantee that it will succeed.  This command will
+not work with a full virt domain.
 
 =item B<vcpu-list> [I<domain-id>]
 
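The two xm.pod.1 hunks above add the same caveat in two places: ballooning and VCPU changes are requests to the guest, not commands it must obey. As a purely illustrative aside (not part of the changeset), a typical invocation of the command whose synopsis appears above, asking the paravirtualised domain with ID 3 to balloon down to 512 MB, would be:

    xm mem-set 3 512

Whether the domain actually reaches that target depends entirely on its balloon driver, which is exactly what the added paragraphs spell out.
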
diff -r 7643472d6b43 -r a868bd4236e6 extras/mini-os/Makefile
--- a/extras/mini-os/Makefile   Thu Jan 17 12:17:14 2008 -0700
+++ b/extras/mini-os/Makefile   Fri Jan 18 13:49:48 2008 -0700
@@ -53,7 +53,7 @@ include minios.mk
 # Define some default flags for linking.
 LDLIBS := 
 LDARCHLIB := -L$(TARGET_ARCH_DIR) -l$(ARCH_LIB_NAME)
-LDFLAGS_FINAL := -N -T $(TARGET_ARCH_DIR)/minios-$(TARGET_ARCH).lds
+LDFLAGS_FINAL := -T $(TARGET_ARCH_DIR)/minios-$(TARGET_ARCH).lds
 
 # Prefix for global API names. All other symbols are localised before
 # linking with EXTRA_OBJS.
diff -r 7643472d6b43 -r a868bd4236e6 extras/mini-os/arch/ia64/mm.c
--- a/extras/mini-os/arch/ia64/mm.c     Thu Jan 17 12:17:14 2008 -0700
+++ b/extras/mini-os/arch/ia64/mm.c     Fri Jan 18 13:49:48 2008 -0700
@@ -124,9 +124,14 @@ arch_init_demand_mapping_area(unsigned l
 
 /* Helper function used in gnttab.c. */
 void*
-map_frames(unsigned long* frames, unsigned long n)
+map_frames_ex(unsigned long* frames, unsigned long n, unsigned long stride,
+       unsigned long increment, unsigned long alignment, domid_t id,
+       int may_fail, unsigned long prot)
 {
-       n = n;
+        /* TODO: incomplete! */
+        ASSERT(n == 1 || (stride == 0 && increment == 1));
+        ASSERT(id == DOMID_SELF);
+        ASSERT(prot == 0);
        return (void*) __va(SWAP(frames[0]) << PAGE_SHIFT);
 }
 
diff -r 7643472d6b43 -r a868bd4236e6 extras/mini-os/arch/x86/minios-x86_32.lds
--- a/extras/mini-os/arch/x86/minios-x86_32.lds Thu Jan 17 12:17:14 2008 -0700
+++ b/extras/mini-os/arch/x86/minios-x86_32.lds Fri Jan 18 13:49:48 2008 -0700
@@ -13,6 +13,8 @@ SECTIONS
   _etext = .;                  /* End of text section */
 
   .rodata : { *(.rodata) *(.rodata.*) }
+  . = ALIGN(4096);
+  _erodata = .;
 
   .data : {                    /* Data */
        *(.data)
diff -r 7643472d6b43 -r a868bd4236e6 extras/mini-os/arch/x86/minios-x86_64.lds
--- a/extras/mini-os/arch/x86/minios-x86_64.lds Thu Jan 17 12:17:14 2008 -0700
+++ b/extras/mini-os/arch/x86/minios-x86_64.lds Fri Jan 18 13:49:48 2008 -0700
@@ -13,6 +13,8 @@ SECTIONS
   _etext = .;                  /* End of text section */
 
   .rodata : { *(.rodata) *(.rodata.*) }
+  . = ALIGN(4096);
+  _erodata = .;
 
   .data : {                    /* Data */
        *(.data)
diff -r 7643472d6b43 -r a868bd4236e6 extras/mini-os/arch/x86/mm.c
--- a/extras/mini-os/arch/x86/mm.c      Thu Jan 17 12:17:14 2008 -0700
+++ b/extras/mini-os/arch/x86/mm.c      Fri Jan 18 13:49:48 2008 -0700
@@ -40,6 +40,7 @@
 #include <types.h>
 #include <lib.h>
 #include <xmalloc.h>
+#include <xen/memory.h>
 
 #ifdef MM_DEBUG
 #define DEBUG(_f, _a...) \
@@ -49,6 +50,7 @@
 #endif
 
 unsigned long *phys_to_machine_mapping;
+unsigned long mfn_zero;
 extern char stack[];
 extern void page_walk(unsigned long virt_addr);
 
@@ -270,12 +272,73 @@ void build_pagetable(unsigned long *star
         start_address += PAGE_SIZE;
     }
 
-    if (HYPERVISOR_update_va_mapping(0, (pte_t) {}, UVMF_INVLPG))
-        printk("Unable to unmap page 0\n");
-
     *start_pfn = pt_pfn;
 }
 
+extern void shared_info;
+static void set_readonly(void *text, void *etext)
+{
+    unsigned long start_address = ((unsigned long) text + PAGE_SIZE - 1) & PAGE_MASK;
+    unsigned long end_address = (unsigned long) etext;
+    static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
+    pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
+    unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
+    unsigned long offset;
+    int count = 0;
+
+    printk("setting %p-%p readonly\n", text, etext);
+
+    while (start_address + PAGE_SIZE <= end_address) {
+        tab = (pgentry_t *)start_info.pt_base;
+        mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
+
+#if defined(__x86_64__)
+        offset = l4_table_offset(start_address);
+        page = tab[offset];
+        mfn = pte_to_mfn(page);
+        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+#endif
+#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
+        offset = l3_table_offset(start_address);
+        page = tab[offset];
+        mfn = pte_to_mfn(page);
+        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+#endif
+        offset = l2_table_offset(start_address);        
+        page = tab[offset];
+        mfn = pte_to_mfn(page);
+        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+
+        offset = l1_table_offset(start_address);
+
+       if (start_address != (unsigned long)&shared_info) {
+           mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
+           mmu_updates[count].val = tab[offset] & ~_PAGE_RW;
+           count++;
+       } else
+           printk("skipped %p\n", start_address);
+
+        start_address += PAGE_SIZE;
+
+        if (count == L1_PAGETABLE_ENTRIES || start_address + PAGE_SIZE > end_address)
+        {
+            if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
+            {
+                printk("PTE could not be updated\n");
+                do_exit();
+            }
+            count = 0;
+        }
+    }
+
+    {
+       mmuext_op_t op = {
+           .cmd = MMUEXT_TLB_FLUSH_ALL,
+       };
+       int count;
+       HYPERVISOR_mmuext_op(&op, 1, &count, DOMID_SELF);
+    }
+}
 
 void mem_test(unsigned long *start_add, unsigned long *end_add)
 {
@@ -305,6 +368,7 @@ void mem_test(unsigned long *start_add, 
 
 static pgentry_t *demand_map_pgt;
 static void *demand_map_area_start;
+#define DEMAND_MAP_PAGES 1024
 
 void arch_init_demand_mapping_area(unsigned long max_pfn)
 {
@@ -364,20 +428,19 @@ void arch_init_demand_mapping_area(unsig
     printk("Initialised demand area.\n");
 }
 
-void *map_frames(unsigned long *f, unsigned long n)
+#define MAP_BATCH ((STACK_SIZE / 2) / sizeof(mmu_update_t))
+
+void *map_frames_ex(unsigned long *f, unsigned long n, unsigned long stride,
+       unsigned long increment, unsigned long alignment, domid_t id,
+       int may_fail, unsigned long prot)
 {
     unsigned long x;
     unsigned long y = 0;
-    mmu_update_t mmu_updates[16];
     int rc;
-
-    if (n > 16) {
-        printk("Tried to map too many (%ld) frames at once.\n", n);
-        return NULL;
-    }
+    unsigned long done = 0;
 
     /* Find a run of n contiguous frames */
-    for (x = 0; x <= 1024 - n; x += y + 1) {
+    for (x = 0; x <= DEMAND_MAP_PAGES - n; x = (x + y + 1 + alignment - 1) & ~(alignment - 1)) {
         for (y = 0; y < n; y++)
             if (demand_map_pgt[x+y] & _PAGE_PRESENT)
                 break;
@@ -385,26 +448,68 @@ void *map_frames(unsigned long *f, unsig
             break;
     }
     if (y != n) {
-        printk("Failed to map %ld frames!\n", n);
+        printk("Failed to find %ld frames!\n", n);
         return NULL;
     }
 
     /* Found it at x.  Map it in. */
-    for (y = 0; y < n; y++) {
-        mmu_updates[y].ptr = virt_to_mach(&demand_map_pgt[x + y]);
-        mmu_updates[y].val = (f[y] << PAGE_SHIFT) | L1_PROT;
-    }
-
-    rc = HYPERVISOR_mmu_update(mmu_updates, n, NULL, DOMID_SELF);
-    if (rc < 0) {
-        printk("Map %ld failed: %d.\n", n, rc);
-        return NULL;
-    } else {
-        return (void *)(unsigned long)((unsigned long)demand_map_area_start +
-                x * PAGE_SIZE);
-    }
-}
-
+
+    while (done < n) {
+       unsigned long todo;
+
+       if (may_fail)
+           todo = 1;
+       else
+           todo = n - done;
+
+       if (todo > MAP_BATCH)
+               todo = MAP_BATCH;
+
+       {
+           mmu_update_t mmu_updates[todo];
+
+           for (y = 0; y < todo; y++) {
+               mmu_updates[y].ptr = virt_to_mach(&demand_map_pgt[x + done + y]);
+               mmu_updates[y].val = ((f[(done + y) * stride] + (done + y) * increment) << PAGE_SHIFT) | prot;
+           }
+
+           rc = HYPERVISOR_mmu_update(mmu_updates, todo, NULL, id);
+           if (rc < 0) {
+               if (may_fail)
+                   f[done * stride] |= 0xF0000000;
+               else {
+                   printk("Map %ld (%lx, ...) failed: %d.\n", todo, f[done * stride], rc);
+                   return NULL;
+               }
+           }
+       }
+
+       done += todo;
+    }
+    return (void *)(unsigned long)((unsigned long)demand_map_area_start +
+           x * PAGE_SIZE);
+}
+
+static void clear_bootstrap(void)
+{
+    struct xen_memory_reservation reservation;
+    xen_pfn_t mfns[] = { virt_to_mfn(&shared_info) };
+    int n = sizeof(mfns)/sizeof(*mfns);
+    pte_t nullpte = { };
+
+    /* Use page 0 as the CoW zero page */
+    memset(NULL, 0, PAGE_SIZE);
+    mfn_zero = pfn_to_mfn(0);
+    if (HYPERVISOR_update_va_mapping(0, nullpte, UVMF_INVLPG))
+       printk("Unable to unmap page 0\n");
+
+    set_xen_guest_handle(reservation.extent_start, mfns);
+    reservation.nr_extents = n;
+    reservation.extent_order = 0;
+    reservation.domid = DOMID_SELF;
+    if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != n)
+       printk("Unable to free bootstrap pages\n");
+}
 
 void arch_init_p2m(unsigned long max_pfn)
 {
@@ -455,6 +560,7 @@ void arch_init_mm(unsigned long* start_p
 
     printk("  _text:        %p\n", &_text);
     printk("  _etext:       %p\n", &_etext);
+    printk("  _erodata:     %p\n", &_erodata);
     printk("  _edata:       %p\n", &_edata);
     printk("  stack start:  %p\n", stack);
     printk("  _end:         %p\n", &_end);
@@ -468,8 +574,9 @@ void arch_init_mm(unsigned long* start_p
     printk("  max_pfn:      %lx\n", max_pfn);
 
     build_pagetable(&start_pfn, &max_pfn);
+    clear_bootstrap();
+    set_readonly(&_text, &_erodata);
 
     *start_pfn_p = start_pfn;
     *max_pfn_p = max_pfn;
 }
-
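For orientation (this aside is not part of the changeset), the old map_frames() behaviour can be recovered as a thin wrapper around the new map_frames_ex() interface introduced above; treating L1_PROT as the default protection and DOMID_SELF as the owner is an assumption of this sketch:

    /* Hypothetical wrapper: map n distinct frames listed in f[] with the
     * usual writable protection, as the removed map_frames() used to do. */
    static inline void *map_frames(unsigned long *f, unsigned long n)
    {
        /* stride=1: take each mfn from f[]; increment=0: no arithmetic on it;
         * alignment=1: no alignment constraint; may_fail=0: abort on error. */
        return map_frames_ex(f, n, 1, 0, 1, DOMID_SELF, 0, L1_PROT);
    }

With stride 1 and increment 0 the inner loop computes (f[i] << PAGE_SHIFT) | L1_PROT, which is exactly the value the removed 16-frame version installed.
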
diff -r 7643472d6b43 -r a868bd4236e6 extras/mini-os/arch/x86/traps.c
--- a/extras/mini-os/arch/x86/traps.c   Thu Jan 17 12:17:14 2008 -0700
+++ b/extras/mini-os/arch/x86/traps.c   Fri Jan 18 13:49:48 2008 -0700
@@ -118,6 +118,46 @@ void page_walk(unsigned long virt_addres
 
 }
 
+static int handle_cow(unsigned long addr) {
+        pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
+       unsigned long new_page;
+       int rc;
+
+#if defined(__x86_64__)
+        page = tab[l4_table_offset(addr)];
+       if (!(page & _PAGE_PRESENT))
+           return 0;
+        tab = pte_to_virt(page);
+#endif
+#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
+        page = tab[l3_table_offset(addr)];
+       if (!(page & _PAGE_PRESENT))
+           return 0;
+        tab = pte_to_virt(page);
+#endif
+        page = tab[l2_table_offset(addr)];
+       if (!(page & _PAGE_PRESENT))
+           return 0;
+        tab = pte_to_virt(page);
+        
+        page = tab[l1_table_offset(addr)];
+       if (!(page & _PAGE_PRESENT))
+           return 0;
+       /* Only support CoW for the zero page.  */
+       if (PHYS_PFN(page) != mfn_zero)
+           return 0;
+
+       new_page = alloc_pages(0);
+       memset((void*) new_page, 0, PAGE_SIZE);
+
+       rc = HYPERVISOR_update_va_mapping(addr & PAGE_MASK, __pte(virt_to_mach(new_page) | L1_PROT), UVMF_INVLPG);
+       if (!rc)
+               return 1;
+
+       printk("Map zero page to %lx failed: %d.\n", addr, rc);
+       return 0;
+}
+
 #define read_cr2() \
         (HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2)
 
@@ -126,6 +166,10 @@ void do_page_fault(struct pt_regs *regs,
 void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
     unsigned long addr = read_cr2();
+
+    if ((error_code & TRAP_PF_WRITE) && handle_cow(addr))
+       return;
+
     /* If we are already handling a page fault, and got another one
        that means we faulted in pagetable walk. Continuing here would cause
        a recursive fault */       
diff -r 7643472d6b43 -r a868bd4236e6 extras/mini-os/blkfront.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/blkfront.c Fri Jan 18 13:49:48 2008 -0700
@@ -0,0 +1,392 @@
+/* Minimal block driver for Mini-OS. 
+ * Copyright (c) 2007-2008 Samuel Thibault.
+ * Based on netfront.c.
+ */
+
+#include <os.h>
+#include <xenbus.h>
+#include <events.h>
+#include <errno.h>
+#include <xen/io/blkif.h>
+#include <gnttab.h>
+#include <xmalloc.h>
+#include <time.h>
+#include <blkfront.h>
+#include <lib.h>
+#include <fcntl.h>
+
+/* Note: we generally don't need to disable IRQs since we hardly do anything in
+ * the interrupt handler.  */
+
+/* Note: we really suppose non-preemptive threads.  */
+
+DECLARE_WAIT_QUEUE_HEAD(blkfront_queue);
+
+
+
+
+#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
+#define GRANT_INVALID_REF 0
+
+
+struct blk_buffer {
+    void* page;
+    grant_ref_t gref;
+};
+
+struct blkfront_dev {
+    struct blkif_front_ring ring;
+    grant_ref_t ring_ref;
+    evtchn_port_t evtchn, local_port;
+    blkif_vdev_t handle;
+
+    char *nodename;
+    char *backend;
+    unsigned sector_size;
+    unsigned sectors;
+    int mode;
+    int barrier;
+    int flush;
+};
+
+static inline int xenblk_rxidx(RING_IDX idx)
+{
+    return idx & (BLK_RING_SIZE - 1);
+}
+
+void blkfront_handler(evtchn_port_t port, struct pt_regs *regs, void *data)
+{
+    wake_up(&blkfront_queue);
+}
+
+struct blkfront_dev *init_blkfront(char *nodename, uint64_t *sectors, unsigned *sector_size, int *mode)
+{
+    xenbus_transaction_t xbt;
+    char* err;
+    char* message=NULL;
+    struct blkif_sring *s;
+    int retry=0;
+    char* msg;
+    char* c;
+
+    struct blkfront_dev *dev;
+
+    ASSERT(!strncmp(nodename, "/local/domain/", 14));
+    nodename = strchr(nodename + 14, '/') + 1;
+
+    char path[strlen(nodename) + 1 + 10 + 1];
+
+    printk("******************* BLKFRONT for %s **********\n\n\n", nodename);
+
+    dev = malloc(sizeof(*dev));
+    dev->nodename = strdup(nodename);
+
+    s = (struct blkif_sring*) alloc_page();
+    memset(s,0,PAGE_SIZE);
+
+
+    SHARED_RING_INIT(s);
+    FRONT_RING_INIT(&dev->ring, s, PAGE_SIZE);
+
+    dev->ring_ref = gnttab_grant_access(0,virt_to_mfn(s),0);
+
+    evtchn_alloc_unbound_t op;
+    op.dom = DOMID_SELF;
+    snprintf(path, sizeof(path), "%s/backend-id", nodename);
+    op.remote_dom = xenbus_read_integer(path); 
+    HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);
+    clear_evtchn(op.port);        /* Without, handler gets invoked now! */
+    dev->local_port = bind_evtchn(op.port, blkfront_handler, dev);
+    dev->evtchn=op.port;
+
+    // FIXME: proper frees on failures
+again:
+    err = xenbus_transaction_start(&xbt);
+    if (err) {
+        printk("starting transaction\n");
+    }
+
+    err = xenbus_printf(xbt, nodename, "ring-ref","%u",
+                dev->ring_ref);
+    if (err) {
+        message = "writing ring-ref";
+        goto abort_transaction;
+    }
+    err = xenbus_printf(xbt, nodename,
+                "event-channel", "%u", dev->evtchn);
+    if (err) {
+        message = "writing event-channel";
+        goto abort_transaction;
+    }
+
+    err = xenbus_printf(xbt, nodename, "state", "%u",
+            4); /* connected */
+
+
+    err = xenbus_transaction_end(xbt, 0, &retry);
+    if (retry) {
+            goto again;
+        printk("completing transaction\n");
+    }
+
+    goto done;
+
+abort_transaction:
+    xenbus_transaction_end(xbt, 1, &retry);
+    return NULL;
+
+done:
+
+    snprintf(path, sizeof(path), "%s/backend", nodename);
+    msg = xenbus_read(XBT_NIL, path, &dev->backend);
+    if (msg) {
+        printk("Error %s when reading the backend path %s\n", msg, path);
+        return NULL;
+    }
+
+    printk("backend at %s\n", dev->backend);
+
+    dev->handle = simple_strtoul(strrchr(nodename, '/')+1, NULL, 0);
+
+    {
+        char path[strlen(dev->backend) + 1 + 19 + 1];
+        snprintf(path, sizeof(path), "%s/mode", dev->backend);
+        msg = xenbus_read(XBT_NIL, path, &c);
+        if (msg) {
+            printk("Error %s when reading the mode\n", msg);
+            return NULL;
+        }
+        if (*c == 'w')
+            *mode = dev->mode = O_RDWR;
+        else
+            *mode = dev->mode = O_RDONLY;
+        free(c);
+
+        snprintf(path, sizeof(path), "%s/state", dev->backend);
+
+        xenbus_watch_path(XBT_NIL, path);
+
+        xenbus_wait_for_value(path,"4");
+
+        xenbus_unwatch_path(XBT_NIL, path);
+
+        snprintf(path, sizeof(path), "%s/sectors", dev->backend);
+        // FIXME: read_integer returns an int, so disk size limited to 1TB for now
+        *sectors = dev->sectors = xenbus_read_integer(path);
+
+        snprintf(path, sizeof(path), "%s/sector-size", dev->backend);
+        *sector_size = dev->sector_size = xenbus_read_integer(path);
+
+        snprintf(path, sizeof(path), "%s/feature-barrier", dev->backend);
+        dev->barrier = xenbus_read_integer(path);
+
+        snprintf(path, sizeof(path), "%s/feature-flush-cache", dev->backend);
+        dev->flush = xenbus_read_integer(path);
+    }
+
+    printk("%u sectors of %u bytes\n", dev->sectors, dev->sector_size);
+    printk("**************************\n");
+
+    return dev;
+}
+
+void shutdown_blkfront(struct blkfront_dev *dev)
+{
+    char* err;
+    char *nodename = dev->nodename;
+
+    char path[strlen(dev->backend) + 1 + 5 + 1];
+
+    blkfront_sync(dev);
+
+    printk("close blk: backend at %s\n",dev->backend);
+
+    snprintf(path, sizeof(path), "%s/state", dev->backend);
+    err = xenbus_printf(XBT_NIL, nodename, "state", "%u", 5); /* closing */
+    xenbus_wait_for_value(path,"5");
+
+    err = xenbus_printf(XBT_NIL, nodename, "state", "%u", 6);
+    xenbus_wait_for_value(path,"6");
+
+    unbind_evtchn(dev->local_port);
+
+    free(nodename);
+    free(dev->backend);
+    free(dev);
+}
+
+static void blkfront_wait_slot(struct blkfront_dev *dev)
+{
+    /* Wait for a slot */
+    if (RING_FULL(&dev->ring)) {
+       unsigned long flags;
+       DEFINE_WAIT(w);
+       local_irq_save(flags);
+       while (1) {
+           blkfront_aio_poll(dev);
+           if (!RING_FULL(&dev->ring))
+               break;
+           /* Really no slot, go to sleep. */
+           add_waiter(w, blkfront_queue);
+           local_irq_restore(flags);
+           schedule();
+           local_irq_save(flags);
+       }
+       remove_waiter(w);
+       local_irq_restore(flags);
+    }
+}
+
+/* Issue an aio */
+void blkfront_aio(struct blkfront_aiocb *aiocbp, int write)
+{
+    struct blkfront_dev *dev = aiocbp->aio_dev;
+    struct blkif_request *req;
+    RING_IDX i;
+    int notify;
+    int n, j;
+    uintptr_t start, end;
+
+    // Can't io at non-sector-aligned location
+    ASSERT(!(aiocbp->aio_offset & (dev->sector_size-1)));
+    // Can't io non-sector-sized amounts
+    ASSERT(!(aiocbp->aio_nbytes & (dev->sector_size-1)));
+    // Can't io non-sector-aligned buffer
+    ASSERT(!((uintptr_t) aiocbp->aio_buf & (dev->sector_size-1)));
+
+    start = (uintptr_t)aiocbp->aio_buf & PAGE_MASK;
+    end = ((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes + PAGE_SIZE - 1) & PAGE_MASK;
+    n = (end - start) / PAGE_SIZE;
+
+    /* qemu's IDE max multsect is 16 (8KB) and SCSI max DMA was set to 32KB,
+     * so max 44KB can't happen */
+    ASSERT(n <= BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
+    blkfront_wait_slot(dev);
+    i = dev->ring.req_prod_pvt;
+    req = RING_GET_REQUEST(&dev->ring, i);
+
+    req->operation = write ? BLKIF_OP_WRITE : BLKIF_OP_READ;
+    req->nr_segments = n;
+    req->handle = dev->handle;
+    req->id = (uintptr_t) aiocbp;
+    req->sector_number = aiocbp->aio_offset / dev->sector_size;
+
+    for (j = 0; j < n; j++) {
+       uintptr_t data = start + j * PAGE_SIZE;
+       aiocbp->gref[j] = req->seg[j].gref =
+            gnttab_grant_access(0, virt_to_mfn(data), write);
+       req->seg[j].first_sect = 0;
+       req->seg[j].last_sect = PAGE_SIZE / dev->sector_size - 1;
+    }
+    req->seg[0].first_sect = ((uintptr_t)aiocbp->aio_buf & ~PAGE_MASK) / dev->sector_size;
+    req->seg[n-1].last_sect = (((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes - 1) & ~PAGE_MASK) / dev->sector_size;
+
+    dev->ring.req_prod_pvt = i + 1;
+
+    wmb();
+    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);
+
+    if(notify) notify_remote_via_evtchn(dev->evtchn);
+}
+
+void blkfront_aio_write(struct blkfront_aiocb *aiocbp)
+{
+    blkfront_aio(aiocbp, 1);
+}
+
+void blkfront_aio_read(struct blkfront_aiocb *aiocbp)
+{
+    blkfront_aio(aiocbp, 0);
+}
+
+int blkfront_aio_poll(struct blkfront_dev *dev)
+{
+    RING_IDX rp, cons;
+    struct blkif_response *rsp;
+
+moretodo:
+    rp = dev->ring.sring->rsp_prod;
+    rmb(); /* Ensure we see queued responses up to 'rp'. */
+    cons = dev->ring.rsp_cons;
+
+    int nr_consumed = 0;
+    while ((cons != rp))
+    {
+       rsp = RING_GET_RESPONSE(&dev->ring, cons);
+
+        switch (rsp->operation) {
+        case BLKIF_OP_READ:
+        case BLKIF_OP_WRITE:
+        {
+            struct blkfront_aiocb *aiocbp = (void*) (uintptr_t) rsp->id;
+            int n = (aiocbp->aio_nbytes + PAGE_SIZE - 1) / PAGE_SIZE, j;
+            for (j = 0; j < n; j++)
+                gnttab_end_access(aiocbp->gref[j]);
+
+            /* Nota: callback frees aiocbp itself */
+            aiocbp->aio_cb(aiocbp, rsp->status ? -EIO : 0);
+            break;
+        }
+        case BLKIF_OP_WRITE_BARRIER:
+        case BLKIF_OP_FLUSH_DISKCACHE:
+            break;
+        default:
+            printk("unrecognized block operation %d response\n", rsp->operation);
+            break;
+        }
+
+       nr_consumed++;
+       ++cons;
+    }
+    dev->ring.rsp_cons = cons;
+
+    int more;
+    RING_FINAL_CHECK_FOR_RESPONSES(&dev->ring, more);
+    if (more) goto moretodo;
+
+    return nr_consumed;
+}
+
+static void blkfront_push_operation(struct blkfront_dev *dev, uint8_t op)
+{
+    int i;
+    struct blkif_request *req;
+    int notify;
+
+    blkfront_wait_slot(dev);
+    i = dev->ring.req_prod_pvt;
+    req = RING_GET_REQUEST(&dev->ring, i);
+    req->operation = op;
+    dev->ring.req_prod_pvt = i + 1;
+    wmb();
+    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);
+    if (notify) notify_remote_via_evtchn(dev->evtchn);
+}
+
+void blkfront_sync(struct blkfront_dev *dev)
+{
+    unsigned long flags;
+
+    if (dev->barrier == 1)
+        blkfront_push_operation(dev, BLKIF_OP_WRITE_BARRIER);
+
+    if (dev->flush == 1)
+        blkfront_push_operation(dev, BLKIF_OP_FLUSH_DISKCACHE);
+
+    /* Note: This won't finish if another thread enqueues requests.  */
+    local_irq_save(flags);
+    DEFINE_WAIT(w);
+    while (1) {
+       blkfront_aio_poll(dev);
+       if (RING_FREE_REQUESTS(&dev->ring) == RING_SIZE(&dev->ring))
+           break;
+
+       add_waiter(w, blkfront_queue);
+       local_irq_restore(flags);
+       schedule();
+       local_irq_save(flags);
+    }
+    remove_waiter(w);
+    local_irq_restore(flags);
+}
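To make the new blkfront interface concrete, here is a minimal usage sketch (not part of the changeset); the xenstore path and the callback name are assumptions, and only functions and aiocb fields that appear in blkfront.c above are used:

    /* Hypothetical Mini-OS snippet: open the block device, read one sector
     * by polling for the completion, then shut the frontend down. */
    static int read_finished;

    static void read_done(struct blkfront_aiocb *aiocb, int ret)
    {
        /* ret is 0 on success, -EIO on error (see blkfront_aio_poll) */
        read_finished = 1;
    }

    void blkfront_demo(void)
    {
        uint64_t sectors;
        unsigned sector_size;
        int mode;
        struct blkfront_dev *dev;
        struct blkfront_aiocb aiocb;
        void *buf = (void *) alloc_page();   /* page-aligned, so sector-aligned */

        /* The frontend xenstore node below is an illustrative assumption. */
        dev = init_blkfront("/local/domain/1/device/vbd/768",
                            &sectors, &sector_size, &mode);
        if (!dev)
            return;

        memset(&aiocb, 0, sizeof(aiocb));
        aiocb.aio_dev    = dev;
        aiocb.aio_buf    = buf;
        aiocb.aio_nbytes = sector_size;      /* sector-sized, sector-aligned */
        aiocb.aio_offset = 0;
        aiocb.aio_cb     = read_done;

        blkfront_aio_read(&aiocb);
        while (!read_finished)
            blkfront_aio_poll(dev);          /* reap responses until ours lands */

        shutdown_blkfront(dev);
    }

Note that shutdown_blkfront() already calls blkfront_sync(), so no explicit flush is needed before tearing the connection down.
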
diff -r 7643472d6b43 -r a868bd4236e6 extras/mini-os/fs-front.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/fs-front.c Fri Jan 18 13:49:48 2008 -0700
@@ -0,0 +1,1126 @@
+/******************************************************************************
+ * fs-front.c
+ * 
+ * Frontend driver for FS split device driver.
+ *
+ * Copyright (c) 2007, Grzegorz Milos, Sun Microsystems, Inc.
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#undef NDEBUG
+#include <os.h>
+#include <list.h>
+#include <xmalloc.h>
+#include <xenbus.h>
+#include <gnttab.h>
+#include <events.h>
+#include <xen/io/fsif.h>
+#include <fs.h>
+#include <sched.h>
+
+#define preempt_disable()
+#define preempt_enable()
+#define cmpxchg(p,o,n) synch_cmpxchg(p,o,n)
+
+
+#ifdef FS_DEBUG
+#define DEBUG(_f, _a...) \
+    printk("MINI_OS(file=fs-front.c, line=%d) " _f "\n", __LINE__, ## _a)
+#else
+#define DEBUG(_f, _a...)    ((void)0)
+#endif
+
+
+struct fs_request;
+struct fs_import *fs_import;
+
+/******************************************************************************/
+/*                      RING REQUEST/RESPONSES HANDLING                       */
+/******************************************************************************/
+
+struct fs_request
+{
+    void *page;
+    grant_ref_t gref;
+    struct thread *thread;                 /* Thread blocked on this request */
+    struct fsif_response shadow_rsp;       /* Response copy writen by the 
+                                              interrupt handler */  
+};
+
+/* Ring operations:
+ * FSIF ring is used differently to Linux-like split devices. This stems from 
+ * the fact that no I/O request queue is present. The use of some of the macros
+ * defined in ring.h is not allowed, in particular:
+ * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY cannot be used.
+ *
+ * The protocol used for FSIF ring is described below:
+ *
+ * In order to reserve a request the frontend:
+ * a) saves current frontend_ring->req_prod_pvt into a local variable
+ * b) checks that there are free request using the local req_prod_pvt
+ * c) tries to reserve the request using cmpxchg on frontend_ring->req_prod_pvt
+ *    if cmpxchg fails, it means that someone reserved the request, start from
+ *    a)
+ * 
+ * In order to commit a request to the shared ring:
+ * a) cmpxchg shared_ring->req_prod from local req_prod_pvt to req_prod_pvt+1 
+ *    Loop if unsuccessful.
+ * NOTE: Request should be commited to the shared ring as quickly as possible,
+ *       because otherwise other threads might busy loop trying to commit next
+ *       requests. It also follows that preemption should be disabled, if
+ *       possible, for the duration of the request construction.
+ */
+
+/* Number of free requests (for use on front side only). */
+#define FS_RING_FREE_REQUESTS(_r, _req_prod_pvt)                         \
+    (RING_SIZE(_r) - (_req_prod_pvt - (_r)->rsp_cons))
+
+
+
+static RING_IDX reserve_fsif_request(struct fs_import *import)
+{
+    RING_IDX idx; 
+
+    down(&import->reqs_sem);
+    preempt_disable();
+again:    
+    /* We will attempt to reserve slot idx */
+    idx = import->ring.req_prod_pvt;
+    ASSERT (FS_RING_FREE_REQUESTS(&import->ring, idx));
+    /* Attempt to reserve */
+    if(cmpxchg(&import->ring.req_prod_pvt, idx, idx+1) != idx)
+        goto again;
+
+    return idx; 
+}
+
+static void commit_fsif_request(struct fs_import *import, RING_IDX idx)
+{
+    while(cmpxchg(&import->ring.sring->req_prod, idx, idx+1) != idx)
+    {
+        printk("Failed to commit a request: req_prod=%d, idx=%d\n",
+                import->ring.sring->req_prod, idx);
+    }
+    preempt_enable();
+
+    /* NOTE: we cannot do anything clever about rsp_event, to hold off
+     * notifications, because we don't know if we are a single request (in which
+     * case we have to notify always), or a part of a larger request group
+     * (when, in some cases, notification isn't required) */
+    notify_remote_via_evtchn(import->local_port);
+}
+
+
+
+static inline void add_id_to_freelist(unsigned int id,unsigned short* freelist)
+{
+    unsigned int old_id, new_id;
+
+again:    
+    old_id = freelist[0];
+    /* Note: temporal inconsistency, since freelist[0] can be changed by someone
+     * else, but we are a sole owner of freelist[id], it's OK. */
+    freelist[id] = old_id;
+    new_id = id;
+    if(cmpxchg(&freelist[0], old_id, new_id) != old_id)
+    {
+        printk("Cmpxchg on freelist add failed.\n");
+        goto again;
+    }
+}
+
+/* always call reserve_fsif_request(import) before this, to protect from
+ * depletion. */
+static inline unsigned short get_id_from_freelist(unsigned short* freelist)
+{
+    unsigned int old_id, new_id;
+
+again:    
+    old_id = freelist[0];
+    new_id = freelist[old_id];
+    if(cmpxchg(&freelist[0], old_id, new_id) != old_id)
+    {
+        printk("Cmpxchg on freelist remove failed.\n");
+        goto again;
+    }
+    
+    return old_id;
+}
+
+/******************************************************************************/
+/*                  END OF RING REQUEST/RESPONSES HANDLING                    */
+/******************************************************************************/
+
+
+
+/******************************************************************************/
+/*                         INDIVIDUAL FILE OPERATIONS                         */
+/******************************************************************************/
+int fs_open(struct fs_import *import, char *file)
+{
+    struct fs_request *fsr;
+    unsigned short priv_req_id;
+    RING_IDX back_req_id; 
+    struct fsif_request *req;
+    int fd;
+
+    /* Prepare request for the backend */
+    back_req_id = reserve_fsif_request(import);
+    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
+
+    /* Prepare our private request structure */
+    priv_req_id = get_id_from_freelist(import->freelist);
+    DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
+    fsr = &import->requests[priv_req_id];
+    fsr->thread = current;
+    sprintf(fsr->page, "%s", file);
+
+    req = RING_GET_REQUEST(&import->ring, back_req_id);
+    req->type = REQ_FILE_OPEN;
+    req->id = priv_req_id;
+    req->u.fopen.gref = fsr->gref;
+
+    /* Set blocked flag before commiting the request, thus avoiding missed
+     * response race */
+    block(current);
+    commit_fsif_request(import, back_req_id);
+    schedule();
+    
+    /* Read the response */
+    fd = (int)fsr->shadow_rsp.ret_val;
+    DEBUG("The following FD returned: %d\n", fd);
+    add_id_to_freelist(priv_req_id, import->freelist);
+
+    return fd;
+} 
+
+int fs_close(struct fs_import *import, int fd)
+{
+    struct fs_request *fsr;
+    unsigned short priv_req_id;
+    RING_IDX back_req_id; 
+    struct fsif_request *req;
+    int ret;
+
+    /* Prepare request for the backend */
+    back_req_id = reserve_fsif_request(import);
+    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
+
+    /* Prepare our private request structure */
+    priv_req_id = get_id_from_freelist(import->freelist);
+    DEBUG("Request id for fs_close call is: %d\n", priv_req_id);
+    fsr = &import->requests[priv_req_id];
+    fsr->thread = current;
+
+    req = RING_GET_REQUEST(&import->ring, back_req_id);
+    req->type = REQ_FILE_CLOSE;
+    req->id = priv_req_id;
+    req->u.fclose.fd = fd;
+
+    /* Set blocked flag before commiting the request, thus avoiding missed
+     * response race */
+    block(current);
+    commit_fsif_request(import, back_req_id);
+    schedule();
+    
+    /* Read the response */
+    ret = (int)fsr->shadow_rsp.ret_val;
+    DEBUG("Close returned: %d\n", ret);
+    add_id_to_freelist(priv_req_id, import->freelist);
+
+    return ret;
+}
+
+ssize_t fs_read(struct fs_import *import, int fd, void *buf, 
+                ssize_t len, ssize_t offset)
+{
+    struct fs_request *fsr;
+    unsigned short priv_req_id;
+    RING_IDX back_req_id; 
+    struct fsif_request *req;
+    ssize_t ret;
+
+    BUG_ON(len > PAGE_SIZE);
+
+    /* Prepare request for the backend */
+    back_req_id = reserve_fsif_request(import);
+    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
+
+    /* Prepare our private request structure */
+    priv_req_id = get_id_from_freelist(import->freelist);
+    DEBUG("Request id for fs_read call is: %d\n", priv_req_id);
+    fsr = &import->requests[priv_req_id];
+    fsr->thread = current;
+    memset(fsr->page, 0, PAGE_SIZE);
+
+    req = RING_GET_REQUEST(&import->ring, back_req_id);
+    req->type = REQ_FILE_READ;
+    req->id = priv_req_id;
+    req->u.fread.fd = fd;
+    req->u.fread.gref = fsr->gref;
+    req->u.fread.len = len;
+    req->u.fread.offset = offset;
+
+    /* Set blocked flag before commiting the request, thus avoiding missed
+     * response race */
+    block(current);
+    commit_fsif_request(import, back_req_id);
+    schedule();
+    
+    /* Read the response */
+    ret = (ssize_t)fsr->shadow_rsp.ret_val;
+    DEBUG("The following ret value returned %d\n", ret);
+    if(ret > 0)
+        memcpy(buf, fsr->page, ret);
+    add_id_to_freelist(priv_req_id, import->freelist);
+
+    return ret;
+} 
+
+ssize_t fs_write(struct fs_import *import, int fd, void *buf, 
+                 ssize_t len, ssize_t offset)
+{
+    struct fs_request *fsr;
+    unsigned short priv_req_id;
+    RING_IDX back_req_id; 
+    struct fsif_request *req;
+    ssize_t ret;
+
+    BUG_ON(len > PAGE_SIZE);
+
+    /* Prepare request for the backend */
+    back_req_id = reserve_fsif_request(import);
+    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
+
+    /* Prepare our private request structure */
+    priv_req_id = get_id_from_freelist(import->freelist);
+    DEBUG("Request id for fs_read call is: %d\n", priv_req_id);
+    fsr = &import->requests[priv_req_id];
+    fsr->thread = current;
+    memcpy(fsr->page, buf, len);
+    BUG_ON(len > PAGE_SIZE);
+    memset((char *)fsr->page + len, 0, PAGE_SIZE - len); 
+
+    req = RING_GET_REQUEST(&import->ring, back_req_id);
+    req->type = REQ_FILE_WRITE;
+    req->id = priv_req_id;
+    req->u.fwrite.fd = fd;
+    req->u.fwrite.gref = fsr->gref;
+    req->u.fwrite.len = len;
+    req->u.fwrite.offset = offset;
+
+    /* Set blocked flag before commiting the request, thus avoiding missed
+     * response race */
+    block(current);
+    commit_fsif_request(import, back_req_id);
+    schedule();
+    
+    /* Read the response */
+    ret = (ssize_t)fsr->shadow_rsp.ret_val;
+    DEBUG("The following ret value returned %d\n", ret);
+    add_id_to_freelist(priv_req_id, import->freelist);
+
+    return ret;
+} 
+
+int fs_stat(struct fs_import *import, 
+            int fd, 
+            struct fsif_stat_response *stat)
+{
+    struct fs_request *fsr;
+    unsigned short priv_req_id;
+    RING_IDX back_req_id; 
+    struct fsif_request *req;
+    int ret;
+
+    /* Prepare request for the backend */
+    back_req_id = reserve_fsif_request(import);
+    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
+
+    /* Prepare our private request structure */
+    priv_req_id = get_id_from_freelist(import->freelist);
+    DEBUG("Request id for fs_stat call is: %d\n", priv_req_id);
+    fsr = &import->requests[priv_req_id];
+    fsr->thread = current;
+    memset(fsr->page, 0, PAGE_SIZE);
+
+    req = RING_GET_REQUEST(&import->ring, back_req_id);
+    req->type = REQ_STAT;
+    req->id = priv_req_id;
+    req->u.fstat.fd   = fd;
+    req->u.fstat.gref = fsr->gref;
+
+    /* Set blocked flag before commiting the request, thus avoiding missed
+     * response race */
+    block(current);
+    commit_fsif_request(import, back_req_id);
+    schedule();
+    
+    /* Read the response */
+    ret = (int)fsr->shadow_rsp.ret_val;
+    DEBUG("Following ret from fstat: %d\n", ret);
+    memcpy(stat, fsr->page, sizeof(struct fsif_stat_response));
+    add_id_to_freelist(priv_req_id, import->freelist);
+
+    return ret;
+} 
+
+int fs_truncate(struct fs_import *import, 
+                int fd, 
+                int64_t length)
+{
+    struct fs_request *fsr;
+    unsigned short priv_req_id;
+    RING_IDX back_req_id; 
+    struct fsif_request *req;
+    int ret;
+
+    /* Prepare request for the backend */
+    back_req_id = reserve_fsif_request(import);
+    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
+
+    /* Prepare our private request structure */
+    priv_req_id = get_id_from_freelist(import->freelist);
+    DEBUG("Request id for fs_truncate call is: %d\n", priv_req_id);
+    fsr = &import->requests[priv_req_id];
+    fsr->thread = current;
+
+    req = RING_GET_REQUEST(&import->ring, back_req_id);
+    req->type = REQ_FILE_TRUNCATE;
+    req->id = priv_req_id;
+    req->u.ftruncate.fd = fd;
+    req->u.ftruncate.length = length;
+
+    /* Set blocked flag before commiting the request, thus avoiding missed
+     * response race */
+    block(current);
+    commit_fsif_request(import, back_req_id);
+    schedule();
+    
+    /* Read the response */
+    ret = (int)fsr->shadow_rsp.ret_val;
+    DEBUG("Following ret from ftruncate: %d\n", ret);
+    add_id_to_freelist(priv_req_id, import->freelist);
+
+    return ret;
+} 
+
+int fs_remove(struct fs_import *import, char *file)
+{
+    struct fs_request *fsr;
+    unsigned short priv_req_id;
+    RING_IDX back_req_id; 
+    struct fsif_request *req;
+    int ret;
+
+    /* Prepare request for the backend */
+    back_req_id = reserve_fsif_request(import);
+    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
+
+    /* Prepare our private request structure */
+    priv_req_id = get_id_from_freelist(import->freelist);
+    DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
+    fsr = &import->requests[priv_req_id];
+    fsr->thread = current;
+    sprintf(fsr->page, "%s", file);
+
+    req = RING_GET_REQUEST(&import->ring, back_req_id);
+    req->type = REQ_REMOVE;
+    req->id = priv_req_id;
+    req->u.fremove.gref = fsr->gref;
+
+    /* Set blocked flag before commiting the request, thus avoiding missed
+     * response race */
+    block(current);
+    commit_fsif_request(import, back_req_id);
+    schedule();
+    
+    /* Read the response */
+    ret = (int)fsr->shadow_rsp.ret_val;
+    DEBUG("The following ret: %d\n", ret);
+    add_id_to_freelist(priv_req_id, import->freelist);
+
+    return ret;
+}
+
+
+int fs_rename(struct fs_import *import, 
+              char *old_file_name, 
+              char *new_file_name)
+{
+    struct fs_request *fsr;
+    unsigned short priv_req_id;
+    RING_IDX back_req_id; 
+    struct fsif_request *req;
+    int ret;
+    char old_header[] = "old: ";
+    char new_header[] = "new: ";
+
+    /* Prepare request for the backend */
+    back_req_id = reserve_fsif_request(import);
+    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
+
+    /* Prepare our private request structure */
+    priv_req_id = get_id_from_freelist(import->freelist);
+    DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
+    fsr = &import->requests[priv_req_id];
+    fsr->thread = current;
+    sprintf(fsr->page, "%s%s%c%s%s", 
+            old_header, old_file_name, '\0', new_header, new_file_name);
+
+    req = RING_GET_REQUEST(&import->ring, back_req_id);
+    req->type = REQ_RENAME;
+    req->id = priv_req_id;
+    req->u.frename.gref = fsr->gref;
+    req->u.frename.old_name_offset = strlen(old_header);
+    req->u.frename.new_name_offset = strlen(old_header) +
+                                     strlen(old_file_name) +
+                                     strlen(new_header) +
+                                     1 /* Accouning for the additional 
+                                          end of string character */;
+
+    /* Set blocked flag before commiting the request, thus avoiding missed
+     * response race */
+    block(current);
+    commit_fsif_request(import, back_req_id);
+    schedule();
+    
+    /* Read the response */
+    ret = (int)fsr->shadow_rsp.ret_val;
+    DEBUG("The following ret: %d\n", ret);
+    add_id_to_freelist(priv_req_id, import->freelist);
+
+    return ret;
+}
+
+int fs_create(struct fs_import *import, char *name, 
+              int8_t directory, int32_t mode)
+{
+    struct fs_request *fsr;
+    unsigned short priv_req_id;
+    RING_IDX back_req_id; 
+    struct fsif_request *req;
+    int ret;
+
+    /* Prepare request for the backend */
+    back_req_id = reserve_fsif_request(import);
+    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
+
+    /* Prepare our private request structure */
+    priv_req_id = get_id_from_freelist(import->freelist);
+    DEBUG("Request id for fs_create call is: %d\n", priv_req_id);
+    fsr = &import->requests[priv_req_id];
+    fsr->thread = current;
+    sprintf(fsr->page, "%s", name);
+
+    req = RING_GET_REQUEST(&import->ring, back_req_id);
+    req->type = REQ_CREATE;
+    req->id = priv_req_id;
+    req->u.fcreate.gref = fsr->gref;
+    req->u.fcreate.directory = directory;
+    req->u.fcreate.mode = mode;
+
+    /* Set blocked flag before commiting the request, thus avoiding missed
+     * response race */
+    block(current);
+    commit_fsif_request(import, back_req_id);
+    schedule();
+    
+    /* Read the response */
+    ret = (int)fsr->shadow_rsp.ret_val;
+    DEBUG("The following ret: %d\n", ret);
+    add_id_to_freelist(priv_req_id, import->freelist);
+
+    return ret;
+} 
+
+char** fs_list(struct fs_import *import, char *name, 
+               int32_t offset, int32_t *nr_files, int *has_more)
+{
+    struct fs_request *fsr;
+    unsigned short priv_req_id;
+    RING_IDX back_req_id; 
+    struct fsif_request *req;
+    char **files, *current_file;
+    int i;
+
+    DEBUG("Different masks: NR_FILES=(%llx, %d), ERROR=(%llx, %d), HAS_MORE(%llx, %d)\n",
+            NR_FILES_MASK, NR_FILES_SHIFT, ERROR_MASK, ERROR_SHIFT, HAS_MORE_FLAG, HAS_MORE_SHIFT);
+
+    /* Prepare request for the backend */
+    back_req_id = reserve_fsif_request(import);
+    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
+
+    /* Prepare our private request structure */
+    priv_req_id = get_id_from_freelist(import->freelist);
+    DEBUG("Request id for fs_list call is: %d\n", priv_req_id);
+    fsr = &import->requests[priv_req_id];
+    fsr->thread = current;
+    sprintf(fsr->page, "%s", name);
+
+    req = RING_GET_REQUEST(&import->ring, back_req_id);
+    req->type = REQ_DIR_LIST;
+    req->id = priv_req_id;
+    req->u.flist.gref = fsr->gref;
+    req->u.flist.offset = offset;
+
+    /* Set blocked flag before commiting the request, thus avoiding missed
+     * response race */
+    block(current);
+    commit_fsif_request(import, back_req_id);
+    schedule();
+    
+    /* Read the response */
+    *nr_files = (fsr->shadow_rsp.ret_val & NR_FILES_MASK) >> NR_FILES_SHIFT;
+    files = NULL;
+    if(*nr_files <= 0) goto exit;
+    files = malloc(sizeof(char*) * (*nr_files));
+    current_file = fsr->page;
+    for(i=0; i<*nr_files; i++)
+    {
+        files[i] = strdup(current_file); 
+        current_file += strlen(current_file) + 1;
+    }
+    if(has_more != NULL)
+        *has_more = fsr->shadow_rsp.ret_val & HAS_MORE_FLAG;
+    add_id_to_freelist(priv_req_id, import->freelist);
+exit:
+    return files;
+} 
+
+int fs_chmod(struct fs_import *import, int fd, int32_t mode)
+{
+    struct fs_request *fsr;
+    unsigned short priv_req_id;
+    RING_IDX back_req_id; 
+    struct fsif_request *req;
+    int ret;
+
+    /* Prepare request for the backend */
+    back_req_id = reserve_fsif_request(import);
+    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
+
+    /* Prepare our private request structure */
+    priv_req_id = get_id_from_freelist(import->freelist);
+    DEBUG("Request id for fs_chmod call is: %d\n", priv_req_id);
+    fsr = &import->requests[priv_req_id];
+    fsr->thread = current;
+
+    req = RING_GET_REQUEST(&import->ring, back_req_id);
+    req->type = REQ_CHMOD;
+    req->id = priv_req_id;
+    req->u.fchmod.fd = fd;
+    req->u.fchmod.mode = mode;
+
+    /* Set blocked flag before commiting the request, thus avoiding missed
+     * response race */
+    block(current);
+    commit_fsif_request(import, back_req_id);
+    schedule();
+    
+    /* Read the response */
+    ret = (int)fsr->shadow_rsp.ret_val;
+    DEBUG("The following returned: %d\n", ret);
+    add_id_to_freelist(priv_req_id, import->freelist);
+
+    return ret;
+} 
+
+int64_t fs_space(struct fs_import *import, char *location)
+{
+    struct fs_request *fsr;
+    unsigned short priv_req_id;
+    RING_IDX back_req_id; 
+    struct fsif_request *req;
+    int64_t ret;
+
+    /* Prepare request for the backend */
+    back_req_id = reserve_fsif_request(import);
+    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
+
+    /* Prepare our private request structure */
+    priv_req_id = get_id_from_freelist(import->freelist);
+    DEBUG("Request id for fs_space is: %d\n", priv_req_id);
+    fsr = &import->requests[priv_req_id];
+    fsr->thread = current;
+    sprintf(fsr->page, "%s", location);
+
+    req = RING_GET_REQUEST(&import->ring, back_req_id);
+    req->type = REQ_FS_SPACE;
+    req->id = priv_req_id;
+    req->u.fspace.gref = fsr->gref;
+
+    /* Set blocked flag before commiting the request, thus avoiding missed
+     * response race */
+    block(current);
+    commit_fsif_request(import, back_req_id);
+    schedule();
+    
+    /* Read the response */
+    ret = (int64_t)fsr->shadow_rsp.ret_val;
+    DEBUG("The following returned: %lld\n", ret);
+    add_id_to_freelist(priv_req_id, import->freelist);
+
+    return ret;
+} 
+
+int fs_sync(struct fs_import *import, int fd)
+{
+    struct fs_request *fsr;
+    unsigned short priv_req_id;
+    RING_IDX back_req_id; 
+    struct fsif_request *req;
+    int ret;
+
+    /* Prepare request for the backend */
+    back_req_id = reserve_fsif_request(import);
+    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
+
+    /* Prepare our private request structure */
+    priv_req_id = get_id_from_freelist(import->freelist);
+    DEBUG("Request id for fs_sync call is: %d\n", priv_req_id);
+    fsr = &import->requests[priv_req_id];
+    fsr->thread = current;
+
+    req = RING_GET_REQUEST(&import->ring, back_req_id);
+    req->type = REQ_FILE_SYNC;
+    req->id = priv_req_id;
+    req->u.fsync.fd = fd;
+
+    /* Set blocked flag before commiting the request, thus avoiding missed
+     * response race */
+    block(current);
+    commit_fsif_request(import, back_req_id);
+    schedule();
+    
+    /* Read the response */
+    ret = (int)fsr->shadow_rsp.ret_val;
+    DEBUG("Close returned: %d\n", ret);
+    add_id_to_freelist(priv_req_id, import->freelist);
+
+    return ret;
+}
+
+
+/******************************************************************************/
+/*                       END OF INDIVIDUAL FILE OPERATIONS                    */
+/******************************************************************************/
+
+
+static void fsfront_handler(evtchn_port_t port, struct pt_regs *regs, void *data)
+{
+    struct fs_import *import = (struct fs_import*)data;
+    static int in_irq = 0;
+    RING_IDX cons, rp;
+    int more;
+
+    /* Check for non-reentrance */
+    BUG_ON(in_irq);
+    in_irq = 1;
+
+    DEBUG("Event from import [%d:%d].\n", import->dom_id, import->export_id);
+moretodo:   
+    rp = import->ring.sring->req_prod;
+    rmb(); /* Ensure we see queued responses up to 'rp'. */
+    cons = import->ring.rsp_cons;
+    while (cons != rp)
+    {
+        struct fsif_response *rsp;
+        struct fs_request *req;
+
+        rsp = RING_GET_RESPONSE(&import->ring, cons); 
+        DEBUG("Response at idx=%d to request id=%d, ret_val=%lx\n", 
+            import->ring.rsp_cons, rsp->id, rsp->ret_val);
+        req = &import->requests[rsp->id];
+        memcpy(&req->shadow_rsp, rsp, sizeof(struct fsif_response));
+        DEBUG("Waking up: %s\n", req->thread->name);
+        wake(req->thread);
+
+        cons++;
+        up(&import->reqs_sem);
+    }
+
+    import->ring.rsp_cons = rp;
+    RING_FINAL_CHECK_FOR_RESPONSES(&import->ring, more);
+    if(more) goto moretodo;
+    
+    in_irq = 0;
+}
+
+/* Small utility function to figure out our domain id */
+static domid_t get_self_id(void)
+{
+    char *dom_id;
+    domid_t ret; 
+
+    BUG_ON(xenbus_read(XBT_NIL, "domid", &dom_id));
+    sscanf(dom_id, "%d", &ret);
+
+    return ret;
+}
+
+static void alloc_request_table(struct fs_import *import)
+{
+    struct fs_request *requests;
+    int i;
+
+    BUG_ON(import->nr_entries <= 0);
+    printk("Allocating request array for import %d, nr_entries = %d.\n",
+            import->import_id, import->nr_entries);
+    requests = xmalloc_array(struct fs_request, import->nr_entries);
+    import->freelist = xmalloc_array(unsigned short, import->nr_entries);
+    memset(import->freelist, 0, sizeof(unsigned short) * import->nr_entries);
+    for(i=0; i<import->nr_entries; i++)
+    {
+       /* TODO: that's a lot of memory */
+        requests[i].page = (void *)alloc_page(); 
+        requests[i].gref = gnttab_grant_access(import->dom_id, 
+                                               virt_to_mfn(requests[i].page),
+                                               0);
+        //printk("   ===>> Page=%lx, gref=%d, mfn=%lx\n", requests[i].page, requests[i].gref, virt_to_mfn(requests[i].page));
+        add_id_to_freelist(i, import->freelist);
+    }
+    import->requests = requests;
+}
+
+
+/******************************************************************************/
+/*                                FS TESTS                                    */
+/******************************************************************************/
+
+
+void test_fs_import(void *data)
+{
+    struct fs_import *import = (struct fs_import *)data; 
+    int ret, fd, i;
+    int32_t nr_files;
+    char buffer[1024];
+    ssize_t offset;
+    char **files;
+    long ret64;
+   
+    /* Sleep for 1s and then try to open a file */
+    sleep(1000);
+    ret = fs_create(import, "mini-os-created-directory", 1, 0777);
+    printk("Directory create: %d\n", ret);
+
+    ret = fs_create(import, "mini-os-created-directory/mini-os-created-file", 0, 0666);
+    printk("File create: %d\n", ret);
+
+    fd = fs_open(import, "mini-os-created-directory/mini-os-created-file");
+    printk("File descriptor: %d\n", fd);
+    if(fd < 0) return;
+
+    offset = 0;
+    for(i=0; i<10; i++)
+    {
+        sprintf(buffer, "Current time is: %lld\n", NOW());
+        ret = fs_write(import, fd, buffer, strlen(buffer), offset);
+        printk("Writen current time (%d)\n", ret);
+        if(ret < 0)
+            return;
+        offset += ret;
+    }
+
+    ret = fs_close(import, fd);
+    printk("Closed fd: %d, ret=%d\n", fd, ret);
+   
+    printk("Listing files in /\n");
+    files = fs_list(import, "/", 0, &nr_files, NULL); 
+    for(i=0; i<nr_files; i++)
+        printk(" files[%d] = %s\n", i, files[i]);
+
+    ret64 = fs_space(import, "/");
+    printk("Free space: %lld (=%lld Mb)\n", ret64, (ret64 >> 20));
+    
+}
+
+#if 0
+//    char *content = (char *)alloc_page();
+    int fd, ret;
+//    int read;
+    char write_string[] = "\"test data written from minios\"";
+    struct fsif_stat_response stat;
+    char **files;
+    int32_t nr_files, i;
+    int64_t ret64;
+
+
+    fd = fs_open(import, "test-export-file");
+//    read = fs_read(import, fd, content, PAGE_SIZE, 0);
+//    printk("Read: %d bytes\n", read); 
+//    content[read] = '\0';
+//    printk("Value: %s\n", content);
+    ret = fs_write(import, fd, write_string, strlen(write_string), 0);
+    printk("Ret after write: %d\n", ret);
+    ret = fs_stat(import, fd, &stat);
+    printk("Ret after stat: %d\n", ret);
+    printk(" st_mode=%o\n", stat.stat_mode);
+    printk(" st_uid =%d\n", stat.stat_uid);
+    printk(" st_gid =%d\n", stat.stat_gid);
+    printk(" st_size=%ld\n", stat.stat_size);
+    printk(" st_atime=%ld\n", stat.stat_atime);
+    printk(" st_mtime=%ld\n", stat.stat_mtime);
+    printk(" st_ctime=%ld\n", stat.stat_ctime);
+    ret = fs_truncate(import, fd, 30);
+    printk("Ret after truncate: %d\n", ret);
+    ret = fs_remove(import, "test-to-remove/test-file");
+    printk("Ret after remove: %d\n", ret);
+    ret = fs_remove(import, "test-to-remove");
+    printk("Ret after remove: %d\n", ret);
+    ret = fs_chmod(import, fd, 0700);
+    printk("Ret after chmod: %d\n", ret);
+    ret = fs_sync(import, fd);
+    printk("Ret after sync: %d\n", ret);
+    ret = fs_close(import, fd);
+    //ret = fs_rename(import, "test-export-file", "renamed-test-export-file");
+    //printk("Ret after rename: %d\n", ret);
+    ret = fs_create(import, "created-dir", 1, 0777);
+    printk("Ret after dir create: %d\n", ret);
+    ret = fs_create(import, "created-dir/created-file", 0, 0777);
+    printk("Ret after file create: %d\n", ret);
+    files = fs_list(import, "/", 15, &nr_files, NULL); 
+    for(i=0; i<nr_files; i++)
+        printk(" files[%d] = %s\n", i, files[i]);
+    ret64 = fs_space(import, "created-dir");
+    printk("Ret after space: %lld\n", ret64);
+
+#endif
+
+
+/******************************************************************************/
+/*                            END OF FS TESTS                                 */
+/******************************************************************************/
+
+static int init_fs_import(struct fs_import *import)
+{    
+    char *err;
+    xenbus_transaction_t xbt;
+    char nodename[1024], r_nodename[1024], token[128], *message = NULL;
+    struct fsif_sring *sring;
+    int retry = 0;
+    domid_t self_id;
+
+    printk("Initialising FS fortend to backend dom %d\n", import->dom_id);
+    /* Allocate page for the shared ring */
+    sring = (struct fsif_sring*) alloc_page();
+    memset(sring, 0, PAGE_SIZE);
+
+    /* Init the shared ring */
+    SHARED_RING_INIT(sring);
+
+    /* Init private frontend ring */
+    FRONT_RING_INIT(&import->ring, sring, PAGE_SIZE);
+    import->nr_entries = import->ring.nr_ents;
+
+    /* Allocate table of requests */
+    alloc_request_table(import);
+    init_SEMAPHORE(&import->reqs_sem, import->nr_entries);
+
+    /* Grant access to the shared ring */
+    import->gnt_ref = gnttab_grant_access(import->dom_id, virt_to_mfn(sring), 0);
+   
+    /* Allocate event channel */ 
+    BUG_ON(evtchn_alloc_unbound(import->dom_id, 
+                                fsfront_handler, 
+                                //ANY_CPU, 
+                                import, 
+                                &import->local_port));
+
+    
+    self_id = get_self_id(); 
+    /* Write the frontend info to a node in our Xenbus */
+    sprintf(nodename, "/local/domain/%d/device/vfs/%d", 
+                        self_id, import->import_id);
+
+again:
+    err = xenbus_transaction_start(&xbt);
+    if (err) {
+        printk("starting transaction\n");
+    }
+    
+    err = xenbus_printf(xbt, 
+                        nodename, 
+                        "ring-ref",
+                        "%u",
+                        import->gnt_ref);
+    if (err) {
+        message = "writing ring-ref";
+        goto abort_transaction;
+    }
+
+    err = xenbus_printf(xbt, 
+                        nodename,
+                        "event-channel", 
+                        "%u", 
+                        import->local_port);
+    if (err) {
+        message = "writing event-channel";
+        goto abort_transaction;
+    }
+
+    err = xenbus_printf(xbt, nodename, "state", STATE_READY, 0xdeadbeef);
+
+    
+    err = xenbus_transaction_end(xbt, 0, &retry);
+    if (retry) {
+            goto again;
+        printk("completing transaction\n");
+    }
+
+    /* Now that our node is prepared, write the request in the exporting
+     * domain. */
+    printk("Our own id is %d\n", self_id);
+    sprintf(r_nodename, 
+            "/local/domain/%d/backend/vfs/exports/requests/%d/%d/frontend", 
+            import->dom_id, self_id, import->export_id);
+    BUG_ON(xenbus_write(XBT_NIL, r_nodename, nodename));
+
+    goto done;
+
+abort_transaction:
+    xenbus_transaction_end(xbt, 1, &retry);
+
+done:
+
+#define WAIT_PERIOD 10   /* Wait period in ms */    
+#define MAX_WAIT    10   /* Max number of WAIT_PERIODs */
+    import->backend = NULL;
+    sprintf(r_nodename, "%s/backend", nodename);
+   
+    for(retry = MAX_WAIT; retry > 0; retry--)
+    { 
+        xenbus_read(XBT_NIL, r_nodename, &import->backend);
+        if(import->backend)
+        {
+            printk("Backend found at %s\n", import->backend);
+            break;
+        }
+       sleep(WAIT_PERIOD);
+    }        
+    
+    if(!import->backend)
+    {
+        printk("No backend available.\n");
+        /* TODO - cleanup datastructures/xenbus */
+        return 0;
+    }
+    sprintf(r_nodename, "%s/state", import->backend);
+    sprintf(token, "fs-front-%d", import->import_id);
+    /* The token will not be unique if multiple imports are inited */
+    xenbus_watch_path(XBT_NIL, r_nodename/*, token*/);
+    xenbus_wait_for_value(/*token,*/ r_nodename, STATE_READY);
+    printk("Backend ready.\n");
+   
+    //create_thread("fs-tester", test_fs_import, import); 
+
+    return 1;
+}
+
+static void add_export(struct list_head *exports, unsigned int domid)
+{
+    char node[1024], **exports_list = NULL, *ret_msg;
+    int j = 0;
+    static int import_id = 0;
+
+    sprintf(node, "/local/domain/%d/backend/vfs/exports", domid);
+    ret_msg = xenbus_ls(XBT_NIL, node, &exports_list);
+    if (ret_msg && strcmp(ret_msg, "ENOENT"))
+        printk("couldn't read %s: %s\n", node, ret_msg);
+    while(exports_list && exports_list[j])
+    {
+        struct fs_import *import; 
+        int export_id = -1;
+        
+        sscanf(exports_list[j], "%d", &export_id);
+        if(export_id >= 0)
+        {
+            import = xmalloc(struct fs_import);
+            import->dom_id = domid;
+            import->export_id = export_id;
+            import->import_id = import_id++;
+            INIT_LIST_HEAD(&import->list);
+            list_add(&import->list, exports);
+        }
+        free(exports_list[j]);
+        j++;
+    }
+    if(exports_list)
+        free(exports_list);
+    if(ret_msg)
+        free(ret_msg);
+}
+
+#if 0
+static struct list_head* probe_exports(void)
+{
+    struct list_head *exports;
+    char **node_list = NULL, *msg = NULL;
+    int i = 0;
+
+    exports = xmalloc(struct list_head);
+    INIT_LIST_HEAD(exports);
+    
+    msg = xenbus_ls(XBT_NIL, "/local/domain", &node_list);
+    if(msg)
+    {
+        printk("Could not list VFS exports (%s).\n", msg);
+        goto exit;
+    }
+
+    while(node_list[i])
+    {
+        add_export(exports, atoi(node_list[i]));
+        free(node_list[i]);
+        i++;
+    } 
+
+exit:    
+    if(msg)
+        free(msg);
+    if(node_list)
+        free(node_list);
+    return exports;
+}
+#endif
+
+LIST_HEAD(exports);
+
+void init_fs_frontend(void)
+{
+    struct list_head *entry;
+    struct fs_import *import = NULL;
+    printk("Initing FS fronend(s).\n");
+
+    //exports = probe_exports();
+    add_export(&exports, 0);
+    list_for_each(entry, &exports)
+    {
+        import = list_entry(entry, struct fs_import, list);
+        printk("FS export [dom=%d, id=%d] found\n", 
+                import->dom_id, import->export_id);
+        init_fs_import(import);
+    }
+
+    fs_import = import;
+
+    if (!fs_import)
+       printk("No FS import\n");
+}
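
The frontend above is driven through the fs_import handle that init_fs_frontend() stores in the global fs_import once a backend has been found. A minimal sketch (illustrative only) of how application code linked against mini-os might use it: the file name "demo-file" is made up, the fs_import definition lives earlier in fs-front.c (not in this hunk), and error handling is trimmed.

    #include <fs.h>

    extern struct fs_import *fs_import;   /* set by init_fs_frontend() above */

    static void fs_demo(void)
    {
        char buf[64];
        ssize_t len;
        int fd;

        if (!fs_import)
            return;                        /* no backend was available */

        fd = fs_open(fs_import, "demo-file");
        if (fd < 0)
            return;

        len = fs_read(fs_import, fd, buf, sizeof(buf) - 1, 0);
        if (len >= 0)
            buf[len] = '\0';
        fs_close(fs_import, fd);
    }
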
diff -r 7643472d6b43 -r a868bd4236e6 extras/mini-os/include/blkfront.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/include/blkfront.h Fri Jan 18 13:49:48 2008 -0700
@@ -0,0 +1,26 @@
+#include <wait.h>
+#include <xen/io/blkif.h>
+#include <types.h>
+struct blkfront_dev;
+struct blkfront_aiocb
+{
+    struct blkfront_dev *aio_dev;
+    uint8_t *aio_buf;
+    size_t aio_nbytes;
+    uint64_t aio_offset;
+    void *data;
+
+    grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+
+    void (*aio_cb)(struct blkfront_aiocb *aiocb, int ret);
+};
+struct blkfront_dev *init_blkfront(char *nodename, uint64_t *sectors, unsigned *sector_size, int *mode);
+int blkfront_open(struct blkfront_dev *dev);
+void blkfront_aio(struct blkfront_aiocb *aiocbp, int write);
+void blkfront_aio_read(struct blkfront_aiocb *aiocbp);
+void blkfront_aio_write(struct blkfront_aiocb *aiocbp);
+int blkfront_aio_poll(struct blkfront_dev *dev);
+void blkfront_sync(struct blkfront_dev *dev);
+void shutdown_blkfront(struct blkfront_dev *dev);
+
+extern struct wait_queue_head blkfront_queue;
diff -r 7643472d6b43 -r a868bd4236e6 extras/mini-os/include/fs.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/include/fs.h       Fri Jan 18 13:49:48 2008 -0700
@@ -0,0 +1,51 @@
+#ifndef __FS_H__
+#define __FS_H__
+
+#include <xen/io/fsif.h>
+#include <semaphore.h>
+
+struct fs_import 
+{
+    domid_t dom_id;                 /* dom id of the exporting domain       */ 
+    u16 export_id;                  /* export id (exporting dom specific)   */
+    u16 import_id;                  /* import id (specific to this domain)  */ 
+    struct list_head list;          /* list of all imports                  */
+    unsigned int nr_entries;        /* Number of entries in rings & request
+                                       array                                */
+    struct fsif_front_ring ring;    /* frontend ring (contains shared ring) */
+    int gnt_ref;                    /* grant reference to the shared ring   */
+    evtchn_port_t local_port;       /* local event channel port             */
+    char *backend;                  /* XenBus location of the backend       */
+    struct fs_request *requests;    /* Table of requests                    */
+    unsigned short *freelist;       /* List of free request ids             */
+    struct semaphore reqs_sem;      /* Accounts requests resource           */
+};
+
+
+void init_fs_frontend(void);
+
+int fs_open(struct fs_import *import, char *file);
+int fs_close(struct fs_import *import, int fd);
+ssize_t fs_read(struct fs_import *import, int fd, void *buf, 
+                ssize_t len, ssize_t offset);
+ssize_t fs_write(struct fs_import *import, int fd, void *buf, 
+                 ssize_t len, ssize_t offset);
+int fs_stat(struct fs_import *import, 
+            int fd, 
+            struct fsif_stat_response *stat);
+int fs_truncate(struct fs_import *import, 
+                int fd, 
+                int64_t length);
+int fs_remove(struct fs_import *import, char *file);
+int fs_rename(struct fs_import *import, 
+              char *old_file_name, 
+              char *new_file_name);
+int fs_create(struct fs_import *import, char *name, 
+              int8_t directory, int32_t mode);
+char** fs_list(struct fs_import *import, char *name, 
+               int32_t offset, int32_t *nr_files, int *has_more);
+int fs_chmod(struct fs_import *import, int fd, int32_t mode);
+int64_t fs_space(struct fs_import *import, char *location);
+int fs_sync(struct fs_import *import, int fd);
+
+#endif
diff -r 7643472d6b43 -r a868bd4236e6 extras/mini-os/include/ia64/arch_mm.h
--- a/extras/mini-os/include/ia64/arch_mm.h     Thu Jan 17 12:17:14 2008 -0700
+++ b/extras/mini-os/include/ia64/arch_mm.h     Fri Jan 18 13:49:48 2008 -0700
@@ -36,4 +36,8 @@
 #define STACK_SIZE_PAGE_ORDER   1
 #define STACK_SIZE              (PAGE_SIZE * (1 << STACK_SIZE_PAGE_ORDER))
 
+#define map_frames(f, n) map_frames_ex(f, n, 1, 0, 1, DOMID_SELF, 0, 0)
+/* TODO */
+#define map_zero(n, a) map_frames_ex(NULL, n, 0, 0, a, DOMID_SELF, 0, 0)
+
 #endif /* __ARCH_MM_H__ */
diff -r 7643472d6b43 -r a868bd4236e6 extras/mini-os/include/mm.h
--- a/extras/mini-os/include/mm.h       Thu Jan 17 12:17:14 2008 -0700
+++ b/extras/mini-os/include/mm.h       Fri Jan 18 13:49:48 2008 -0700
@@ -57,6 +57,9 @@ void arch_init_mm(unsigned long* start_p
 void arch_init_mm(unsigned long* start_pfn_p, unsigned long* max_pfn_p);
 void arch_init_p2m(unsigned long max_pfn_p);
 
-void *map_frames(unsigned long *f, unsigned long n);
+/* map f[i*stride]+i*increment for i in 0..n-1, aligned on alignment pages */
+void *map_frames_ex(unsigned long *f, unsigned long n, unsigned long stride,
+       unsigned long increment, unsigned long alignment, domid_t id,
+       int may_fail, unsigned long prot);
 
 #endif /* _MM_H_ */
diff -r 7643472d6b43 -r a868bd4236e6 extras/mini-os/include/types.h
--- a/extras/mini-os/include/types.h    Thu Jan 17 12:17:14 2008 -0700
+++ b/extras/mini-os/include/types.h    Fri Jan 18 13:49:48 2008 -0700
@@ -57,6 +57,13 @@ typedef struct { unsigned long pte; } pt
 typedef struct { unsigned long pte; } pte_t;
 #endif /* __i386__ || __x86_64__ */
 
+#if !defined(CONFIG_X86_PAE)
+#define __pte(x) ((pte_t) { (x) } )
+#else
+#define __pte(x) ({ unsigned long long _x = (x);        \
+    ((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); })
+#endif
+
 typedef  u8 uint8_t;
 typedef  s8 int8_t;
 typedef u16 uint16_t;
@@ -69,4 +76,7 @@ typedef s64 int64_t;
 
 #define INT_MAX         ((int)(~0U>>1))
 #define UINT_MAX            (~0U)
+
+typedef long ssize_t;
+typedef unsigned long size_t;
 #endif /* _TYPES_H_ */
diff -r 7643472d6b43 -r a868bd4236e6 extras/mini-os/include/x86/arch_mm.h
--- a/extras/mini-os/include/x86/arch_mm.h      Thu Jan 17 12:17:14 2008 -0700
+++ b/extras/mini-os/include/x86/arch_mm.h      Fri Jan 18 13:49:48 2008 -0700
@@ -144,12 +144,14 @@ typedef unsigned long pgentry_t;
 
 #if defined(__i386__)
 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
+#define L1_PROT_RO (_PAGE_PRESENT|_PAGE_ACCESSED)
 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY |_PAGE_USER)
 #if defined(CONFIG_X86_PAE)
 #define L3_PROT (_PAGE_PRESENT)
 #endif /* CONFIG_X86_PAE */
 #elif defined(__x86_64__)
 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
+#define L1_PROT_RO (_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_USER)
 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
 #define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
 #define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
@@ -189,7 +191,8 @@ typedef unsigned long maddr_t;
 #endif
 
 extern unsigned long *phys_to_machine_mapping;
-extern char _text, _etext, _edata, _end;
+extern char _text, _etext, _erodata, _edata, _end;
+extern unsigned long mfn_zero;
 #define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
 static __inline__ maddr_t phys_to_machine(paddr_t phys)
 {
@@ -223,5 +226,7 @@ static __inline__ paddr_t machine_to_phy
 #define pte_to_mfn(_pte)           (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT)
 #define pte_to_virt(_pte)          to_virt(mfn_to_pfn(pte_to_mfn(_pte)) << PAGE_SHIFT)
 
+#define map_frames(f, n) map_frames_ex(f, n, 1, 0, 1, DOMID_SELF, 0, L1_PROT)
+#define map_zero(n, a) map_frames_ex(&mfn_zero, n, 0, 0, a, DOMID_SELF, 0, L1_PROT_RO)
 
 #endif /* _ARCH_MM_H_ */
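
The map_frames()/map_zero() wrappers above pin down most of map_frames_ex()'s parameters (see the comment on its prototype in mm.h earlier in this patch). A sketch of what two typical calls expand to; "mfns" is a hypothetical array of machine frame numbers owned by the caller, and the snippet assumes the mini-os headers shown in this patch are on the include path.

    #include <mm.h>

    static void map_frames_example(unsigned long *mfns)
    {
        void *rw_mapping, *zero_mapping;

        /* map_frames(mfns, 4): four frames mapped back to back (stride 1,
         * increment 0), no special alignment, owned by DOMID_SELF,
         * writable L1_PROT page-table entries. */
        rw_mapping = map_frames_ex(mfns, 4, 1, 0, 1, DOMID_SELF, 0, L1_PROT);

        /* map_zero(4, 1): the single zero frame repeated four times
         * (stride 0), mapped read-only with L1_PROT_RO. */
        zero_mapping = map_frames_ex(&mfn_zero, 4, 0, 0, 1, DOMID_SELF, 0,
                                     L1_PROT_RO);

        (void) rw_mapping;
        (void) zero_mapping;
    }
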
diff -r 7643472d6b43 -r a868bd4236e6 extras/mini-os/include/x86/traps.h
--- a/extras/mini-os/include/x86/traps.h        Thu Jan 17 12:17:14 2008 -0700
+++ b/extras/mini-os/include/x86/traps.h        Fri Jan 18 13:49:48 2008 -0700
@@ -70,4 +70,8 @@ struct pt_regs {
 
 void dump_regs(struct pt_regs *regs);
 
+#define TRAP_PF_PROT   0x1
+#define TRAP_PF_WRITE  0x2
+#define TRAP_PF_USER   0x4
+
 #endif /* _TRAPS_H_ */
diff -r 7643472d6b43 -r a868bd4236e6 extras/mini-os/kernel.c
--- a/extras/mini-os/kernel.c   Thu Jan 17 12:17:14 2008 -0700
+++ b/extras/mini-os/kernel.c   Fri Jan 18 13:49:48 2008 -0700
@@ -38,6 +38,7 @@
 #include <xenbus.h>
 #include <gnttab.h>
 #include <netfront.h>
+#include <fs.h>
 #include <xen/features.h>
 #include <xen/version.h>
 
@@ -85,6 +86,11 @@ static void netfront_thread(void *p)
     init_netfront(NULL, NULL, NULL);
 }
 
+static void fs_thread(void *p)
+{
+    init_fs_frontend();
+}
+
 /* This should be overridden by the application we are linked against. */
 __attribute__((weak)) int app_main(start_info_t *si)
 {
@@ -92,6 +98,7 @@ __attribute__((weak)) int app_main(start
     create_thread("xenbus_tester", xenbus_tester, si);
     create_thread("periodic_thread", periodic_thread, si);
     create_thread("netfront", netfront_thread, si);
+    create_thread("fs-frontend", fs_thread, si);
     return 0;
 }
 
diff -r 7643472d6b43 -r a868bd4236e6 tools/examples/network-nat
--- a/tools/examples/network-nat        Thu Jan 17 12:17:14 2008 -0700
+++ b/tools/examples/network-nat        Fri Jan 18 13:49:48 2008 -0700
@@ -43,9 +43,9 @@ fi
 
 function dhcp_start()
 {
-  if ! grep -q "subnet 192.0.2.0" "$dhcpd_conf_file"
+  if ! grep -q "subnet 10.0.0.0" "$dhcpd_conf_file"
   then
-    echo >>"$dhcpd_conf_file" "subnet 192.0.2.0 netmask 255.255.255.0 {}"
+    echo >>"$dhcpd_conf_file" "subnet 10.0.0.0 netmask 255.255.0.0 {}"
   fi
 
   "$dhcpd_init_file" restart
@@ -55,7 +55,7 @@ function dhcp_stop()
 function dhcp_stop()
 {
   local tmpfile=$(mktemp)
-  grep -v "subnet 192.0.2.0" "$dhcpd_conf_file" >"$tmpfile"
+  grep -v "subnet 10.0.0.0" "$dhcpd_conf_file" >"$tmpfile"
   if diff "$tmpfile" "$dhcpd_conf_file" >&/dev/null
   then
     rm "$tmpfile"
diff -r 7643472d6b43 -r a868bd4236e6 tools/fs-back/Makefile
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/fs-back/Makefile    Fri Jan 18 13:49:48 2008 -0700
@@ -0,0 +1,40 @@
+XEN_ROOT = ../..
+include $(XEN_ROOT)/tools/Rules.mk
+
+INCLUDES += -I.. -I../lib
+
+IBIN         = fs-backend 
+INST_DIR     = /usr/sbin
+
+CFLAGS   += -Werror
+CFLAGS   += -Wno-unused
+CFLAGS   += -fno-strict-aliasing
+CFLAGS   += -I $(XEN_LIBXC)
+CFLAGS   += $(INCLUDES) -I. -I../xenstore 
+CFLAGS   += -D_GNU_SOURCE
+
+# Get gcc to generate the dependencies for us.
+CFLAGS   += -Wp,-MD,.$(@F).d
+DEPS      = .*.d
+
+LIBS      := -L. -L.. -L../lib
+LIBS      += -L$(XEN_LIBXC)
+LIBS      += -lxenctrl -lpthread -lrt 
+LIBS      += -L$(XEN_XENSTORE) -lxenstore
+
+OBJS     := fs-xenbus.o fs-ops.o
+
+all: $(IBIN)
+
+fs-backend: $(OBJS) fs-backend.c
+       $(CC) $(CFLAGS) -o fs-backend $(OBJS) $(LIBS) fs-backend.c
+
+install: all
+       $(INSTALL_PROG) $(IBIN) $(DESTDIR)$(INST_DIR)
+
+clean:
+       rm -rf *.o *~ $(DEPS) xen $(IBIN) $(LIB)
+
+.PHONY: clean install
+
+-include $(DEPS)
diff -r 7643472d6b43 -r a868bd4236e6 tools/fs-back/fs-backend.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/fs-back/fs-backend.c        Fri Jan 18 13:49:48 2008 -0700
@@ -0,0 +1,346 @@
+#undef NDEBUG
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <malloc.h>
+#include <pthread.h>
+#include <xenctrl.h>
+#include <aio.h>
+#include <sys/mman.h>
+#include <sys/select.h>
+#include <xen/io/ring.h>
+#include "fs-backend.h"
+
+struct xs_handle *xsh = NULL;
+static struct fs_export *fs_exports = NULL;
+static int export_id = 0;
+static int mount_id = 0;
+
+void dispatch_response(struct mount *mount, int priv_req_id)
+{
+    int i;
+    struct fs_op *op;
+    struct fs_request *req = &mount->requests[priv_req_id];
+
+    for(i=0;;i++)
+    {
+        op = fsops[i];
+        /* We should dispatch a response before reaching the end of the array */
+        assert(op != NULL);
+        if(op->type == req->req_shadow.type)
+        {
+            printf("Found op for type=%d\n", op->type);
+            /* There needs to be a response handler */
+            assert(op->response_handler != NULL);
+            op->response_handler(mount, req);
+            break;
+        }
+    }
+
+    req->active = 0;
+    add_id_to_freelist(priv_req_id, mount->freelist);
+}
+
+static void handle_aio_events(struct mount *mount)
+{
+    int fd, ret, count, i, notify;
+    evtchn_port_t port;
+    /* AIO control block for the evtchn file descriptor */
+    struct aiocb evtchn_cb;
+    const struct aiocb * cb_list[mount->nr_entries];
+    int request_ids[mount->nr_entries];
+
+    /* Prepare the AIO control block for evtchn */ 
+    fd = xc_evtchn_fd(mount->evth); 
+    bzero(&evtchn_cb, sizeof(struct aiocb));
+    evtchn_cb.aio_fildes = fd;
+    evtchn_cb.aio_nbytes = sizeof(port);
+    evtchn_cb.aio_buf = &port;
+    assert(aio_read(&evtchn_cb) == 0);
+
+wait_again:   
+    /* Create list of active AIO requests */
+    count = 0;
+    for(i=0; i<mount->nr_entries; i++)
+        if(mount->requests[i].active)
+        {
+            cb_list[count] = &mount->requests[i].aiocb;
+            request_ids[count] = i;
+            count++;
+        }
+    /* Add the event channel at the end of the list. Event channel needs to be
+     * handled last as it exits this function. */
+    cb_list[count] = &evtchn_cb;
+    request_ids[count] = -1;
+    count++;
+
+    /* Block till an AIO request finishes, or we get an event */
+    while(1) {
+       int ret = aio_suspend(cb_list, count, NULL);
+       if (!ret)
+           break;
+       assert(errno == EINTR);
+    }
+    for(i=0; i<count; i++)
+        if(aio_error(cb_list[i]) != EINPROGRESS)
+        {
+            if(request_ids[i] >= 0)
+                dispatch_response(mount, request_ids[i]);
+            else
+                goto read_event_channel;
+        }
+ 
+    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&mount->ring, notify);
+    printf("Pushed responces and notify=%d\n", notify);
+    if(notify)
+        xc_evtchn_notify(mount->evth, mount->local_evtchn);
+    
+    goto wait_again;
+
+read_event_channel:    
+    assert(aio_return(&evtchn_cb) == sizeof(evtchn_port_t)); 
+    assert(xc_evtchn_unmask(mount->evth, mount->local_evtchn) >= 0);
+}
+
+
+void allocate_request_array(struct mount *mount)
+{
+    int i, nr_entries = mount->nr_entries;
+    struct fs_request *requests;
+    unsigned short *freelist;
+    
+    requests = malloc(sizeof(struct fs_request) *nr_entries);
+    freelist = malloc(sizeof(unsigned short) * nr_entries); 
+    memset(requests, 0, sizeof(struct fs_request) * nr_entries);
+    memset(freelist, 0, sizeof(unsigned short) * nr_entries);
+    for(i=0; i< nr_entries; i++)
+    {
+        requests[i].active = 0; 
+        add_id_to_freelist(i, freelist);
+    }
+    mount->requests = requests;
+    mount->freelist = freelist;
+}
+
+
+void* handle_mount(void *data)
+{
+    int more, notify;
+    struct mount *mount = (struct mount *)data;
+    
+    printf("Starting a thread for mount: %d\n", mount->mount_id);
+    allocate_request_array(mount);
+
+    for(;;)
+    {
+        int nr_consumed=0;
+        RING_IDX cons, rp;
+        struct fsif_request *req;
+
+        handle_aio_events(mount);
+moretodo:
+        rp = mount->ring.sring->req_prod;
+        rmb(); /* Ensure we see queued requests up to 'rp'. */
+                
+        while ((cons = mount->ring.req_cons) != rp)
+        {
+            int i;
+            struct fs_op *op;
+
+            printf("Got a request at %d\n", cons);
+            req = RING_GET_REQUEST(&mount->ring, cons);
+            printf("Request type=%d\n", req->type); 
+            for(i=0;;i++)
+            {
+                op = fsops[i];
+                if(op == NULL)
+                {
+                    /* We've reached the end of the array, no appropriate
+                     * handler found. Warn, ignore and continue. */
+                    printf("WARN: Unknown request type: %d\n", req->type);
+                    mount->ring.req_cons++; 
+                    break;
+                }
+                if(op->type == req->type)
+                {
+                    /* There needs to be a dispatch handler */
+                    assert(op->dispatch_handler != NULL);
+                    op->dispatch_handler(mount, req);
+                    break;
+                }
+             }
+
+            nr_consumed++;
+        }
+        printf("Backend consumed: %d requests\n", nr_consumed);
+        RING_FINAL_CHECK_FOR_REQUESTS(&mount->ring, more);
+        if(more) goto moretodo;
+
+        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&mount->ring, notify);
+        printf("Pushed responces and notify=%d\n", notify);
+        if(notify)
+            xc_evtchn_notify(mount->evth, mount->local_evtchn);
+    }
+ 
+    printf("Destroying thread for mount: %d\n", mount->mount_id);
+    xc_gnttab_munmap(mount->gnth, mount->ring.sring, 1);
+    xc_gnttab_close(mount->gnth);
+    xc_evtchn_unbind(mount->evth, mount->local_evtchn);
+    xc_evtchn_close(mount->evth);
+    free(mount->frontend);
+    pthread_exit(NULL);
+}
+
+static void handle_connection(int frontend_dom_id, int export_id, char *frontend)
+{
+    struct mount *mount;
+    struct fs_export *export;
+    int evt_port;
+    pthread_t handling_thread;
+    struct fsif_sring *sring;
+
+    printf("Handling connection from dom=%d, for export=%d\n", 
+            frontend_dom_id, export_id);
+    /* Try to find the export on the list */
+    export = fs_exports;
+    while(export)
+    {
+        if(export->export_id == export_id)
+            break;
+        export = export->next;
+    }
+    if(!export)
+    {
+        printf("Could not find the export (the id is unknown).\n");
+        return;
+    }
+
+    mount = (struct mount*)malloc(sizeof(struct mount));
+    mount->dom_id = frontend_dom_id;
+    mount->export = export;
+    mount->mount_id = mount_id++;
+    xenbus_read_mount_request(mount, frontend);
+    printf("Frontend found at: %s (gref=%d, evtchn=%d)\n", 
+            mount->frontend, mount->gref, mount->remote_evtchn);
+    xenbus_write_backend_node(mount);
+    mount->evth = -1;
+    mount->evth = xc_evtchn_open(); 
+    assert(mount->evth != -1);
+    mount->local_evtchn = -1;
+    mount->local_evtchn = xc_evtchn_bind_interdomain(mount->evth, 
+                                                     mount->dom_id, 
+                                                     mount->remote_evtchn);
+    assert(mount->local_evtchn != -1);
+    mount->gnth = -1;
+    mount->gnth = xc_gnttab_open(); 
+    assert(mount->gnth != -1);
+    sring = xc_gnttab_map_grant_ref(mount->gnth,
+                                    mount->dom_id,
+                                    mount->gref,
+                                    PROT_READ | PROT_WRITE);
+    BACK_RING_INIT(&mount->ring, sring, PAGE_SIZE);
+    mount->nr_entries = mount->ring.nr_ents; 
+    xenbus_write_backend_ready(mount);
+
+    pthread_create(&handling_thread, NULL, &handle_mount, mount);
+}
+
+static void await_connections(void)
+{
+    int fd, ret, dom_id, export_id; 
+    fd_set fds;
+    char **watch_paths;
+    unsigned int len;
+    char d;
+
+    assert(xsh != NULL);
+    fd = xenbus_get_watch_fd(); 
+    /* Infinite watch loop */
+    do {
+       FD_ZERO(&fds);
+       FD_SET(fd, &fds);
+        ret = select(fd+1, &fds, NULL, NULL, NULL);
+        assert(ret == 1);
+        watch_paths = xs_read_watch(xsh, &len);
+        assert(len == 2);
+        assert(strcmp(watch_paths[1], "conn-watch") == 0);
+        dom_id = -1;
+        export_id = -1;
+       d = 0;
+        printf("Path changed %s\n", watch_paths[0]);
+        sscanf(watch_paths[0], WATCH_NODE"/%d/%d/fronten%c", 
+                &dom_id, &export_id, &d);
+        if((dom_id >= 0) && (export_id >= 0) && d == 'd') {
+           char *frontend = xs_read(xsh, XBT_NULL, watch_paths[0], NULL);
+           if (frontend) {
+               handle_connection(dom_id, export_id, frontend);
+               xs_rm(xsh, XBT_NULL, watch_paths[0]);
+           }
+       }
+next_select:        
+        printf("Awaiting next connection.\n");
+        /* TODO - we need to figure out what to free */
+       free(watch_paths);
+    } while (1);
+}
+
+struct fs_export* create_export(char *name, char *export_path)
+{
+    struct fs_export *curr_export, **last_export;
+
+    /* Create export structure */
+    curr_export = (struct fs_export *)malloc(sizeof(struct fs_export));
+    curr_export->name = name;
+    curr_export->export_path = export_path;
+    curr_export->export_id = export_id++;
+    /* Thread it onto the list */
+    curr_export->next = NULL;
+    last_export = &fs_exports;
+    while(*last_export)
+        last_export = &((*last_export)->next);
+    *last_export = curr_export;
+
+    return curr_export;
+}
+
+
+int main(void)
+{
+    struct fs_export *export;
+
+    /* Open the connection to XenStore first */
+    xsh = xs_domain_open();
+    assert(xsh != NULL);
+    xs_rm(xsh, XBT_NULL, ROOT_NODE);
+    /* Create watch node */
+    xenbus_create_request_node();
+    
+    /* Create & register the default export */
+    export = create_export("default", "/exports");
+    xenbus_register_export(export);
+
+    await_connections();
+    /* Close the connection to XenStore when we are finished with everything */
+    xs_daemon_close(xsh);
+#if 0
+    int xc_handle;
+    char *shared_page;
+    int prot = PROT_READ | PROT_WRITE;
+  
+    xc_handle = xc_gnttab_open();
+    printf("Main fn.\n");
+
+    shared_page = xc_gnttab_map_grant_ref(xc_handle,
+                                           7,
+                                           2047,
+                                           prot);
+    
+    shared_page[20] = '\0';
+    printf("Current content of the page = %s\n", shared_page);
+    sprintf(shared_page, "%s", "Haha dirty page now! Very bad page.");
+    xc_gnttab_munmap(xc_handle, shared_page, 1);
+    xc_gnttab_close(xc_handle);
+    unrelated next line, saved for later convenience
+    xc_evtchn_notify(mount->evth, mount->local_evtchn);
+#endif
+}
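
create_export() and xenbus_register_export() above are all that is needed to publish an additional export next to the built-in "default" one. A hedged sketch (the export name and path below are made up for illustration):

    /* Illustrative only: register a second export next to "default". */
    static void register_scratch_export(void)
    {
        struct fs_export *scratch;

        scratch = create_export("scratch", "/exports/scratch");
        xenbus_register_export(scratch);
    }
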
diff -r 7643472d6b43 -r a868bd4236e6 tools/fs-back/fs-backend.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/fs-back/fs-backend.h        Fri Jan 18 13:49:48 2008 -0700
@@ -0,0 +1,86 @@
+#ifndef __LIB_FS_BACKEND__
+#define __LIB_FS_BACKEND__
+
+#include <aio.h>
+#include <xs.h>
+#include <xen/grant_table.h>
+#include <xen/event_channel.h>
+#include <xen/io/ring.h>
+#include <xen/io/fsif.h>
+
+#define ROOT_NODE           "backend/vfs"
+#define EXPORTS_SUBNODE     "exports"
+#define EXPORTS_NODE        ROOT_NODE"/"EXPORTS_SUBNODE
+#define WATCH_NODE          EXPORTS_NODE"/requests"
+
+struct fs_export
+{
+    int export_id;
+    char *export_path;
+    char *name;
+    struct fs_export *next; 
+};
+
+struct fs_request
+{
+    int active;
+    void *page;                         /* Pointer to mapped grant */
+    struct fsif_request req_shadow;
+    struct aiocb aiocb; 
+};
+
+
+struct mount
+{
+    struct fs_export *export;
+    int dom_id;
+    char *frontend;
+    int mount_id;                     /* = backend id */
+    grant_ref_t gref;
+    evtchn_port_t remote_evtchn;
+    int evth;                         /* Handle to the event channel */
+    evtchn_port_t local_evtchn;
+    int gnth;
+    struct fsif_back_ring ring;
+    int nr_entries;
+    struct fs_request *requests;
+    unsigned short *freelist;
+};
+
+
+/* Handle to XenStore driver */
+extern struct xs_handle *xsh;
+
+bool xenbus_create_request_node(void);
+int xenbus_register_export(struct fs_export *export);
+int xenbus_get_watch_fd(void);
+void xenbus_read_mount_request(struct mount *mount, char *frontend);
+void xenbus_write_backend_node(struct mount *mount);
+void xenbus_write_backend_ready(struct mount *mount);
+
+/* File operations, implemented in fs-ops.c */
+struct fs_op
+{
+    int type;       /* Type of request (from fsif.h) these handlers 
+                       are responsible for */
+    void (*dispatch_handler)(struct mount *mount, struct fsif_request *req);
+    void (*response_handler)(struct mount *mount, struct fs_request *req);
+};
+
+/* NULL-terminated array of all file request handlers */
+extern struct fs_op *fsops[];
+
+static inline void add_id_to_freelist(unsigned int id,unsigned short* freelist)
+{
+    freelist[id] = freelist[0];
+    freelist[0]  = id;
+}
+
+static inline unsigned short get_id_from_freelist(unsigned short* freelist)
+{
+    unsigned int id = freelist[0];
+    freelist[0] = freelist[id];
+    return id;
+}
+
+#endif /* __LIB_FS_BACKEND__ */
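
The two inline helpers above keep a free list of request ids threaded through the array itself: slot 0 holds the head and every free slot stores the index of the next free id, so allocation and release are O(1). A standalone sketch of the idiom follows (illustrative only; ids 1..7 are seeded here so that slot 0 serves purely as the head, whereas allocate_request_array() seeds all of 0..nr_entries-1; the helpers are copied so the program compiles on its own).

    #include <stdio.h>

    #define NR_IDS 8

    static unsigned short freelist[NR_IDS];

    static void add_id_to_freelist(unsigned int id, unsigned short *fl)
    {
        fl[id] = fl[0];
        fl[0]  = id;
    }

    static unsigned short get_id_from_freelist(unsigned short *fl)
    {
        unsigned int id = fl[0];
        fl[0] = fl[id];
        return id;
    }

    int main(void)
    {
        int i, id;

        /* Seed ids 1..7; they come back in LIFO order (7, 6, ..., 1). */
        for (i = 1; i < NR_IDS; i++)
            add_id_to_freelist(i, freelist);

        for (i = 1; i < NR_IDS; i++) {
            id = get_id_from_freelist(freelist);
            printf("allocated id %d\n", id);
        }
        return 0;
    }
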
diff -r 7643472d6b43 -r a868bd4236e6 tools/fs-back/fs-ops.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/fs-back/fs-ops.c    Fri Jan 18 13:49:48 2008 -0700
@@ -0,0 +1,658 @@
+#undef NDEBUG
+#include <stdio.h>
+#include <aio.h>
+#include <string.h>
+#include <assert.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <inttypes.h>
+#include <xenctrl.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/vfs.h>
+#include <sys/mount.h>
+#include <unistd.h>
+#include "fs-backend.h"
+
+/* For debugging only */
+#include <sys/time.h>
+#include <time.h>
+
+
+#define BUFFER_SIZE 1024
+
+
+unsigned short get_request(struct mount *mount, struct fsif_request *req)
+{
+    unsigned short id = get_id_from_freelist(mount->freelist); 
+
+    printf("Private Request id: %d\n", id);
+    memcpy(&mount->requests[id].req_shadow, req, sizeof(struct fsif_request));
+    mount->requests[id].active = 1;
+
+    return id;
+}
+
+
+void dispatch_file_open(struct mount *mount, struct fsif_request *req)
+{
+    char *file_name, full_path[BUFFER_SIZE];
+    int fd;
+    struct timeval tv1, tv2;
+    RING_IDX rsp_idx;
+    fsif_response_t *rsp;
+    uint16_t req_id;
+
+    printf("Dispatching file open operation (gref=%d).\n", req->u.fopen.gref);
+    /* Read the request, and open file */
+    file_name = xc_gnttab_map_grant_ref(mount->gnth,
+                                        mount->dom_id,
+                                        req->u.fopen.gref,
+                                        PROT_READ);
+   
+    req_id = req->id;
+    printf("File open issued for %s\n", file_name); 
+    assert(BUFFER_SIZE > 
+           strlen(file_name) + strlen(mount->export->export_path) + 1); 
+    sprintf(full_path, "%s/%s", mount->export->export_path, file_name);
+    assert(xc_gnttab_munmap(mount->gnth, file_name, 1) == 0);
+    printf("Issuing open for %s\n", full_path);
+    fd = open(full_path, O_RDWR);
+    printf("Got FD: %d\n", fd);
+    /* We can advance the request consumer index; from here on the request
+     * should not be used (it may be overridden by a response). */
+    mount->ring.req_cons++;
+
+
+    /* Get a response from the ring */
+    rsp_idx = mount->ring.rsp_prod_pvt++;
+    printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id);
+    rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx);
+    rsp->id = req_id; 
+    rsp->ret_val = (uint64_t)fd;
+}
+
+void dispatch_file_close(struct mount *mount, struct fsif_request *req)
+{
+    int ret;
+    RING_IDX rsp_idx;
+    fsif_response_t *rsp;
+    uint16_t req_id;
+
+    printf("Dispatching file close operation (fd=%d).\n", req->u.fclose.fd);
+   
+    req_id = req->id;
+    ret = close(req->u.fclose.fd);
+    printf("Got ret: %d\n", ret);
+    /* We can advance the request consumer index; from here on the request
+     * should not be used (it may be overridden by a response). */
+    mount->ring.req_cons++;
+
+
+    /* Get a response from the ring */
+    rsp_idx = mount->ring.rsp_prod_pvt++;
+    printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id);
+    rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx);
+    rsp->id = req_id; 
+    rsp->ret_val = (uint64_t)ret;
+}
+void dispatch_file_read(struct mount *mount, struct fsif_request *req)
+{
+    void *buf;
+    int fd;
+    uint16_t req_id;
+    unsigned short priv_id;
+    struct fs_request *priv_req;
+
+    /* Read the request */
+    buf = xc_gnttab_map_grant_ref(mount->gnth,
+                                  mount->dom_id,
+                                  req->u.fread.gref,
+                                  PROT_WRITE);
+   
+    req_id = req->id;
+    printf("File read issued for FD=%d (len=%"PRIu64", offest=%"PRIu64")\n", 
+            req->u.fread.fd, req->u.fread.len, req->u.fread.offset); 
+   
+    priv_id = get_request(mount, req);
+    printf("Private id is: %d\n", priv_id);
+    priv_req = &mount->requests[priv_id];
+    priv_req->page = buf;
+
+    /* Dispatch AIO read request */
+    bzero(&priv_req->aiocb, sizeof(struct aiocb));
+    priv_req->aiocb.aio_fildes = req->u.fread.fd;
+    priv_req->aiocb.aio_nbytes = req->u.fread.len;
+    priv_req->aiocb.aio_offset = req->u.fread.offset;
+    priv_req->aiocb.aio_buf = buf;
+    assert(aio_read(&priv_req->aiocb) >= 0);
+
+     
+    /* We can advance the request consumer index; from here on the request
+     * should not be used (it may be overridden by a response). */
+    mount->ring.req_cons++;
+}
+
+void end_file_read(struct mount *mount, struct fs_request *priv_req)
+{
+    RING_IDX rsp_idx;
+    fsif_response_t *rsp;
+    uint16_t req_id;
+
+    /* Release the grant */
+    assert(xc_gnttab_munmap(mount->gnth, priv_req->page, 1) == 0);
+
+    /* Get a response from the ring */
+    rsp_idx = mount->ring.rsp_prod_pvt++;
+    req_id = priv_req->req_shadow.id; 
+    printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id);
+    rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx);
+    rsp->id = req_id; 
+    rsp->ret_val = (uint64_t)aio_return(&priv_req->aiocb);
+}
+
+void dispatch_file_write(struct mount *mount, struct fsif_request *req)
+{
+    void *buf;
+    int fd;
+    uint16_t req_id;
+    unsigned short priv_id;
+    struct fs_request *priv_req;
+
+    /* Read the request */
+    buf = xc_gnttab_map_grant_ref(mount->gnth,
+                                  mount->dom_id,
+                                  req->u.fwrite.gref,
+                                  PROT_READ);
+   
+    req_id = req->id;
+    printf("File write issued for FD=%d (len=%"PRIu64", offest=%"PRIu64")\n", 
+            req->u.fwrite.fd, req->u.fwrite.len, req->u.fwrite.offset); 
+   
+    priv_id = get_request(mount, req);
+    printf("Private id is: %d\n", priv_id);
+    priv_req = &mount->requests[priv_id];
+    priv_req->page = buf;
+
+    /* Dispatch AIO write request */
+    bzero(&priv_req->aiocb, sizeof(struct aiocb));
+    priv_req->aiocb.aio_fildes = req->u.fwrite.fd;
+    priv_req->aiocb.aio_nbytes = req->u.fwrite.len;
+    priv_req->aiocb.aio_offset = req->u.fwrite.offset;
+    priv_req->aiocb.aio_buf = buf;
+    assert(aio_write(&priv_req->aiocb) >= 0);
+
+     
+    /* We can advance the request consumer index; from here on the request
+     * should not be used (it may be overridden by a response). */
+    mount->ring.req_cons++;
+}
+
+void end_file_write(struct mount *mount, struct fs_request *priv_req)
+{
+    RING_IDX rsp_idx;
+    fsif_response_t *rsp;
+    uint16_t req_id;
+
+    /* Release the grant */
+    assert(xc_gnttab_munmap(mount->gnth, priv_req->page, 1) == 0);
+    
+    /* Get a response from the ring */
+    rsp_idx = mount->ring.rsp_prod_pvt++;
+    req_id = priv_req->req_shadow.id; 
+    printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id);
+    rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx);
+    rsp->id = req_id; 
+    rsp->ret_val = (uint64_t)aio_return(&priv_req->aiocb);
+}
+
+void dispatch_stat(struct mount *mount, struct fsif_request *req)
+{
+    struct fsif_stat_response *buf;
+    struct stat stat;
+    int fd, ret;
+    uint16_t req_id;
+    RING_IDX rsp_idx;
+    fsif_response_t *rsp;
+
+    /* Read the request */
+    buf = xc_gnttab_map_grant_ref(mount->gnth,
+                                  mount->dom_id,
+                                  req->u.fstat.gref,
+                                  PROT_WRITE);
+   
+    req_id = req->id;
+    fd = req->u.fstat.fd;
+    printf("File stat issued for FD=%d\n", fd); 
+   
+    /* We can advance the request consumer index; from here on the request
+     * should not be used (it may be overridden by a response). */
+    mount->ring.req_cons++;
+   
+    /* Stat, and create the response */ 
+    ret = fstat(fd, &stat);
+    printf("Mode=%o, uid=%d, a_time=%ld\n",
+            stat.st_mode, stat.st_uid, stat.st_atime);
+    buf->stat_mode  = stat.st_mode;
+    buf->stat_uid   = stat.st_uid;
+    buf->stat_gid   = stat.st_gid;
+#ifdef BLKGETSIZE
+    if (S_ISBLK(stat.st_mode)) {
+       int sectors;
+       if (ioctl(fd, BLKGETSIZE, &sectors)) {
+           perror("getting device size\n");
+           buf->stat_size = 0;
+       } else
+           buf->stat_size = sectors << 9;
+    } else
+#endif
+       buf->stat_size  = stat.st_size;
+    buf->stat_atime = stat.st_atime;
+    buf->stat_mtime = stat.st_mtime;
+    buf->stat_ctime = stat.st_ctime;
+
+    /* Release the grant */
+    assert(xc_gnttab_munmap(mount->gnth, buf, 1) == 0);
+    
+    /* Get a response from the ring */
+    rsp_idx = mount->ring.rsp_prod_pvt++;
+    printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id);
+    rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx);
+    rsp->id = req_id; 
+    rsp->ret_val = (uint64_t)ret;
+}
+
+
+void dispatch_truncate(struct mount *mount, struct fsif_request *req)
+{
+    int fd, ret;
+    uint16_t req_id;
+    RING_IDX rsp_idx;
+    fsif_response_t *rsp;
+    int64_t length;
+
+    req_id = req->id;
+    fd = req->u.ftruncate.fd;
+    length = req->u.ftruncate.length;
+    printf("File truncate issued for FD=%d, length=%"PRId64"\n", fd, length); 
+   
+    /* We can advance the request consumer index; from here on the request
+     * should not be used (it may be overridden by a response). */
+    mount->ring.req_cons++;
+   
+    /* Truncate, and create the response */
+    ret = ftruncate(fd, length);
+
+    /* Get a response from the ring */
+    rsp_idx = mount->ring.rsp_prod_pvt++;
+    printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id);
+    rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx);
+    rsp->id = req_id; 
+    rsp->ret_val = (uint64_t)ret;
+}
+
+void dispatch_remove(struct mount *mount, struct fsif_request *req)
+{
+    char *file_name, full_path[BUFFER_SIZE];
+    int ret;
+    RING_IDX rsp_idx;
+    fsif_response_t *rsp;
+    uint16_t req_id;
+
+    printf("Dispatching remove operation (gref=%d).\n", req->u.fremove.gref);
+    /* Read the request, and remove the file */
+    file_name = xc_gnttab_map_grant_ref(mount->gnth,
+                                        mount->dom_id,
+                                        req->u.fremove.gref,
+                                        PROT_READ);
+   
+    req_id = req->id;
+    printf("File remove issued for %s\n", file_name); 
+    assert(BUFFER_SIZE > 
+           strlen(file_name) + strlen(mount->export->export_path) + 1); 
+    sprintf(full_path, "%s/%s", mount->export->export_path, file_name);
+    assert(xc_gnttab_munmap(mount->gnth, file_name, 1) == 0);
+    printf("Issuing remove for %s\n", full_path);
+    ret = remove(full_path);
+    printf("Got ret: %d\n", ret);
+    /* We can advance the request consumer index; from here on the request
+     * should not be used (it may be overridden by a response). */
+    mount->ring.req_cons++;
+
+
+    /* Get a response from the ring */
+    rsp_idx = mount->ring.rsp_prod_pvt++;
+    printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id);
+    rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx);
+    rsp->id = req_id; 
+    rsp->ret_val = (uint64_t)ret;
+}
+
+
+void dispatch_rename(struct mount *mount, struct fsif_request *req)
+{
+    char *buf, *old_file_name, *new_file_name;
+    char old_full_path[BUFFER_SIZE], new_full_path[BUFFER_SIZE];
+    int ret;
+    RING_IDX rsp_idx;
+    fsif_response_t *rsp;
+    uint16_t req_id;
+
+    printf("Dispatching rename operation (gref=%d).\n", req->u.fremove.gref);
+    /* Read the request, and rename the file */
+    buf = xc_gnttab_map_grant_ref(mount->gnth,
+                                  mount->dom_id,
+                                  req->u.frename.gref,
+                                  PROT_READ);
+   
+    req_id = req->id;
+    old_file_name = buf + req->u.frename.old_name_offset;
+    new_file_name = buf + req->u.frename.new_name_offset;
+    printf("File rename issued for %s -> %s (buf=%s)\n", 
+            old_file_name, new_file_name, buf); 
+    assert(BUFFER_SIZE > 
+           strlen(old_file_name) + strlen(mount->export->export_path) + 1); 
+    assert(BUFFER_SIZE > 
+           strlen(new_file_name) + strlen(mount->export->export_path) + 1); 
+    sprintf(old_full_path, "%s/%s", mount->export->export_path, old_file_name);
+    sprintf(new_full_path, "%s/%s", mount->export->export_path, new_file_name);
+    assert(xc_gnttab_munmap(mount->gnth, buf, 1) == 0);
+    printf("Issuing rename for %s -> %s\n", old_full_path, new_full_path);
+    ret = rename(old_full_path, new_full_path);
+    printf("Got ret: %d\n", ret);
+    /* We can advance the request consumer index; from here on the request
+     * should not be used (it may be overridden by a response). */
+    mount->ring.req_cons++;
+
+
+    /* Get a response from the ring */
+    rsp_idx = mount->ring.rsp_prod_pvt++;
+    printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id);
+    rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx);
+    rsp->id = req_id; 
+    rsp->ret_val = (uint64_t)ret;
+}
+
+
+void dispatch_create(struct mount *mount, struct fsif_request *req)
+{
+    char *file_name, full_path[BUFFER_SIZE];
+    int ret;
+    int8_t directory;
+    int32_t mode;
+    RING_IDX rsp_idx;
+    fsif_response_t *rsp;
+    uint16_t req_id;
+
+    printf("Dispatching file create operation (gref=%d).\n", 
req->u.fcreate.gref);
+    /* Read the request, and create file/directory */
+    mode = req->u.fcreate.mode;
+    directory = req->u.fcreate.directory;
+    file_name = xc_gnttab_map_grant_ref(mount->gnth,
+                                        mount->dom_id,
+                                        req->u.fcreate.gref,
+                                        PROT_READ);
+   
+    req_id = req->id;
+    printf("File create issued for %s\n", file_name); 
+    assert(BUFFER_SIZE > 
+           strlen(file_name) + strlen(mount->export->export_path) + 1); 
+    sprintf(full_path, "%s/%s", mount->export->export_path, file_name);
+    assert(xc_gnttab_munmap(mount->gnth, file_name, 1) == 0);
+    /* We can advance the request consumer index; from here on the request
+     * should not be used (it may be overridden by a response). */
+    mount->ring.req_cons++;
+
+    if(directory)
+    {
+        printf("Issuing create for directory: %s\n", full_path);
+        ret = mkdir(full_path, mode);
+    }
+    else
+    {
+        printf("Issuing create for file: %s\n", full_path);
+        ret = creat(full_path, mode); 
+    }
+    printf("Got ret %d (errno=%d)\n", ret, errno);
+
+    /* Get a response from the ring */
+    rsp_idx = mount->ring.rsp_prod_pvt++;
+    printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id);
+    rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx);
+    rsp->id = req_id; 
+    rsp->ret_val = (uint64_t)ret;
+}
+
+void dispatch_list(struct mount *mount, struct fsif_request *req)
+{
+    char *file_name, *buf, full_path[BUFFER_SIZE];
+    uint32_t offset, nr_files, error_code; 
+    uint64_t ret_val;
+    RING_IDX rsp_idx;
+    fsif_response_t *rsp;
+    uint16_t req_id;
+    DIR *dir;
+    struct dirent *dirent = NULL;
+
+    printf("Dispatching list operation (gref=%d).\n", req->u.flist.gref);
+    /* Read the request, and list directory */
+    offset = req->u.flist.offset;
+    buf = file_name = xc_gnttab_map_grant_ref(mount->gnth,
+                                        mount->dom_id,
+                                        req->u.flist.gref,
+                                        PROT_READ | PROT_WRITE);
+   
+    req_id = req->id;
+    printf("Dir list issued for %s\n", file_name); 
+    assert(BUFFER_SIZE > 
+           strlen(file_name) + strlen(mount->export->export_path) + 1); 
+    sprintf(full_path, "%s/%s", mount->export->export_path, file_name);
+    /* We can advance the request consumer index; from here on the request
+     * should not be used (it may be overridden by a response). */
+    mount->ring.req_cons++;
+
+    ret_val = 0;
+    nr_files = 0;
+    dir = opendir(full_path);
+    if(dir == NULL)
+    {
+        error_code = errno;
+        goto error_out;
+    }
+    /* Skip offset dirs */
+    dirent = readdir(dir);
+    while(offset-- > 0 && dirent != NULL)
+        dirent = readdir(dir);
+    /* If there was any error with reading the directory, errno will be set */
+    error_code = errno;
+    /* Copy file names of the remaining non-NULL dirents into buf */
+    assert(NAME_MAX < PAGE_SIZE >> 1);
+    while(dirent != NULL && 
+            (PAGE_SIZE - ((unsigned long)buf & PAGE_MASK) > NAME_MAX))
+    {
+        int curr_length = strlen(dirent->d_name) + 1;
+        
+        memcpy(buf, dirent->d_name, curr_length);
+        buf += curr_length;
+        dirent = readdir(dir);
+        error_code = errno;
+        nr_files++;
+    }
+error_out:    
+    ret_val = ((nr_files << NR_FILES_SHIFT) & NR_FILES_MASK) | 
+              ((error_code << ERROR_SHIFT) & ERROR_MASK) | 
+              (dirent != NULL ? HAS_MORE_FLAG : 0);
+    assert(xc_gnttab_munmap(mount->gnth, file_name, 1) == 0);
+    
+    /* Get a response from the ring */
+    rsp_idx = mount->ring.rsp_prod_pvt++;
+    printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id);
+    rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx);
+    rsp->id = req_id; 
+    rsp->ret_val = ret_val;
+}
+
+void dispatch_chmod(struct mount *mount, struct fsif_request *req)
+{
+    int fd, ret;
+    RING_IDX rsp_idx;
+    fsif_response_t *rsp;
+    uint16_t req_id;
+    int32_t mode;
+
+    printf("Dispatching file chmod operation (fd=%d, mode=%o).\n", 
+            req->u.fchmod.fd, req->u.fchmod.mode);
+    req_id = req->id;
+    fd = req->u.fchmod.fd;
+    mode = req->u.fchmod.mode;
+    /* We can advance the request consumer index; from here on the request
+     * should not be used (it may be overridden by a response). */
+    mount->ring.req_cons++;
+
+    ret = fchmod(fd, mode); 
+
+    /* Get a response from the ring */
+    rsp_idx = mount->ring.rsp_prod_pvt++;
+    printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id);
+    rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx);
+    rsp->id = req_id; 
+    rsp->ret_val = (uint64_t)ret;
+}
+
+void dispatch_fs_space(struct mount *mount, struct fsif_request *req)
+{
+    char *file_name, full_path[BUFFER_SIZE];
+    RING_IDX rsp_idx;
+    fsif_response_t *rsp;
+    uint16_t req_id;
+    struct statfs stat;
+    int64_t ret;
+
+    printf("Dispatching fs space operation (gref=%d).\n", req->u.fspace.gref);
+    /* Read the request, and query the filesystem */
+    file_name = xc_gnttab_map_grant_ref(mount->gnth,
+                                        mount->dom_id,
+                                        req->u.fspace.gref,
+                                        PROT_READ);
+   
+    req_id = req->id;
+    printf("Fs space issued for %s\n", file_name); 
+    assert(BUFFER_SIZE > 
+           strlen(file_name) + strlen(mount->export->export_path) + 1); 
+    sprintf(full_path, "%s/%s", mount->export->export_path, file_name);
+    assert(xc_gnttab_munmap(mount->gnth, file_name, 1) == 0);
+    printf("Issuing fs space for %s\n", full_path);
+    ret = statfs(full_path, &stat);
+    if(ret >= 0)
+        ret = stat.f_bsize * stat.f_bfree;
+
+    /* We can advance the request consumer index; from here on the request
+     * should not be used (it may be overridden by a response). */
+    mount->ring.req_cons++;
+
+
+    /* Get a response from the ring */
+    rsp_idx = mount->ring.rsp_prod_pvt++;
+    printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id);
+    rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx);
+    rsp->id = req_id; 
+    rsp->ret_val = (uint64_t)ret;
+}
+
+void dispatch_file_sync(struct mount *mount, struct fsif_request *req)
+{
+    int fd;
+    uint16_t req_id;
+    unsigned short priv_id;
+    struct fs_request *priv_req;
+
+    req_id = req->id;
+    fd = req->u.fsync.fd;
+    printf("File sync issued for FD=%d\n", fd); 
+   
+    priv_id = get_request(mount, req);
+    printf("Private id is: %d\n", priv_id);
+    priv_req = &mount->requests[priv_id];
+
+    /* Dispatch AIO fsync request */
+    bzero(&priv_req->aiocb, sizeof(struct aiocb));
+    priv_req->aiocb.aio_fildes = fd;
+    assert(aio_fsync(O_SYNC, &priv_req->aiocb) >= 0);
+
+     
+    /* We can advance the request consumer index; from here on the request
+     * should not be used (it may be overridden by a response). */
+    mount->ring.req_cons++;
+}
+
+void end_file_sync(struct mount *mount, struct fs_request *priv_req)
+{
+    RING_IDX rsp_idx;
+    fsif_response_t *rsp;
+    uint16_t req_id;
+
+    /* Get a response from the ring */
+    rsp_idx = mount->ring.rsp_prod_pvt++;
+    req_id = priv_req->req_shadow.id; 
+    printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id);
+    rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx);
+    rsp->id = req_id; 
+    rsp->ret_val = (uint64_t)aio_return(&priv_req->aiocb);
+}
+
+struct fs_op fopen_op     = {.type             = REQ_FILE_OPEN,
+                             .dispatch_handler = dispatch_file_open,
+                             .response_handler = NULL};
+struct fs_op fclose_op    = {.type             = REQ_FILE_CLOSE,
+                             .dispatch_handler = dispatch_file_close,
+                             .response_handler = NULL};
+struct fs_op fread_op     = {.type             = REQ_FILE_READ,
+                             .dispatch_handler = dispatch_file_read,
+                             .response_handler = end_file_read};
+struct fs_op fwrite_op    = {.type             = REQ_FILE_WRITE,
+                             .dispatch_handler = dispatch_file_write,
+                             .response_handler = end_file_write};
+struct fs_op fstat_op     = {.type             = REQ_STAT,
+                             .dispatch_handler = dispatch_stat,
+                             .response_handler = NULL};
+struct fs_op ftruncate_op = {.type             = REQ_FILE_TRUNCATE,
+                             .dispatch_handler = dispatch_truncate,
+                             .response_handler = NULL};
+struct fs_op fremove_op   = {.type             = REQ_REMOVE,
+                             .dispatch_handler = dispatch_remove,
+                             .response_handler = NULL};
+struct fs_op frename_op   = {.type             = REQ_RENAME,
+                             .dispatch_handler = dispatch_rename,
+                             .response_handler = NULL};
+struct fs_op fcreate_op   = {.type             = REQ_CREATE,
+                             .dispatch_handler = dispatch_create,
+                             .response_handler = NULL};
+struct fs_op flist_op     = {.type             = REQ_DIR_LIST,
+                             .dispatch_handler = dispatch_list,
+                             .response_handler = NULL};
+struct fs_op fchmod_op    = {.type             = REQ_CHMOD,
+                             .dispatch_handler = dispatch_chmod,
+                             .response_handler = NULL};
+struct fs_op fspace_op    = {.type             = REQ_FS_SPACE,
+                             .dispatch_handler = dispatch_fs_space,
+                             .response_handler = NULL};
+struct fs_op fsync_op     = {.type             = REQ_FILE_SYNC,
+                             .dispatch_handler = dispatch_file_sync,
+                             .response_handler = end_file_sync};
+
+
+struct fs_op *fsops[] = {&fopen_op, 
+                         &fclose_op, 
+                         &fread_op, 
+                         &fwrite_op, 
+                         &fstat_op, 
+                         &ftruncate_op, 
+                         &fremove_op, 
+                         &frename_op, 
+                         &fcreate_op, 
+                         &flist_op, 
+                         &fchmod_op, 
+                         &fspace_op, 
+                         &fsync_op, 
+                         NULL};
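
dispatch_list() above packs the file count, the errno value and a "has more entries" flag into the single 64-bit ret_val, using the NR_FILES_*/ERROR_*/HAS_MORE_FLAG constants from xen/io/fsif.h. A sketch of the inverse unpacking a frontend would perform (the helper name is made up; only the macros already used above are assumed):

    #include <stdint.h>
    #include <xen/io/fsif.h>

    /* Illustrative only: undo the packing done at the end of dispatch_list(). */
    static void decode_list_ret(uint64_t ret_val, uint32_t *nr_files,
                                uint32_t *error_code, int *has_more)
    {
        *nr_files   = (ret_val & NR_FILES_MASK) >> NR_FILES_SHIFT;
        *error_code = (ret_val & ERROR_MASK) >> ERROR_SHIFT;
        *has_more   = (ret_val & HAS_MORE_FLAG) != 0;
    }
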
diff -r 7643472d6b43 -r a868bd4236e6 tools/fs-back/fs-xenbus.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/fs-back/fs-xenbus.c Fri Jan 18 13:49:48 2008 -0700
@@ -0,0 +1,180 @@
+#undef NDEBUG
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <assert.h>
+#include <xenctrl.h>
+#include <xs.h>
+#include <xen/io/fsif.h>
+#include "fs-backend.h"
+
+
+static bool xenbus_printf(struct xs_handle *xsh,
+                          xs_transaction_t xbt,
+                          char* node,
+                          char* path,
+                          char* fmt,
+                          ...)
+{
+    char fullpath[1024];
+    char val[1024];
+    va_list args;
+    
+    va_start(args, fmt);
+    sprintf(fullpath,"%s/%s", node, path);
+    vsprintf(val, fmt, args);
+    va_end(args);
+    printf("xenbus_printf (%s) <= %s.\n", fullpath, val);    
+
+    return xs_write(xsh, xbt, fullpath, val, strlen(val));
+}
+
+bool xenbus_create_request_node(void)
+{
+    bool ret;
+    struct xs_permissions perms;
+    
+    assert(xsh != NULL);
+    xs_rm(xsh, XBT_NULL, WATCH_NODE);
+    ret = xs_mkdir(xsh, XBT_NULL, WATCH_NODE); 
+    if (!ret)
+        return false;
+
+    perms.id = 0;
+    perms.perms = XS_PERM_WRITE;
+    ret = xs_set_permissions(xsh, XBT_NULL, WATCH_NODE, &perms, 1);
+
+    return ret;
+}
+
+int xenbus_register_export(struct fs_export *export)
+{
+    xs_transaction_t xst = 0;
+    char node[1024];
+    struct xs_permissions perms;
+
+    assert(xsh != NULL);
+    if(xsh == NULL)
+    {
+        printf("Could not open connection to xenbus deamon.\n");
+        goto error_exit;
+    }
+    printf("Connection to the xenbus deamon opened successfully.\n");
+
+    /* Start transaction */
+    xst = xs_transaction_start(xsh);
+    if(xst == 0)
+    {
+        printf("Could not start a transaction.\n");
+        goto error_exit;
+    }
+    printf("XS transaction is %d\n", xst); 
+ 
+    /* Create node string */
+    sprintf(node, "%s/%d", EXPORTS_NODE, export->export_id); 
+    /* Remove old export (if exists) */ 
+    xs_rm(xsh, xst, node);
+
+    if(!xenbus_printf(xsh, xst, node, "name", "%s", export->name))
+    {
+        printf("Could not write the export node.\n");
+        goto error_exit;
+    }
+
+    /* People need to be able to read our export */
+    perms.id = 0;
+    perms.perms = XS_PERM_READ;
+    if(!xs_set_permissions(xsh, xst, EXPORTS_NODE, &perms, 1))
+    {
+        printf("Could not set permissions on the export node.\n");
+        goto error_exit;
+    }
+
+    xs_transaction_end(xsh, xst, 0);
+    return 0; 
+
+error_exit:    
+    if(xst != 0)
+        xs_transaction_end(xsh, xst, 1);
+    return -1;
+}
+
+int xenbus_get_watch_fd(void)
+{
+    int res;
+    assert(xsh != NULL);
+    res = xs_watch(xsh, WATCH_NODE, "conn-watch");
+    assert(res);
+    return xs_fileno(xsh); 
+}
+
+void xenbus_read_mount_request(struct mount *mount, char *frontend)
+{
+    char node[1024];
+    char *s;
+
+    assert(xsh != NULL);
+#if 0
+    sprintf(node, WATCH_NODE"/%d/%d/frontend", 
+                           mount->dom_id, mount->export->export_id);
+    frontend = xs_read(xsh, XBT_NULL, node, NULL);
+#endif
+    mount->frontend = frontend;
+    sprintf(node, "%s/state", frontend);
+    s = xs_read(xsh, XBT_NULL, node, NULL);
+    assert(strcmp(s, STATE_READY) == 0);
+    free(s);
+    sprintf(node, "%s/ring-ref", frontend);
+    s = xs_read(xsh, XBT_NULL, node, NULL);
+    mount->gref = atoi(s);
+    free(s);
+    sprintf(node, "%s/event-channel", frontend);
+    s = xs_read(xsh, XBT_NULL, node, NULL);
+    mount->remote_evtchn = atoi(s);
+    free(s);
+}
+
+/* Small utility function to figure out our domain id */
+static int get_self_id(void)
+{
+    char *dom_id;
+    int ret; 
+                
+    assert(xsh != NULL);
+    dom_id = xs_read(xsh, XBT_NULL, "domid", NULL);
+    sscanf(dom_id, "%d", &ret); 
+    free(dom_id);
+                        
+    return ret;                                  
+} 
+
+
+void xenbus_write_backend_node(struct mount *mount)
+{
+    char node[1024], backend_node[1024];
+    int self_id;
+
+    assert(xsh != NULL);
+    self_id = get_self_id();
+    printf("Our own dom_id=%d\n", self_id);
+    sprintf(node, "%s/backend", mount->frontend);
+    sprintf(backend_node, "/local/domain/%d/"ROOT_NODE"/%d",
+                                self_id, mount->mount_id);
+    xs_write(xsh, XBT_NULL, node, backend_node, strlen(backend_node));
+
+    sprintf(node, ROOT_NODE"/%d/state", mount->mount_id);
+    xs_write(xsh, XBT_NULL, node, STATE_INITIALISED, strlen(STATE_INITIALISED));
+}
+
+void xenbus_write_backend_ready(struct mount *mount)
+{
+    char node[1024];
+    int self_id;
+
+    assert(xsh != NULL);
+    self_id = get_self_id();
+    sprintf(node, ROOT_NODE"/%d/state", mount->mount_id);
+    xs_write(xsh, XBT_NULL, node, STATE_READY, strlen(STATE_READY));
+}
+
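
A side note on the xenbus_printf() helper above: it formats the node path and
value into fixed 1024-byte buffers with sprintf()/vsprintf(). A bounds-checked
variant of the same idea is sketched below; store_write() is only a stub
standing in for xs_write(), and the node names are made up.

    #include <stdarg.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stub standing in for xs_write(); the real call also takes a handle
     * and a transaction. */
    static bool store_write(const char *path, const char *val)
    {
        printf("%s <= %s\n", path, val);
        return true;
    }

    /* Same shape as xenbus_printf() above, but with snprintf()/vsnprintf()
     * so an oversized path or value is rejected instead of overflowing. */
    static bool store_printf(const char *node, const char *path, const char *fmt, ...)
    {
        char fullpath[1024];
        char val[1024];
        va_list args;
        int n;

        if (snprintf(fullpath, sizeof(fullpath), "%s/%s", node, path) >= (int)sizeof(fullpath))
            return false;

        va_start(args, fmt);
        n = vsnprintf(val, sizeof(val), fmt, args);
        va_end(args);
        if (n < 0 || n >= (int)sizeof(val))
            return false;

        return store_write(fullpath, val);
    }

    int main(void)
    {
        return store_printf("backend/vfs/1", "state", "%s", "ready") ? 0 : 1;
    }
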
diff -r 7643472d6b43 -r a868bd4236e6 tools/ioemu/target-i386-dm/helper2.c
--- a/tools/ioemu/target-i386-dm/helper2.c      Thu Jan 17 12:17:14 2008 -0700
+++ b/tools/ioemu/target-i386-dm/helper2.c      Fri Jan 18 13:49:48 2008 -0700
@@ -476,7 +476,7 @@ void cpu_ioreq_timeoffset(CPUState *env,
 {
     char b[64];
 
-    time_offset += (ulong)req->data;
+    time_offset += (unsigned long)req->data;
 
     fprintf(logfile, "Time offset set %ld, added offset %ld\n", time_offset, 
req->data);
     sprintf(b, "%ld", time_offset);
@@ -637,6 +637,7 @@ int main_loop(void)
     int evtchn_fd = xce_handle == -1 ? -1 : xc_evtchn_fd(xce_handle);
     char qemu_file[PATH_MAX];
     fd_set fds;
+    int ret = 0;
 
     buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
                                       cpu_single_env);
@@ -647,9 +648,14 @@ int main_loop(void)
 
     xenstore_record_dm_state("running");
     while (1) {
-        while (!(vm_running && suspend_requested))
+        while (!((vm_running && suspend_requested) || shutdown_requested))
             /* Wait up to 10 msec. */
             main_loop_wait(10);
+
+        if (shutdown_requested) {
+            ret = EXCP_INTERRUPT;
+            break;
+        }
 
         fprintf(logfile, "device model saving state\n");
 
@@ -676,7 +682,7 @@ int main_loop(void)
         xenstore_record_dm_state("running");
     }
 
-    return 0;
+    return ret;
 }
 
 void destroy_hvm_domain(void)
diff -r 7643472d6b43 -r a868bd4236e6 tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Thu Jan 17 12:17:14 2008 -0700
+++ b/tools/python/xen/xend/image.py    Fri Jan 18 13:49:48 2008 -0700
@@ -321,7 +321,7 @@ class ImageHandler:
             return
         if self.pid:
             try:
-                os.kill(self.pid, signal.SIGKILL)
+                os.kill(self.pid, signal.SIGTERM)
             except OSError, exn:
                 log.exception(exn)
             try:
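
The image.py change above asks the device model to exit with SIGTERM rather
than SIGKILL, so it gets a chance to shut down cleanly (apparently to let the
shutdown_requested path added to main_loop() above run). A minimal C sketch of
the terminate-then-reap pattern, with a hypothetical child process standing in
for the device model:

    #include <signal.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();

        if (pid == 0) {
            /* Child: stand-in for the device model; just sleeps. */
            execlp("sleep", "sleep", "60", (char *)NULL);
            _exit(127);
        }

        sleep(1);

        /* Ask the child to exit cleanly instead of killing it outright. */
        if (kill(pid, SIGTERM) == 0) {
            int status;
            waitpid(pid, &status, 0);
            if (WIFSIGNALED(status))
                printf("child exited on signal %d\n", WTERMSIG(status));
        }
        return 0;
    }
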
diff -r 7643472d6b43 -r a868bd4236e6 tools/xentrace/xenctx.c
--- a/tools/xentrace/xenctx.c   Thu Jan 17 12:17:14 2008 -0700
+++ b/tools/xentrace/xenctx.c   Fri Jan 18 13:49:48 2008 -0700
@@ -29,6 +29,7 @@ int domid = 0;
 int domid = 0;
 int frame_ptrs = 0;
 int stack_trace = 0;
+int disp_all = 0;
 
 #if defined (__i386__)
 #if defined (__OpenBSD__)
@@ -243,12 +244,23 @@ void print_flags(uint64_t flags)
 {
     int i;
 
-    printf("flags: %08" PRIx64, flags);
+    printf("\nflags: %08" PRIx64, flags);
     for (i = 21; i >= 0; i--) {
         char *s = flag_values[i][(flags >> i) & 1];
         if (s != NULL)
             printf(" %s", s);
     }
+    printf("\n");
+}
+
+void print_special(unsigned long *regs, const char *name, unsigned int mask)
+{
+    unsigned int i;
+
+    printf("\n");
+    for (i = 0; mask; mask >>= 1, ++i)
+        if (mask & 1)
+            printf("%s%u: " FMT_SIZE_T "\n", name, i, (size_t)regs[i]);
 }
 #endif
 
@@ -257,12 +269,10 @@ void print_ctx(vcpu_guest_context_t *ctx
 {
     struct cpu_user_regs *regs = &ctx1->user_regs;
 
-    printf("eip: %08x ", regs->eip);
+    printf("cs:eip: %04x:%08x ", regs->cs, regs->eip);
     print_symbol(regs->eip);
     print_flags(regs->eflags);
-    printf("\n");
-
-    printf("esp: %08x\n", regs->esp);
+    printf("ss:esp: %04x:%08x\n", regs->ss, regs->esp);
 
     printf("eax: %08x\t", regs->eax);
     printf("ebx: %08x\t", regs->ebx);
@@ -273,47 +283,59 @@ void print_ctx(vcpu_guest_context_t *ctx
     printf("edi: %08x\t", regs->edi);
     printf("ebp: %08x\n", regs->ebp);
 
-    printf(" cs: %08x\t", regs->cs);
-    printf(" ds: %08x\t", regs->ds);
-    printf(" fs: %08x\t", regs->fs);
-    printf(" gs: %08x\n", regs->gs);
-
+    printf(" ds:     %04x\t", regs->ds);
+    printf(" es:     %04x\t", regs->es);
+    printf(" fs:     %04x\t", regs->fs);
+    printf(" gs:     %04x\n", regs->gs);
+
+    if (disp_all) {
+        print_special(ctx1->ctrlreg, "cr", 0x1d);
+        print_special(ctx1->debugreg, "dr", 0xcf);
+    }
 }
 #elif defined(__x86_64__)
 void print_ctx(vcpu_guest_context_t *ctx1)
 {
     struct cpu_user_regs *regs = &ctx1->user_regs;
 
-    printf("rip: %08lx ", regs->rip);
+    printf("rip: %016lx ", regs->rip);
     print_symbol(regs->rip);
     print_flags(regs->rflags);
-    printf("\n");
-    printf("rsp: %08lx\n", regs->rsp);
-
-    printf("rax: %08lx\t", regs->rax);
-    printf("rbx: %08lx\t", regs->rbx);
-    printf("rcx: %08lx\t", regs->rcx);
-    printf("rdx: %08lx\n", regs->rdx);
-
-    printf("rsi: %08lx\t", regs->rsi);
-    printf("rdi: %08lx\t", regs->rdi);
-    printf("rbp: %08lx\n", regs->rbp);
-
-    printf(" r8: %08lx\t", regs->r8);
-    printf(" r9: %08lx\t", regs->r9);
-    printf("r10: %08lx\t", regs->r10);
-    printf("r11: %08lx\n", regs->r11);
-
-    printf("r12: %08lx\t", regs->r12);
-    printf("r13: %08lx\t", regs->r13);
-    printf("r14: %08lx\t", regs->r14);
-    printf("r15: %08lx\n", regs->r15);
-
-    printf(" cs:     %04x\t", regs->cs);
-    printf(" ds:     %04x\t", regs->ds);
-    printf(" fs:     %04x\t", regs->fs);
-    printf(" gs:     %04x\n", regs->gs);
-
+    printf("rsp: %016lx\n", regs->rsp);
+
+    printf("rax: %016lx\t", regs->rax);
+    printf("rcx: %016lx\t", regs->rcx);
+    printf("rdx: %016lx\n", regs->rdx);
+
+    printf("rbx: %016lx\t", regs->rbx);
+    printf("rsi: %016lx\t", regs->rsi);
+    printf("rdi: %016lx\n", regs->rdi);
+
+    printf("rbp: %016lx\t", regs->rbp);
+    printf(" r8: %016lx\t", regs->r8);
+    printf(" r9: %016lx\n", regs->r9);
+
+    printf("r10: %016lx\t", regs->r10);
+    printf("r11: %016lx\t", regs->r11);
+    printf("r12: %016lx\n", regs->r12);
+
+    printf("r13: %016lx\t", regs->r13);
+    printf("r14: %016lx\t", regs->r14);
+    printf("r15: %016lx\n", regs->r15);
+
+    printf(" cs: %04x\t", regs->cs);
+    printf(" ss: %04x\t", regs->ss);
+    printf(" ds: %04x\t", regs->ds);
+    printf(" es: %04x\n", regs->es);
+
+    printf(" fs: %04x @ %016lx\n", regs->fs, ctx1->fs_base);
+    printf(" gs: %04x @ %016lx/%016lx\n", regs->gs,
+           ctx1->gs_base_kernel, ctx1->gs_base_user);
+
+    if (disp_all) {
+        print_special(ctx1->ctrlreg, "cr", 0x1d);
+        print_special(ctx1->debugreg, "dr", 0xcf);
+    }
 }
 #elif defined(__ia64__)
 
@@ -742,6 +764,8 @@ void usage(void)
 #ifdef __ia64__
     printf("  -r LIST, --regs=LIST  display more registers.\n");
     printf("  -a --all          same as --regs=tlb,cr,ar,br,bk\n");
+#else
+    printf("  -a --all          display more registers\n");
 #endif
 }
 
@@ -811,6 +835,10 @@ int main(int argc, char **argv)
             disp_bank_regs = 1;
             disp_tlb = 1;
             break;
+#else
+        case 'a':
+            disp_all = 1;
+            break;
 #endif
         case 'h':
             usage();
diff -r 7643472d6b43 -r a868bd4236e6 unmodified_drivers/linux-2.6/mkbuildtree
--- a/unmodified_drivers/linux-2.6/mkbuildtree  Thu Jan 17 12:17:14 2008 -0700
+++ b/unmodified_drivers/linux-2.6/mkbuildtree  Fri Jan 18 13:49:48 2008 -0700
@@ -8,27 +8,28 @@ else
     echo "This may be overridden on the command line (i386,x86_64,ia64)."
 fi
 
-C=$PWD
+C=$(cd $(dirname $0) && pwd)
+R=${C%/*/*}
 
 if [ -n "$XEN" -a -d "$XEN" ]; then
   XEN=$(cd $XEN && pwd)
 else
-  XEN=$C/../../xen
+  XEN=$R/xen
 fi
+echo "Xen tree: $XEN"
 
 if [ -n "$XL" -a -d "$XL" ]; then
   XL=$(cd $XL && pwd)
 else
-  XL=$C/../../linux-2.6.18-xen.hg
+  XL=$R/linux-2.6.18-xen.hg
 fi
+echo "Linux tree: $XL"
 
-for d in $(find ${XL}/drivers/xen/ -maxdepth 1 -type d | sed -e 1d); do
-    if ! echo $d | egrep -q back; then
-        lndir $d $(basename $d) > /dev/null 2>&1
-    fi
-    if ! echo $d | egrep -q ball; then
-        lndir $d $(basename $d) > /dev/null 2>&1
-    fi
+cd $C
+
+for d in $(find ${XL}/drivers/xen/ -mindepth 1 -maxdepth 1 -type d); do
+    test -d $(basename $d) || continue
+    lndir $d $(basename $d) > /dev/null 2>&1
 done
 
 ln -sf ${XL}/drivers/xen/core/gnttab.c platform-pci
@@ -44,23 +45,27 @@ ln -nsf ${XEN}/include/public include/xe
 # Need to be quite careful here: we don't want the files we link in to
 # risk overriding the native Linux ones (in particular, system.h must
 # be native and not xenolinux).
-case "$uname"
-in
-"x86_64")
-    ln -sf ${XL}/include/asm-x86_64/mach-xen/asm/hypervisor.h include/asm
-    ln -sf ${XL}/include/asm-x86_64/mach-xen/asm/hypercall.h include/asm
-    ln -sf ${XL}/include/asm-x86_64/mach-xen/asm/synch_bitops.h include/asm
-    ln -sf ${XL}/include/asm-x86_64/mach-xen/asm/maddr.h include/asm
-    ln -sf ${XL}/include/asm-x86_64/mach-xen/asm/gnttab_dma.h include/asm
-    mkdir -p include/asm-i386
-    lndir -silent ${XL}/include/asm-i386 include/asm-i386
-  ;;
-i[34567]86)
-    ln -sf ${XL}/include/asm-i386/mach-xen/asm/hypervisor.h include/asm
-    ln -sf ${XL}/include/asm-i386/mach-xen/asm/hypercall.h include/asm
-    ln -sf ${XL}/include/asm-i386/mach-xen/asm/synch_bitops.h include/asm
-    ln -sf ${XL}/include/asm-i386/mach-xen/asm/maddr.h include/asm
-    ln -sf ${XL}/include/asm-i386/mach-xen/asm/gnttab_dma.h include/asm
+case "$uname" in
+i[34567]86|x86_64)
+    if [ -d ${XL}/include/asm-x86 ]; then
+        ln -sf ${XL}/include/asm-x86/mach-xen/asm/hypervisor.h include/asm
+        ln -sf ${XL}/include/asm-x86/mach-xen/asm/hypercall*.h include/asm
+        ln -sf ${XL}/include/asm-x86/mach-xen/asm/synch_bitops*.h include/asm
+        ln -sf ${XL}/include/asm-x86/mach-xen/asm/maddr*.h include/asm
+        ln -sf ${XL}/include/asm-x86/mach-xen/asm/gnttab_dma.h include/asm
+    else
+        if [ $uname = x86_64 ]; then
+            mkdir -p include/asm-i386
+            lndir -silent ${XL}/include/asm-i386 include/asm-i386
+        else
+            uname=i386
+        fi
+        ln -sf ${XL}/include/asm-$uname/mach-xen/asm/hypervisor.h include/asm
+        ln -sf ${XL}/include/asm-$uname/mach-xen/asm/hypercall.h include/asm
+        ln -sf ${XL}/include/asm-$uname/mach-xen/asm/synch_bitops.h include/asm
+        ln -sf ${XL}/include/asm-$uname/mach-xen/asm/maddr.h include/asm
+        ln -sf ${XL}/include/asm-$uname/mach-xen/asm/gnttab_dma.h include/asm
+    fi
   ;;
 "ia64")
     ln -sf ${XL}/include/asm-ia64/hypervisor.h include/asm
diff -r 7643472d6b43 -r a868bd4236e6 xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c     Thu Jan 17 12:17:14 2008 -0700
+++ b/xen/arch/ia64/xen/hypercall.c     Fri Jan 18 13:49:48 2008 -0700
@@ -33,9 +33,6 @@
 #include <xen/event.h>
 #include <xen/perfc.h>
 #include <public/arch-ia64/debug_op.h>
-
-extern long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg);
-extern long do_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg);
 
 static IA64FAULT
 xen_hypercall (struct pt_regs *regs)
@@ -457,7 +454,7 @@ static long unregister_guest_callback(st
 /* First time to add callback to xen/ia64, so let's just stick to
  * the newer callback interface.
  */
-long do_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg)
+long do_callback_op(int cmd, XEN_GUEST_HANDLE(const_void) arg)
 {
     long ret;
 
diff -r 7643472d6b43 -r a868bd4236e6 xen/arch/x86/hvm/vmx/vtd/dmar.c
--- a/xen/arch/x86/hvm/vmx/vtd/dmar.c   Thu Jan 17 12:17:14 2008 -0700
+++ b/xen/arch/x86/hvm/vmx/vtd/dmar.c   Fri Jan 18 13:49:48 2008 -0700
@@ -43,7 +43,6 @@ LIST_HEAD(acpi_drhd_units);
 LIST_HEAD(acpi_drhd_units);
 LIST_HEAD(acpi_rmrr_units);
 LIST_HEAD(acpi_atsr_units);
-LIST_HEAD(acpi_ioapic_units);
 
 u8 dmar_host_address_width;
 
@@ -66,6 +65,47 @@ static int __init acpi_register_rmrr_uni
     return 0;
 }
 
+static int acpi_ioapic_device_match(
+    struct list_head *ioapic_list, unsigned int apic_id)
+{
+    struct acpi_ioapic_unit *ioapic;
+    list_for_each_entry( ioapic, ioapic_list, list ) {
+        if (ioapic->apic_id == apic_id)
+            return 1;
+    }
+    return 0;
+}
+
+struct acpi_drhd_unit * ioapic_to_drhd(unsigned int apic_id)
+{
+    struct acpi_drhd_unit *drhd;
+    list_for_each_entry( drhd, &acpi_drhd_units, list ) {
+        if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) ) {
+            dprintk(XENLOG_INFO VTDPREFIX,
+                    "ioapic_to_drhd: drhd->address = %lx\n",
+                    drhd->address);
+            return drhd;
+        }
+    }
+    return NULL;
+}
+
+struct iommu * ioapic_to_iommu(unsigned int apic_id)
+{
+    struct acpi_drhd_unit *drhd;
+
+    list_for_each_entry( drhd, &acpi_drhd_units, list ) {
+        if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) ) {
+            dprintk(XENLOG_INFO VTDPREFIX,
+                    "ioapic_to_iommu: drhd->address = %lx\n",
+                    drhd->address);
+            return drhd->iommu;
+        }
+    }
+    dprintk(XENLOG_WARNING VTDPREFIX, "returning NULL\n");
+    return NULL;
+}
+
 static int acpi_pci_device_match(struct pci_dev *devices, int cnt,
                                  struct pci_dev *dev)
 {
@@ -111,18 +151,18 @@ struct acpi_drhd_unit * acpi_find_matche
         if ( acpi_pci_device_match(drhd->devices,
                                    drhd->devices_cnt, dev) )
         {
-            gdprintk(XENLOG_INFO VTDPREFIX, 
-                     "acpi_find_matched_drhd_unit: drhd->address = %lx\n",
-                     drhd->address);
+            dprintk(XENLOG_INFO VTDPREFIX, 
+                    "acpi_find_matched_drhd_unit: drhd->address = %lx\n",
+                    drhd->address);
             return drhd;
         }
     }
 
     if ( include_all_drhd )
     {
-        gdprintk(XENLOG_INFO VTDPREFIX, 
-                 "acpi_find_matched_drhd_unit:include_all_drhd->addr = %lx\n",
-                 include_all_drhd->address);
+        dprintk(XENLOG_INFO VTDPREFIX, 
+                "acpi_find_matched_drhd_unit:include_all_drhd->addr = %lx\n",
+                include_all_drhd->address);
         return include_all_drhd;
     }
 
@@ -160,8 +200,8 @@ struct acpi_atsr_unit * acpi_find_matche
 
     if ( all_ports_atsru )
     {
-        gdprintk(XENLOG_INFO VTDPREFIX,
-                 "acpi_find_matched_atsr_unit: all_ports_atsru\n");
+        dprintk(XENLOG_INFO VTDPREFIX,
+                "acpi_find_matched_atsr_unit: all_ports_atsru\n");
         return all_ports_atsru;;
     }
 
@@ -180,9 +220,10 @@ static int scope_device_count(void *star
     while ( start < end )
     {
         scope = start;
-        if ( scope->length < MIN_SCOPE_LEN )
-        {
-            printk(KERN_WARNING PREFIX "Invalid device scope\n");
+        if ( (scope->length < MIN_SCOPE_LEN) ||
+             (scope->dev_type >= ACPI_DEV_ENTRY_COUNT) )
+        {
+            dprintk(XENLOG_WARNING VTDPREFIX, "Invalid device scope\n");
             return -EINVAL;
         }
 
@@ -199,16 +240,16 @@ static int scope_device_count(void *star
 
         if ( scope->dev_type == ACPI_DEV_ENDPOINT )
         {
-            printk(KERN_INFO PREFIX
-                   "found endpoint: bdf = %x:%x:%x\n",
-                   bus, path->dev, path->fn);
+            dprintk(XENLOG_INFO VTDPREFIX,
+                    "found endpoint: bdf = %x:%x:%x\n",
+                    bus, path->dev, path->fn);
             count++;
         }
         else if ( scope->dev_type == ACPI_DEV_P2PBRIDGE )
         {
-            printk(KERN_INFO PREFIX
-                   "found bridge: bdf = %x:%x:%x\n",
-                   bus, path->dev, path->fn);
+            dprintk(XENLOG_INFO VTDPREFIX,
+                    "found bridge: bdf = %x:%x:%x\n",
+                    bus, path->dev, path->fn);
             sec_bus = read_pci_config_byte(
                 bus, path->dev, path->fn, PCI_SECONDARY_BUS);
             sub_bus = read_pci_config_byte(
@@ -237,16 +278,16 @@ static int scope_device_count(void *star
         }
         else if ( scope->dev_type == ACPI_DEV_IOAPIC )
         {
-            printk(KERN_INFO PREFIX
-                   "found IOAPIC: bdf = %x:%x:%x\n",
-                   bus, path->dev, path->fn);
+            dprintk(XENLOG_INFO VTDPREFIX,
+                    "found IOAPIC: bdf = %x:%x:%x\n",
+                    bus, path->dev, path->fn);
             count++;
         }
         else
         {
-            printk(KERN_INFO PREFIX
-                   "found MSI HPET: bdf = %x:%x:%x\n",
-                   bus, path->dev, path->fn);
+            dprintk(XENLOG_INFO VTDPREFIX,
+                    "found MSI HPET: bdf = %x:%x:%x\n",
+                    bus, path->dev, path->fn);
             count++;
         }
 
@@ -256,8 +297,8 @@ static int scope_device_count(void *star
     return count;
 }
 
-static int __init acpi_parse_dev_scope(void *start, void *end, int *cnt,
-                                       struct pci_dev **devices)
+static int __init acpi_parse_dev_scope(
+    void *start, void *end, void *acpi_entry, int type)
 {
     struct acpi_dev_scope *scope;
     u8 bus, sub_bus, sec_bus;
@@ -268,10 +309,33 @@ static int __init acpi_parse_dev_scope(v
     u8 dev, func;
     u32 l;
 
+    int *cnt = NULL;
+    struct pci_dev **devices = NULL;
+    struct acpi_drhd_unit *dmaru = (struct acpi_drhd_unit *) acpi_entry;
+    struct acpi_rmrr_unit *rmrru = (struct acpi_rmrr_unit *) acpi_entry;
+    struct acpi_atsr_unit *atsru = (struct acpi_atsr_unit *) acpi_entry;
+
+    switch (type) {
+        case DMAR_TYPE:
+            cnt = &(dmaru->devices_cnt);
+            devices = &(dmaru->devices);
+            break;
+        case RMRR_TYPE:
+            cnt = &(rmrru->devices_cnt);
+            devices = &(rmrru->devices);
+            break;
+        case ATSR_TYPE:
+            cnt = &(atsru->devices_cnt);
+            devices = &(atsru->devices);
+            break;
+        default:
+            dprintk(XENLOG_ERR VTDPREFIX, "invalid vt-d acpi entry type\n");
+    }
+
     *cnt = scope_device_count(start, end);
     if ( *cnt == 0 )
     {
-        printk(KERN_INFO PREFIX "acpi_parse_dev_scope: no device\n");
+        dprintk(XENLOG_INFO VTDPREFIX, "acpi_parse_dev_scope: no device\n");
         return 0;
     }
 
@@ -298,18 +362,18 @@ static int __init acpi_parse_dev_scope(v
 
         if ( scope->dev_type == ACPI_DEV_ENDPOINT )
         {
-            printk(KERN_INFO PREFIX
-                   "found endpoint: bdf = %x:%x:%x\n",
-                   bus, path->dev, path->fn);
+            dprintk(XENLOG_INFO VTDPREFIX,
+                    "found endpoint: bdf = %x:%x:%x\n",
+                    bus, path->dev, path->fn);
             pdev->bus = bus;
             pdev->devfn = PCI_DEVFN(path->dev, path->fn);
             pdev++;
         }
         else if ( scope->dev_type == ACPI_DEV_P2PBRIDGE )
         {
-            printk(KERN_INFO PREFIX
-                   "found bridge: bus = %x dev = %x func = %x\n",
-                   bus, path->dev, path->fn);
+            dprintk(XENLOG_INFO VTDPREFIX,
+                    "found bridge: bus = %x dev = %x func = %x\n",
+                    bus, path->dev, path->fn);
             sec_bus = read_pci_config_byte(
                 bus, path->dev, path->fn, PCI_SECONDARY_BUS);
             sub_bus = read_pci_config_byte(
@@ -348,16 +412,15 @@ static int __init acpi_parse_dev_scope(v
             acpi_ioapic_unit->ioapic.bdf.bus = bus;
             acpi_ioapic_unit->ioapic.bdf.dev = path->dev;
             acpi_ioapic_unit->ioapic.bdf.func = path->fn;
-            list_add(&acpi_ioapic_unit->list, &acpi_ioapic_units);
-            printk(KERN_INFO PREFIX
-                   "found IOAPIC: bus = %x dev = %x func = %x\n",
-                   bus, path->dev, path->fn);
+            list_add(&acpi_ioapic_unit->list, &dmaru->ioapic_list);
+            dprintk(XENLOG_INFO VTDPREFIX,
+                    "found IOAPIC: bus = %x dev = %x func = %x\n",
+                    bus, path->dev, path->fn);
         }
         else
-            printk(KERN_INFO PREFIX
-                   "found MSI HPET: bus = %x dev = %x func = %x\n",
-                   bus, path->dev, path->fn);
-        
+            dprintk(XENLOG_INFO VTDPREFIX,
+                    "found MSI HPET: bus = %x dev = %x func = %x\n",
+                    bus, path->dev, path->fn);
         start += scope->length;
     }
 
@@ -371,6 +434,7 @@ acpi_parse_one_drhd(struct acpi_dmar_ent
     struct acpi_drhd_unit *dmaru;
     int ret = 0;
     static int include_all;
+    void *dev_scope_start, *dev_scope_end;
 
     dmaru = xmalloc(struct acpi_drhd_unit);
     if ( !dmaru )
@@ -379,21 +443,22 @@ acpi_parse_one_drhd(struct acpi_dmar_ent
 
     dmaru->address = drhd->address;
     dmaru->include_all = drhd->flags & 1; /* BIT0: INCLUDE_ALL */
-    printk(KERN_INFO PREFIX "dmaru->address = %lx\n", dmaru->address);
-
-    if ( !dmaru->include_all )
-        ret = acpi_parse_dev_scope(
-            (void *)(drhd + 1),
-            ((void *)drhd) + header->length,
-            &dmaru->devices_cnt, &dmaru->devices);
-    else
-    {
-        printk(KERN_INFO PREFIX "found INCLUDE_ALL\n");
+    INIT_LIST_HEAD(&dmaru->ioapic_list);
+    dprintk(XENLOG_INFO VTDPREFIX, "dmaru->address = %lx\n", dmaru->address);
+
+    dev_scope_start = (void *)(drhd + 1);
+    dev_scope_end   = ((void *)drhd) + header->length;
+    ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
+                               dmaru, DMAR_TYPE);
+
+    if ( dmaru->include_all )
+    {
+        dprintk(XENLOG_INFO VTDPREFIX, "found INCLUDE_ALL\n");
         /* Only allow one INCLUDE_ALL */
         if ( include_all )
         {
-            printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
-                   "device scope is allowed\n");
+            dprintk(XENLOG_WARNING VTDPREFIX,
+                    "Only one INCLUDE_ALL device scope is allowed\n");
             ret = -EINVAL;
         }
         include_all = 1;
@@ -411,6 +476,7 @@ acpi_parse_one_rmrr(struct acpi_dmar_ent
 {
     struct acpi_table_rmrr *rmrr = (struct acpi_table_rmrr *)header;
     struct acpi_rmrr_unit *rmrru;
+    void *dev_scope_start, *dev_scope_end;
     int ret = 0;
 
     rmrru = xmalloc(struct acpi_rmrr_unit);
@@ -420,15 +486,10 @@ acpi_parse_one_rmrr(struct acpi_dmar_ent
 
     rmrru->base_address = rmrr->base_address;
     rmrru->end_address = rmrr->end_address;
-    printk(KERN_INFO PREFIX
-           "acpi_parse_one_rmrr: base=%"PRIx64" end=%"PRIx64"\n",
-           rmrr->base_address, rmrr->end_address);
-
-    ret = acpi_parse_dev_scope(
-        (void *)(rmrr + 1),
-        ((void*)rmrr) + header->length,
-        &rmrru->devices_cnt, &rmrru->devices);
-
+    dev_scope_start = (void *)(rmrr + 1);
+    dev_scope_end   = ((void *)rmrr) + header->length;
+    ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
+                               rmrru, RMRR_TYPE);
     if ( ret || (rmrru->devices_cnt == 0) )
         xfree(rmrru);
     else
@@ -443,6 +504,7 @@ acpi_parse_one_atsr(struct acpi_dmar_ent
     struct acpi_atsr_unit *atsru;
     int ret = 0;
     static int all_ports;
+    void *dev_scope_start, *dev_scope_end;
 
     atsru = xmalloc(struct acpi_atsr_unit);
     if ( !atsru )
@@ -451,18 +513,19 @@ acpi_parse_one_atsr(struct acpi_dmar_ent
 
     atsru->all_ports = atsr->flags & 1; /* BIT0: ALL_PORTS */
     if ( !atsru->all_ports )
-        ret = acpi_parse_dev_scope(
-            (void *)(atsr + 1),
-            ((void *)atsr) + header->length,
-            &atsru->devices_cnt, &atsru->devices);
-    else
-    {
-        printk(KERN_INFO PREFIX "found ALL_PORTS\n");
+    {
+        dev_scope_start = (void *)(atsr + 1);
+        dev_scope_end   = ((void *)atsr) + header->length;
+        ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
+                                   atsru, ATSR_TYPE);
+    }
+    else {
+        dprintk(XENLOG_INFO VTDPREFIX, "found ALL_PORTS\n");
         /* Only allow one ALL_PORTS */
         if ( all_ports )
         {
-            printk(KERN_WARNING PREFIX "Only one ALL_PORTS "
-                   "device scope is allowed\n");
+            dprintk(XENLOG_WARNING VTDPREFIX,
+                    "Only one ALL_PORTS device scope is allowed\n");
             ret = -EINVAL;
         }
         all_ports = 1;
@@ -488,19 +551,19 @@ static int __init acpi_parse_dmar(unsign
     dmar = (struct acpi_table_dmar *)__acpi_map_table(phys_addr, size);
     if ( !dmar )
     {
-        printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
+        dprintk(XENLOG_WARNING VTDPREFIX, "Unable to map DMAR\n");
         return -ENODEV;
     }
 
     if ( !dmar->haw )
     {
-        printk(KERN_WARNING PREFIX "Zero: Invalid DMAR haw\n");
+        dprintk(XENLOG_WARNING VTDPREFIX, "Zero: Invalid DMAR haw\n");
         return -EINVAL;
     }
 
     dmar_host_address_width = dmar->haw;
-    printk(KERN_INFO PREFIX "Host address width %d\n",
-           dmar_host_address_width);
+    dprintk(XENLOG_INFO VTDPREFIX, "Host address width %d\n",
+            dmar_host_address_width);
 
     entry_header = (struct acpi_dmar_entry_header *)(dmar + 1);
     while ( ((unsigned long)entry_header) <
@@ -509,19 +572,19 @@ static int __init acpi_parse_dmar(unsign
         switch ( entry_header->type )
         {
         case ACPI_DMAR_DRHD:
-            printk(KERN_INFO PREFIX "found ACPI_DMAR_DRHD\n");
+            dprintk(XENLOG_INFO VTDPREFIX, "found ACPI_DMAR_DRHD\n");
             ret = acpi_parse_one_drhd(entry_header);
             break;
         case ACPI_DMAR_RMRR:
-            printk(KERN_INFO PREFIX "found ACPI_DMAR_RMRR\n");
+            dprintk(XENLOG_INFO VTDPREFIX, "found ACPI_DMAR_RMRR\n");
             ret = acpi_parse_one_rmrr(entry_header);
             break;
         case ACPI_DMAR_ATSR:
-            printk(KERN_INFO PREFIX "found ACPI_DMAR_ATSR\n");
+            dprintk(XENLOG_INFO VTDPREFIX, "found ACPI_DMAR_ATSR\n");
             ret = acpi_parse_one_atsr(entry_header);
             break;
         default:
-            printk(KERN_WARNING PREFIX "Unknown DMAR structure type\n");
+            dprintk(XENLOG_WARNING VTDPREFIX, "Unknown DMAR structure type\n");
             ret = -EINVAL;
             break;
         }
@@ -551,7 +614,7 @@ int acpi_dmar_init(void)
 
     if ( list_empty(&acpi_drhd_units) )
     {
-        printk(KERN_ERR PREFIX "No DMAR devices found\n");
+        dprintk(XENLOG_ERR VTDPREFIX, "No DMAR devices found\n");
         vtd_enabled = 0;
         return -ENODEV;
     }
diff -r 7643472d6b43 -r a868bd4236e6 xen/arch/x86/hvm/vmx/vtd/dmar.h
--- a/xen/arch/x86/hvm/vmx/vtd/dmar.h   Thu Jan 17 12:17:14 2008 -0700
+++ b/xen/arch/x86/hvm/vmx/vtd/dmar.h   Fri Jan 18 13:49:48 2008 -0700
@@ -26,6 +26,20 @@
 
 extern u8 dmar_host_address_width;
 
+/* This one is for interrupt remapping */
+struct acpi_ioapic_unit {
+    struct list_head list;
+    int apic_id;
+    union {
+        u16 info;
+        struct {
+            u16 func: 3,
+                dev:  5,
+                bus:  8;
+        }bdf;
+    }ioapic;
+};
+
 struct acpi_drhd_unit {
     struct list_head list;
     unsigned long    address; /* register base address of the unit */
@@ -33,6 +47,7 @@ struct acpi_drhd_unit {
     int    devices_cnt;
     u8    include_all:1;
     struct iommu *iommu;
+    struct list_head ioapic_list;
 };
 
 struct acpi_rmrr_unit {
@@ -73,19 +88,9 @@ struct acpi_drhd_unit * acpi_find_matche
 struct acpi_drhd_unit * acpi_find_matched_drhd_unit(struct pci_dev *dev);
 struct acpi_rmrr_unit * acpi_find_matched_rmrr_unit(struct pci_dev *dev);
 
-/* This one is for interrupt remapping */
-struct acpi_ioapic_unit {
-    struct list_head list;
-    int apic_id;
-    union {
-        u16 info;
-        struct {
-            u16 bus: 8,
-                dev: 5,
-                func: 3;
-        }bdf;
-    }ioapic;
-};
+#define DMAR_TYPE 1
+#define RMRR_TYPE 2
+#define ATSR_TYPE 3
 
 #define DMAR_OPERATION_TIMEOUT (HZ*60) /* 1m */
 #define time_after(a,b)         \
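
The reordered bdf bitfield above (func:3, dev:5, bus:8) appears intended to
pack a PCI bus/device/function into 16 bits in the conventional BDF layout:
function in the low 3 bits, device in the next 5, bus in the high 8. Because
bitfield placement is implementation-defined, the same packing is often done
with explicit shifts and masks, as in this standalone sketch (the sample
device 00:1f.2 is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Conventional 16-bit BDF layout: bits 15..8 = bus, 7..3 = device,
     * 2..0 = function. */
    static uint16_t bdf_pack(uint8_t bus, uint8_t dev, uint8_t func)
    {
        return (uint16_t)((bus << 8) | ((dev & 0x1f) << 3) | (func & 0x07));
    }

    static void bdf_unpack(uint16_t bdf, uint8_t *bus, uint8_t *dev, uint8_t *func)
    {
        *bus  = bdf >> 8;
        *dev  = (bdf >> 3) & 0x1f;
        *func = bdf & 0x07;
    }

    int main(void)
    {
        uint8_t bus, dev, func;
        uint16_t bdf = bdf_pack(0x00, 0x1f, 0x2);

        bdf_unpack(bdf, &bus, &dev, &func);
        printf("bdf=%04x -> %02x:%02x.%x\n", bdf, bus, dev, func);
        return 0;
    }
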
diff -r 7643472d6b43 -r a868bd4236e6 xen/arch/x86/hvm/vmx/vtd/utils.c
--- a/xen/arch/x86/hvm/vmx/vtd/utils.c  Thu Jan 17 12:17:14 2008 -0700
+++ b/xen/arch/x86/hvm/vmx/vtd/utils.c  Fri Jan 18 13:49:48 2008 -0700
@@ -146,12 +146,14 @@ u32 get_level_index(unsigned long gmfn, 
     return gmfn & LEVEL_MASK;
 }
 
-void print_vtd_entries(struct domain *d, int bus, int devfn,
-                       unsigned long gmfn)
+void print_vtd_entries(
+    struct domain *d, 
+    struct iommu *iommu,
+    int bus, int devfn,
+    unsigned long gmfn)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);
     struct acpi_drhd_unit *drhd;
-    struct iommu *iommu;
     struct context_entry *ctxt_entry;
     struct root_entry *root_entry;
     struct dma_pte pte;
@@ -175,7 +177,6 @@ void print_vtd_entries(struct domain *d,
     {
         printk("---- print_vtd_entries %d ----\n", i++);
 
-        iommu = drhd->iommu;
         root_entry = iommu->root_entry;
         if ( root_entry == NULL )
         {
diff -r 7643472d6b43 -r a868bd4236e6 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Thu Jan 17 12:17:14 2008 -0700
+++ b/xen/arch/x86/traps.c      Fri Jan 18 13:49:48 2008 -0700
@@ -2825,7 +2825,7 @@ long unregister_guest_nmi_callback(void)
     return 0;
 }
 
-long do_set_trap_table(XEN_GUEST_HANDLE(trap_info_t) traps)
+long do_set_trap_table(XEN_GUEST_HANDLE(const_trap_info_t) traps)
 {
     struct trap_info cur;
     struct vcpu *curr = current;
diff -r 7643472d6b43 -r a868bd4236e6 xen/arch/x86/x86_32/traps.c
--- a/xen/arch/x86/x86_32/traps.c       Thu Jan 17 12:17:14 2008 -0700
+++ b/xen/arch/x86/x86_32/traps.c       Fri Jan 18 13:49:48 2008 -0700
@@ -419,7 +419,7 @@ static long unregister_guest_callback(st
 }
 
 
-long do_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg)
+long do_callback_op(int cmd, XEN_GUEST_HANDLE(const_void) arg)
 {
     long ret;
 
diff -r 7643472d6b43 -r a868bd4236e6 xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c       Thu Jan 17 12:17:14 2008 -0700
+++ b/xen/arch/x86/x86_64/traps.c       Fri Jan 18 13:49:48 2008 -0700
@@ -470,7 +470,7 @@ static long unregister_guest_callback(st
 }
 
 
-long do_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg)
+long do_callback_op(int cmd, XEN_GUEST_HANDLE(const_void) arg)
 {
     long ret;
 
diff -r 7643472d6b43 -r a868bd4236e6 xen/include/asm-x86/hvm/vmx/intel-iommu.h
--- a/xen/include/asm-x86/hvm/vmx/intel-iommu.h Thu Jan 17 12:17:14 2008 -0700
+++ b/xen/include/asm-x86/hvm/vmx/intel-iommu.h Fri Jan 18 13:49:48 2008 -0700
@@ -127,32 +127,34 @@
 #define DMA_TLB_IVA_HINT(x) ((((u64)x) & 1) << 6)
 
 /* GCMD_REG */
-#define DMA_GCMD_TE (((u64)1) << 31)
-#define DMA_GCMD_SRTP (((u64)1) << 30)
-#define DMA_GCMD_SFL (((u64)1) << 29)
-#define DMA_GCMD_EAFL (((u64)1) << 28)
-#define DMA_GCMD_WBF (((u64)1) << 27)
-#define DMA_GCMD_QIE (((u64)1) << 26)
-#define DMA_GCMD_IRE (((u64)1) << 25)
-#define DMA_GCMD_SIRTP (((u64)1) << 24)
+#define DMA_GCMD_TE     (((u64)1) << 31)
+#define DMA_GCMD_SRTP   (((u64)1) << 30)
+#define DMA_GCMD_SFL    (((u64)1) << 29)
+#define DMA_GCMD_EAFL   (((u64)1) << 28)
+#define DMA_GCMD_WBF    (((u64)1) << 27)
+#define DMA_GCMD_QIE    (((u64)1) << 26)
+#define DMA_GCMD_IRE    (((u64)1) << 25)
+#define DMA_GCMD_SIRTP  (((u64)1) << 24)
+#define DMA_GCMD_CFI    (((u64)1) << 23)
 
 /* GSTS_REG */
-#define DMA_GSTS_TES (((u64)1) << 31)
-#define DMA_GSTS_RTPS (((u64)1) << 30)
-#define DMA_GSTS_FLS (((u64)1) << 29)
-#define DMA_GSTS_AFLS (((u64)1) << 28)
-#define DMA_GSTS_WBFS (((u64)1) << 27)
-#define DMA_GSTS_IRTPS (((u64)1) << 24)
+#define DMA_GSTS_TES    (((u64)1) << 31)
+#define DMA_GSTS_RTPS   (((u64)1) << 30)
+#define DMA_GSTS_FLS    (((u64)1) << 29)
+#define DMA_GSTS_AFLS   (((u64)1) << 28)
+#define DMA_GSTS_WBFS   (((u64)1) << 27)
 #define DMA_GSTS_QIES   (((u64)1) <<26)
 #define DMA_GSTS_IRES   (((u64)1) <<25)
+#define DMA_GSTS_SIRTPS (((u64)1) << 24)
+#define DMA_GSTS_CFIS   (((u64)1) <<23)
 
 /* PMEN_REG */
-#define DMA_PMEN_EPM   (((u32)1) << 31)
-#define DMA_PMEN_PRS   (((u32)1) << 0)
+#define DMA_PMEN_EPM    (((u32)1) << 31)
+#define DMA_PMEN_PRS    (((u32)1) << 0)
 
 /* CCMD_REG */
 #define DMA_CCMD_INVL_GRANU_OFFSET  61
-#define DMA_CCMD_ICC (((u64)1) << 63)
+#define DMA_CCMD_ICC   (((u64)1) << 63)
 #define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
 #define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
 #define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
@@ -171,8 +173,14 @@
 #define DMA_FECTL_IM (((u64)1) << 31)
 
 /* FSTS_REG */
-#define DMA_FSTS_PPF ((u64)2)
-#define DMA_FSTS_PFO ((u64)1)
+#define DMA_FSTS_PFO ((u64)1 << 0)
+#define DMA_FSTS_PPF ((u64)1 << 1)
+#define DMA_FSTS_AFO ((u64)1 << 2)
+#define DMA_FSTS_APF ((u64)1 << 3)
+#define DMA_FSTS_IQE ((u64)1 << 4)
+#define DMA_FSTS_ICE ((u64)1 << 5)
+#define DMA_FSTS_ITE ((u64)1 << 6)
+#define DMA_FSTS_FAULTS    (DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_AFO | DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | DMA_FSTS_ITE)
 #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
 
 /* FRCD_REG, 32 bits access */
@@ -266,8 +274,10 @@ struct dma_pte {
 
 /* interrupt remap entry */
 struct iremap_entry {
+  union {
+    u64 lo_val;
     struct {
-        u64 present : 1,
+        u64 p       : 1,
             fpd     : 1,
             dm      : 1,
             rh      : 1,
@@ -279,12 +289,16 @@ struct iremap_entry {
             res_2   : 8,
             dst     : 32;
     }lo;
+  };
+  union {
+    u64 hi_val;
     struct {
         u64 sid     : 16,
             sq      : 2,
             svt     : 2,
             res_1   : 44;
     }hi;
+  };
 };
 #define IREMAP_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct iremap_entry))
 #define iremap_present(v) ((v).lo & 1)
@@ -386,11 +400,11 @@ struct poll_info {
 
 #define RESERVED_VAL        0
 
-#define TYPE_INVAL_CONTEXT  1
-#define TYPE_INVAL_IOTLB    2
-#define TYPE_INVAL_DEVICE_IOTLB 3
-#define TYPE_INVAL_IEC          4
-#define TYPE_INVAL_WAIT         5
+#define TYPE_INVAL_CONTEXT      0x1
+#define TYPE_INVAL_IOTLB        0x2
+#define TYPE_INVAL_DEVICE_IOTLB 0x3
+#define TYPE_INVAL_IEC          0x4
+#define TYPE_INVAL_WAIT         0x5
 
 #define NOTIFY_TYPE_POLL        1
 #define NOTIFY_TYPE_INTR        1
@@ -400,6 +414,10 @@ struct poll_info {
 
 #define IEC_GLOBAL_INVL         0
 #define IEC_INDEX_INVL          1
+#define IRTA_REG_EIME_SHIFT     11
+#define IRTA_REG_TABLE_SIZE     7    // 4k page = 256 * 16 byte entries
+                                     // 2^^(IRTA_REG_TABLE_SIZE + 1) = 256
+                                     // IRTA_REG_TABLE_SIZE = 7
 
 #define VTD_PAGE_TABLE_LEVEL_3  3
 #define VTD_PAGE_TABLE_LEVEL_4  4
@@ -414,4 +432,29 @@ extern struct list_head acpi_rmrr_units;
 extern struct list_head acpi_rmrr_units;
 extern struct list_head acpi_ioapic_units;
 
+struct qi_ctrl {
+    struct qinval_entry *qinval;         /* queue invalidation page */
+    int qinval_index;                    /* queue invalidation index */
+    spinlock_t qinval_lock;      /* lock for queue invalidation page */
+    spinlock_t qinval_poll_lock; /* lock for queue invalidation poll addr */
+    volatile u32 qinval_poll_status;     /* used by poll method to sync */
+};
+
+struct ir_ctrl {
+    struct iremap_entry *iremap;         /* interrupt remap table */
+    int iremap_index;                    /* interrupt remap index */
+    spinlock_t iremap_lock;      /* lock for irq remapping table */
+};
+
+struct iommu_flush {
+    int (*context)(void *iommu, u16 did, u16 source_id, u8 function_mask, u64 type, int non_present_entry_flush);
+    int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order, u64 type, int non_present_entry_flush);
+};
+
+struct intel_iommu {
+    struct qi_ctrl qi_ctrl;
+    struct ir_ctrl ir_ctrl;
+    struct iommu_flush flush; 
+};
+
 #endif
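
The iremap_entry change above wraps each 64-bit half of the entry in a union
of a raw u64 and a bit-field view, so code can read, copy or compare a whole
word while still naming individual fields. A standalone sketch of that
raw-plus-bitfield pattern, using generic field names rather than the VT-d
layout:

    #include <stdint.h>
    #include <stdio.h>

    /* One 64-bit word viewed either as a raw value or as named fields.
     * Exact field placement is ABI-dependent; the raw member is what makes
     * whole-word copies and comparisons easy. */
    union entry_lo {
        uint64_t raw;
        struct {
            uint64_t present  : 1;
            uint64_t vector   : 8;
            uint64_t reserved : 23;
            uint64_t dest     : 32;
        } f;
    };

    int main(void)
    {
        union entry_lo e = { .raw = 0 };
        union entry_lo copy;

        e.f.present = 1;
        e.f.vector  = 0x30;
        e.f.dest    = 0x1;

        copy.raw = e.raw;            /* single 64-bit copy via the raw view */
        printf("raw = %#018llx, vector = %#x\n",
               (unsigned long long)copy.raw, (unsigned)copy.f.vector);
        return 0;
    }
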
diff -r 7643472d6b43 -r a868bd4236e6 xen/include/asm-x86/hypercall.h
--- a/xen/include/asm-x86/hypercall.h   Thu Jan 17 12:17:14 2008 -0700
+++ b/xen/include/asm-x86/hypercall.h   Fri Jan 18 13:49:48 2008 -0700
@@ -34,7 +34,7 @@ do_physdev_op_compat(
 
 extern long
 do_set_trap_table(
-    XEN_GUEST_HANDLE(trap_info_t) traps);
+    XEN_GUEST_HANDLE(const_trap_info_t) traps);
 
 extern int
 do_mmu_update(
diff -r 7643472d6b43 -r a868bd4236e6 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h    Thu Jan 17 12:17:14 2008 -0700
+++ b/xen/include/public/arch-ia64.h    Fri Jan 18 13:49:48 2008 -0700
@@ -36,7 +36,9 @@
 #endif
 
 #define __DEFINE_XEN_GUEST_HANDLE(name, type) \
-    ___DEFINE_XEN_GUEST_HANDLE(name, type)
+    ___DEFINE_XEN_GUEST_HANDLE(name, type);   \
+    ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
+
 #define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
 #define XEN_GUEST_HANDLE(name)          __guest_handle_ ## name
 #define XEN_GUEST_HANDLE_64(name)       XEN_GUEST_HANDLE(name)
@@ -47,18 +49,7 @@
 #endif
 
 #ifndef __ASSEMBLY__
-/* Guest handles for primitive C types. */
-__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
-__DEFINE_XEN_GUEST_HANDLE(uint,  unsigned int);
-__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
-__DEFINE_XEN_GUEST_HANDLE(u64,   unsigned long);
-DEFINE_XEN_GUEST_HANDLE(char);
-DEFINE_XEN_GUEST_HANDLE(int);
-DEFINE_XEN_GUEST_HANDLE(long);
-DEFINE_XEN_GUEST_HANDLE(void);
-
 typedef unsigned long xen_pfn_t;
-DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
 #define PRI_xen_pfn "lx"
 #endif
 
diff -r 7643472d6b43 -r a868bd4236e6 xen/include/public/arch-powerpc.h
--- a/xen/include/public/arch-powerpc.h Thu Jan 17 12:17:14 2008 -0700
+++ b/xen/include/public/arch-powerpc.h Fri Jan 18 13:49:48 2008 -0700
@@ -32,7 +32,8 @@
     } __attribute__((__aligned__(8))) __guest_handle_ ## name
 
 #define __DEFINE_XEN_GUEST_HANDLE(name, type) \
-    ___DEFINE_XEN_GUEST_HANDLE(name, type)
+    ___DEFINE_XEN_GUEST_HANDLE(name, type);   \
+    ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
 #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
 #define XEN_GUEST_HANDLE(name)        __guest_handle_ ## name
 #define set_xen_guest_handle(hnd, val) \
@@ -47,17 +48,7 @@
 #endif
 
 #ifndef __ASSEMBLY__
-/* Guest handles for primitive C types. */
-__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
-__DEFINE_XEN_GUEST_HANDLE(uint,  unsigned int);
-__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
-DEFINE_XEN_GUEST_HANDLE(char);
-DEFINE_XEN_GUEST_HANDLE(int);
-DEFINE_XEN_GUEST_HANDLE(long);
-DEFINE_XEN_GUEST_HANDLE(void);
-
 typedef unsigned long long xen_pfn_t;
-DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
 #define PRI_xen_pfn "llx"
 #endif
 
diff -r 7643472d6b43 -r a868bd4236e6 xen/include/public/arch-x86/xen.h
--- a/xen/include/public/arch-x86/xen.h Thu Jan 17 12:17:14 2008 -0700
+++ b/xen/include/public/arch-x86/xen.h Fri Jan 18 13:49:48 2008 -0700
@@ -37,7 +37,8 @@
 #endif
 
 #define __DEFINE_XEN_GUEST_HANDLE(name, type) \
-    ___DEFINE_XEN_GUEST_HANDLE(name, type)
+    ___DEFINE_XEN_GUEST_HANDLE(name, type);   \
+    ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
 #define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
 #define __XEN_GUEST_HANDLE(name)        __guest_handle_ ## name
 #define XEN_GUEST_HANDLE(name)          __XEN_GUEST_HANDLE(name)
@@ -53,17 +54,7 @@
 #endif
 
 #ifndef __ASSEMBLY__
-/* Guest handles for primitive C types. */
-__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
-__DEFINE_XEN_GUEST_HANDLE(uint,  unsigned int);
-__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
-DEFINE_XEN_GUEST_HANDLE(char);
-DEFINE_XEN_GUEST_HANDLE(int);
-DEFINE_XEN_GUEST_HANDLE(long);
-DEFINE_XEN_GUEST_HANDLE(void);
-
 typedef unsigned long xen_pfn_t;
-DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
 #define PRI_xen_pfn "lx"
 #endif
 
diff -r 7643472d6b43 -r a868bd4236e6 xen/include/public/io/fsif.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/public/io/fsif.h      Fri Jan 18 13:49:48 2008 -0700
@@ -0,0 +1,181 @@
+/******************************************************************************
+ * fsif.h
+ * 
+ * Interface to FS level split device drivers.
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2007, Grzegorz Milos, Sun Microsystems, Inc.  
+ */
+
+#ifndef __XEN_PUBLIC_IO_FSIF_H__
+#define __XEN_PUBLIC_IO_FSIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+#define REQ_FILE_OPEN        1
+#define REQ_FILE_CLOSE       2
+#define REQ_FILE_READ        3
+#define REQ_FILE_WRITE       4
+#define REQ_STAT             5
+#define REQ_FILE_TRUNCATE    6
+#define REQ_REMOVE           7
+#define REQ_RENAME           8
+#define REQ_CREATE           9
+#define REQ_DIR_LIST        10
+#define REQ_CHMOD           11
+#define REQ_FS_SPACE        12
+#define REQ_FILE_SYNC       13
+
+struct fsif_open_request {
+    grant_ref_t gref;
+};
+
+struct fsif_close_request {
+    uint32_t fd;
+};
+
+struct fsif_read_request {
+    uint32_t fd;
+    grant_ref_t gref;
+    uint64_t len;
+    uint64_t offset;
+};
+
+struct fsif_write_request {
+    uint32_t fd;
+    grant_ref_t gref;
+    uint64_t len;
+    uint64_t offset;
+};
+
+struct fsif_stat_request {
+    uint32_t fd;
+    grant_ref_t gref;
+};
+
+/* This structure is a copy of some fields from the stat structure, written
+ * to the granted page. */
+struct fsif_stat_response {
+    int32_t  stat_mode;
+    uint32_t stat_uid;
+    uint32_t stat_gid;
+    int32_t  pad;
+    int64_t  stat_size;
+    int64_t  stat_atime;
+    int64_t  stat_mtime;
+    int64_t  stat_ctime;
+};
+
+struct fsif_truncate_request {
+    uint32_t fd;
+    int32_t pad;
+    int64_t length;
+};
+
+struct fsif_remove_request {
+    grant_ref_t gref;
+};
+
+struct fsif_rename_request {
+    uint16_t old_name_offset;
+    uint16_t new_name_offset;
+    grant_ref_t gref;
+};
+
+struct fsif_create_request {
+    int8_t directory;
+    int8_t pad;
+    int16_t pad2;
+    int32_t mode;
+    grant_ref_t gref;
+};
+
+struct fsif_list_request {
+    uint32_t offset;
+    grant_ref_t gref;
+};
+
+#define NR_FILES_SHIFT  0
+#define NR_FILES_SIZE   16   /* 16 bits for the number of files mask */
+#define NR_FILES_MASK   (((1ULL << NR_FILES_SIZE) - 1) << NR_FILES_SHIFT)
+#define ERROR_SIZE      32   /* 32 bits for the error mask */
+#define ERROR_SHIFT     (NR_FILES_SIZE + NR_FILES_SHIFT)
+#define ERROR_MASK      (((1ULL << ERROR_SIZE) - 1) << ERROR_SHIFT)
+#define HAS_MORE_SHIFT  (ERROR_SHIFT + ERROR_SIZE)    
+#define HAS_MORE_FLAG   (1ULL << HAS_MORE_SHIFT)
+
+struct fsif_chmod_request {
+    uint32_t fd;
+    int32_t mode;
+};
+
+struct fsif_space_request {
+    grant_ref_t gref;
+};
+
+struct fsif_sync_request {
+    uint32_t fd;
+};
+
+
+/* FS operation request */
+struct fsif_request {
+    uint8_t type;                 /* Type of the request                  */
+    uint8_t pad;
+    uint16_t id;                  /* Request ID, copied to the response   */
+    uint32_t pad2;
+    union {
+        struct fsif_open_request     fopen;
+        struct fsif_close_request    fclose;
+        struct fsif_read_request     fread;
+        struct fsif_write_request    fwrite;
+        struct fsif_stat_request     fstat;
+        struct fsif_truncate_request ftruncate;
+        struct fsif_remove_request   fremove;
+        struct fsif_rename_request   frename;
+        struct fsif_create_request   fcreate;
+        struct fsif_list_request     flist;
+        struct fsif_chmod_request    fchmod;
+        struct fsif_space_request    fspace;
+        struct fsif_sync_request     fsync;
+    } u;
+};
+typedef struct fsif_request fsif_request_t;
+
+/* FS operation response */
+struct fsif_response {
+    uint16_t id;
+    uint16_t pad1;
+    uint32_t pad2;
+    uint64_t ret_val;
+};
+
+typedef struct fsif_response fsif_response_t;
+
+
+DEFINE_RING_TYPES(fsif, struct fsif_request, struct fsif_response);
+
+#define STATE_INITIALISED     "init"
+#define STATE_READY           "ready"
+
+
+
+#endif
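
As a usage illustration of the request format defined above, the standalone
sketch below fills in a read request roughly the way a frontend might. The
local struct definitions, the grant_ref_t stand-in and all field values are
simplified assumptions; only the wire layout (type, pad, id, union of
per-request bodies) mirrors fsif.h.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef uint32_t grant_ref_t;      /* stand-in for the grant table type */

    #define REQ_FILE_READ 3            /* mirrors the value in fsif.h */

    struct read_req {
        uint32_t fd;
        grant_ref_t gref;
        uint64_t len;
        uint64_t offset;
    };

    struct fs_req {
        uint8_t  type;                 /* REQ_* code */
        uint8_t  pad;
        uint16_t id;                   /* echoed back in the response */
        uint32_t pad2;
        union {
            struct read_req fread;
            /* ... other request bodies ... */
        } u;
    };

    int main(void)
    {
        struct fs_req req;

        memset(&req, 0, sizeof(req));
        req.type           = REQ_FILE_READ;
        req.id             = 42;           /* arbitrary request id */
        req.u.fread.fd     = 3;
        req.u.fread.gref   = 123;          /* grant ref of the data page */
        req.u.fread.len    = 4096;
        req.u.fread.offset = 0;

        printf("type=%u id=%u fd=%u len=%llu\n",
               req.type, req.id, req.u.fread.fd,
               (unsigned long long)req.u.fread.len);
        return 0;
    }
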
diff -r 7643472d6b43 -r a868bd4236e6 xen/include/public/xen.h
--- a/xen/include/public/xen.h  Thu Jan 17 12:17:14 2008 -0700
+++ b/xen/include/public/xen.h  Fri Jan 18 13:49:48 2008 -0700
@@ -37,6 +37,19 @@
 #include "arch-powerpc.h"
 #else
 #error "Unsupported architecture"
+#endif
+
+#ifndef __ASSEMBLY__
+/* Guest handles for primitive C types. */
+DEFINE_XEN_GUEST_HANDLE(char);
+__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
+DEFINE_XEN_GUEST_HANDLE(int);
+__DEFINE_XEN_GUEST_HANDLE(uint,  unsigned int);
+DEFINE_XEN_GUEST_HANDLE(long);
+__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
+DEFINE_XEN_GUEST_HANDLE(void);
+
+DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
 #endif
 
 /*

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
