xen-changelog

[Xen-changelog] Merge

# HG changeset patch
# User djm@xxxxxxxxxxxxxxx
# Node ID fdea4a967bc77e52b0a55f7686d50caf28ad6e7c
# Parent  ff7c5a791ed58fbd9f5fd2db12b5a1b58fd11c4c
# Parent  98c6c36ac4443ce7f95c87eb317bf13d868e14e0
Merge

diff -r ff7c5a791ed5 -r fdea4a967bc7 buildconfigs/Rules.mk
--- a/buildconfigs/Rules.mk     Fri Oct 21 19:58:39 2005
+++ b/buildconfigs/Rules.mk     Mon Oct 24 15:08:13 2005
@@ -80,10 +80,12 @@
        rm -f patches/*/.makedep
 
 ref-%/.valid-ref: pristine-%/.valid-pristine
+       set -e
        rm -rf $(@D)
        cp -al $(<D) $(@D)
-       ([ -d patches/$* ] && \
-         for i in patches/$*/*.patch ; do ( cd $(@D) ; patch -p1 <../$$i || exit 1 ) ; done) || true
+       if [ -d patches/$* ] ; then \
+           for i in patches/$*/*.patch ; do ( cd $(@D) ; patch -p1 <../$$i || exit 1 ) ; done ; \
+       fi
        touch $@ # update timestamp to avoid rebuild
 endif
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c
--- a/linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c  Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c  Mon Oct 24 15:08:13 2005
@@ -94,7 +94,7 @@
 
 irqreturn_t evtchn_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
-    u32            l1, l2;
+    unsigned long  l1, l2;
     unsigned int   l1i, l2i, port;
     irqreturn_t (*handler)(int, void *, struct pt_regs *);
     shared_info_t *s = HYPERVISOR_shared_info;
@@ -108,14 +108,14 @@
     while ( l1 != 0 )
     {
         l1i = __ffs(l1);
-        l1 &= ~(1 << l1i);
+        l1 &= ~(1UL << l1i);
 
         while ( (l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i]) != 0 )
         {
             l2i = __ffs(l2);
-            l2 &= ~(1 << l2i);
+            l2 &= ~(1UL << l2i);
 
-            port = (l1i << 5) + l2i;
+            port = (l1i * BITS_PER_LONG) + l2i;
             if ( (handler = evtchns[port].handler) != NULL )
            {
                clear_evtchn(port);
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32        Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32        Mon Oct 24 15:08:13 2005
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.12-xen
-# Wed Aug  3 10:04:25 2005
+# Linux kernel version: 2.6.12-xen0
+# Sat Oct 15 00:13:28 2005
 #
 CONFIG_XEN=y
 CONFIG_ARCH_XEN=y
@@ -151,17 +151,15 @@
 # CONFIG_REGPARM is not set
 CONFIG_X86_LOCAL_APIC=y
 CONFIG_X86_IO_APIC=y
-# CONFIG_HOTPLUG_CPU is not set
+CONFIG_HOTPLUG_CPU=y
 
 #
 # Bus options (PCI, PCMCIA, EISA, MCA, ISA)
 #
 CONFIG_PCI=y
-# CONFIG_PCI_GOBIOS is not set
 # CONFIG_PCI_GOMMCONFIG is not set
 # CONFIG_PCI_GODIRECT is not set
 CONFIG_PCI_GOANY=y
-CONFIG_PCI_BIOS=y
 CONFIG_PCI_DIRECT=y
 CONFIG_PCI_MMCONFIG=y
 # CONFIG_PCIEPORTBUS is not set
@@ -199,8 +197,6 @@
 #
 CONFIG_HOTPLUG_PCI=m
 CONFIG_HOTPLUG_PCI_FAKE=m
-# CONFIG_HOTPLUG_PCI_COMPAQ is not set
-# CONFIG_HOTPLUG_PCI_IBM is not set
 # CONFIG_HOTPLUG_PCI_ACPI is not set
 CONFIG_HOTPLUG_PCI_CPCI=y
 CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
@@ -2667,6 +2663,7 @@
 CONFIG_ACPI_VIDEO=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
+# CONFIG_ACPI_HOTPLUG_CPU is not set
 CONFIG_ACPI_THERMAL=m
 CONFIG_ACPI_ASUS=m
 CONFIG_ACPI_IBM=m
@@ -2694,7 +2691,7 @@
 CONFIG_EXT3_FS_SECURITY=y
 CONFIG_JBD=m
 # CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=m
+CONFIG_FS_MBCACHE=y
 CONFIG_REISERFS_FS=m
 # CONFIG_REISERFS_CHECK is not set
 # CONFIG_REISERFS_PROC_INFO is not set
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/i386/kernel/Makefile
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/Makefile        Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/Makefile        Mon Oct 24 15:08:13 2005
@@ -27,7 +27,7 @@
 c-obj-$(CONFIG_X86_CPUID)      += cpuid.o
 obj-$(CONFIG_MICROCODE)                += microcode.o
 c-obj-$(CONFIG_APM)            += apm.o
-obj-$(CONFIG_X86_SMP)          += smp.o smpboot.o
+obj-$(CONFIG_X86_SMP)          += smp.o
 #obj-$(CONFIG_X86_TRAMPOLINE)  += trampoline.o
 obj-$(CONFIG_X86_MPPARSE)      += mpparse.o
 obj-$(CONFIG_X86_LOCAL_APIC)   += apic.o
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/i386/kernel/head.S
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/head.S  Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/head.S  Mon Oct 24 15:08:13 2005
@@ -40,7 +40,7 @@
 ENTRY(startup_32)
        movl %esi,xen_start_info
 
-#ifdef CONFIG_SMP
+#if 0
 ENTRY(startup_32_smp)
 #endif /* CONFIG_SMP */
 
@@ -78,7 +78,7 @@
        movl %eax,%gs
        cld                     # gcc2 wants the direction flag cleared at all times
 
-#ifdef CONFIG_SMP
+#if 0
        movb ready, %cl 
        cmpb $1,%cl
        je 1f                   # the first CPU calls start_kernel
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c    Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c    Mon Oct 24 15:08:13 2005
@@ -136,9 +136,6 @@
 #endif
 
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_callout_map);
 EXPORT_SYMBOL(__write_lock_failed);
 EXPORT_SYMBOL(__read_lock_failed);
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c       Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c       Mon Oct 24 15:08:13 2005
@@ -112,44 +112,6 @@
        }
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-#include <asm/nmi.h>
-#ifdef CONFIG_SMP
-extern void smp_suspend(void);
-extern void smp_resume(void);
-#endif
-/* We don't actually take CPU down, just spin without interrupts. */
-static inline void play_dead(void)
-{
-       /* Death loop */
-       while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
-               HYPERVISOR_sched_op(SCHEDOP_yield, 0);
-
-       __flush_tlb_all();
-   /* 
-    * Restore IPI/IRQ mappings before marking online to prevent 
-    * race between pending interrupts and restoration of handler. 
-    */
-#ifdef CONFIG_SMP
-       local_irq_enable(); /* XXX Needed for smp_resume(). Clean me up. */
-       smp_resume();
-#endif
-       cpu_set(smp_processor_id(), cpu_online_map);
-}
-#else
-static inline void play_dead(void)
-{
-       BUG();
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-void cpu_restore(void)
-{
-       play_dead();
-       local_irq_enable();
-       cpu_idle();
-}
-
 /*
  * The idle thread. There's no useful work to be
  * done, so just try to conserve power and have a
@@ -158,7 +120,9 @@
  */
 void cpu_idle (void)
 {
+#if defined(CONFIG_HOTPLUG_CPU)
        int cpu = _smp_processor_id();
+#endif
 
        /* endless idle loop with no priority at all */
        while (1) {
@@ -168,23 +132,12 @@
                                __get_cpu_var(cpu_idle_state) = 0;
                        rmb();
 
+#if defined(CONFIG_HOTPLUG_CPU)
                        if (cpu_is_offline(cpu)) {
-                               local_irq_disable();
-#ifdef CONFIG_SMP
-                               smp_suspend();
-#endif
-#if defined(CONFIG_XEN) && defined(CONFIG_HOTPLUG_CPU)
-                               /* Ack it.  From this point on until
-                                  we get woken up, we're not allowed
-                                  to take any locks.  In particular,
-                                  don't printk. */
-                               __get_cpu_var(cpu_state) = CPU_DEAD;
-                               /* Tell hypervisor to take vcpu down. */
                                HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
-#endif
-                               play_dead();
                                local_irq_enable();
                        }
+#endif
 
                        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
                        xen_idle();
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c Mon Oct 24 15:08:13 2005
@@ -939,6 +939,8 @@
        if ( xen_override_max_pfn < xen_start_info->nr_pages )
                xen_override_max_pfn = xen_start_info->nr_pages;
        max_pfn = xen_override_max_pfn;
+       /* 8MB slack, to make up for address space allocations in backends. */
+       max_pfn += 8 << (20 - PAGE_SHIFT);
 }
 #endif /* XEN */
 
@@ -1638,39 +1640,17 @@
 #endif
 
        /* Make sure we have a correctly sized P->M table. */
-       if (max_pfn != xen_start_info->nr_pages) {
-               phys_to_machine_mapping = alloc_bootmem_low_pages(
-                       max_pfn * sizeof(unsigned long));
-
-               if (max_pfn > xen_start_info->nr_pages) {
-                       /* set to INVALID_P2M_ENTRY */
-                       memset(phys_to_machine_mapping, ~0,
-                               max_pfn * sizeof(unsigned long));
-                       memcpy(phys_to_machine_mapping,
-                               (unsigned long *)xen_start_info->mfn_list,
-                               xen_start_info->nr_pages * sizeof(unsigned long));
-               } else {
-                       struct xen_memory_reservation reservation = {
-                               .extent_start = (unsigned long *)xen_start_info->mfn_list + max_pfn,
-                               .nr_extents   = xen_start_info->nr_pages - max_pfn,
-                               .extent_order = 0,
-                               .domid        = DOMID_SELF
-                       };
-
-                       memcpy(phys_to_machine_mapping,
-                               (unsigned long *)xen_start_info->mfn_list,
-                               max_pfn * sizeof(unsigned long));
-                       BUG_ON(HYPERVISOR_memory_op(
-                               XENMEM_decrease_reservation,
-                               &reservation) !=
-                           (xen_start_info->nr_pages - max_pfn));
-               }
-               free_bootmem(
-                       __pa(xen_start_info->mfn_list), 
-                       PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
-                       sizeof(unsigned long))));
-       }
-
+       phys_to_machine_mapping = alloc_bootmem_low_pages(
+               max_pfn * sizeof(unsigned long));
+       memset(phys_to_machine_mapping, ~0,
+               max_pfn * sizeof(unsigned long));
+       memcpy(phys_to_machine_mapping,
+               (unsigned long *)xen_start_info->mfn_list,
+               xen_start_info->nr_pages * sizeof(unsigned long));
+       free_bootmem(
+               __pa(xen_start_info->mfn_list), 
+               PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
+               sizeof(unsigned long))));
 
        /* 
         * Initialise the list of the frames that specify the list of 
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c  Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c  Mon Oct 24 15:08:13 2005
@@ -122,7 +122,8 @@
 static u64 processed_system_time;   /* System time (ns) at last processing. */
 static DEFINE_PER_CPU(u64, processed_system_time);
 
-#define NS_PER_TICK (1000000000ULL/HZ)
+/* Must be signed, as it's compared with s64 quantities which can be -ve. */
+#define NS_PER_TICK (1000000000LL/HZ)
 
 static inline void __normalize_time(time_t *sec, s64 *nsec)
 {
@@ -235,9 +236,9 @@
 
        /* Adjust wall-clock time base based on wall_jiffies ticks. */
        wc_nsec = processed_system_time;
-       wc_nsec += (u64)sec * 1000000000ULL;
-       wc_nsec += (u64)nsec;
-       wc_nsec -= (jiffies - wall_jiffies) * (u64)(NSEC_PER_SEC / HZ);
+       wc_nsec += sec * (u64)NSEC_PER_SEC;
+       wc_nsec += nsec;
+       wc_nsec -= (jiffies - wall_jiffies) * (u64)NS_PER_TICK;
 
        /* Split wallclock base into seconds and nanoseconds. */
        tmp = wc_nsec;
@@ -437,7 +438,7 @@
         * be stale, so we can retry with fresh ones.
         */
        for ( ; ; ) {
-               nsec = (s64)tv->tv_nsec - (s64)get_nsec_offset(shadow);
+               nsec = tv->tv_nsec - get_nsec_offset(shadow);
                if (time_values_up_to_date(cpu))
                        break;
                get_time_values_from_xen();
@@ -558,7 +559,7 @@
        }
        while (!time_values_up_to_date(cpu));
 
-       if (unlikely(delta < (s64)-1000000) || unlikely(delta_cpu < 0)) {
+       if (unlikely(delta < -1000000LL) || unlikely(delta_cpu < 0)) {
                printk("Timer ISR/%d: Time went backwards: "
                       "delta=%lld cpu_delta=%lld shadow=%lld "
                       "off=%lld processed=%lld cpu_processed=%lld\n",
@@ -784,7 +785,7 @@
        rdtscll(vxtime.last_tsc);
 #endif
 
-       per_cpu(timer_irq, 0) = bind_virq_to_irq(VIRQ_TIMER);
+       per_cpu(timer_irq, 0) = bind_virq_to_irq(VIRQ_TIMER, 0);
        (void)setup_irq(per_cpu(timer_irq, 0), &irq_timer);
 }
 
@@ -802,7 +803,7 @@
                 * but that's ok: we'll just end up with a shorter timeout. */
                if (delta < 1) 
                        delta = 1;
-               st = processed_system_time + ((u64)delta * NS_PER_TICK);
+               st = processed_system_time + (delta * (u64)NS_PER_TICK);
        } while (read_seqretry(&xtime_lock, seq));
 
        return st;
@@ -851,21 +852,12 @@
 
 #ifdef CONFIG_SMP
 static char timer_name[NR_CPUS][15];
-void local_setup_timer_irq(void)
-{
-       int cpu = smp_processor_id();
-
-       if (cpu == 0)
-               return;
-       per_cpu(timer_irq, cpu) = bind_virq_to_irq(VIRQ_TIMER);
-       sprintf(timer_name[cpu], "timer%d", cpu);
-       BUG_ON(request_irq(per_cpu(timer_irq, cpu), timer_interrupt,
-                          SA_INTERRUPT, timer_name[cpu], NULL));
-}
-
-void local_setup_timer(void)
-{
-       int seq, cpu = smp_processor_id();
+
+void local_setup_timer(unsigned int cpu)
+{
+       int seq;
+
+       BUG_ON(cpu == 0);
 
        do {
                seq = read_seqbegin(&xtime_lock);
@@ -873,17 +865,17 @@
                        per_cpu(shadow_time, cpu).system_timestamp;
        } while (read_seqretry(&xtime_lock, seq));
 
-       local_setup_timer_irq();
-}
-
-void local_teardown_timer_irq(void)
-{
-       int cpu = smp_processor_id();
-
-       if (cpu == 0)
-               return;
+       per_cpu(timer_irq, cpu) = bind_virq_to_irq(VIRQ_TIMER, cpu);
+       sprintf(timer_name[cpu], "timer%d", cpu);
+       BUG_ON(request_irq(per_cpu(timer_irq, cpu), timer_interrupt,
+                          SA_INTERRUPT, timer_name[cpu], NULL));
+}
+
+void local_teardown_timer(unsigned int cpu)
+{
+       BUG_ON(cpu == 0);
        free_irq(per_cpu(timer_irq, cpu), NULL);
-       unbind_virq_from_irq(VIRQ_TIMER);
+       unbind_virq_from_irq(VIRQ_TIMER, cpu);
 }
 #endif
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c   Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c   Mon Oct 24 15:08:13 2005
@@ -123,9 +123,18 @@
        return __direct_remap_pfn_range(
                vma->vm_mm, address, mfn, size, prot, domid);
 }
-
 EXPORT_SYMBOL(direct_remap_pfn_range);
 
+int direct_kernel_remap_pfn_range(unsigned long address, 
+                                 unsigned long mfn,
+                                 unsigned long size, 
+                                 pgprot_t prot,
+                                 domid_t  domid)
+{
+       return __direct_remap_pfn_range(
+               &init_mm, address, mfn, size, prot, domid);
+}
+EXPORT_SYMBOL(direct_kernel_remap_pfn_range);
 
 /* FIXME: This is horribly broken on PAE */ 
 static int lookup_pte_fn(
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/kernel/Makefile
--- a/linux-2.6-xen-sparse/arch/xen/kernel/Makefile     Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/Makefile     Mon Oct 24 15:08:13 2005
@@ -15,4 +15,4 @@
 
 obj-$(CONFIG_PROC_FS) += xen_proc.o
 obj-$(CONFIG_NET)     += skbuff.o
-obj-$(CONFIG_SMP)     += smp.o
+obj-$(CONFIG_SMP)     += smp.o smpboot.o
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c     Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c     Mon Oct 24 15:08:13 2005
@@ -42,6 +42,7 @@
 #include <asm-xen/xen-public/physdev.h>
 #include <asm/hypervisor.h>
 #include <asm-xen/evtchn.h>
+#include <linux/mc146818rtc.h> /* RTC_IRQ */
 
 /*
  * This lock protects updates to the following mapping and reference-count
@@ -70,8 +71,8 @@
 
 #ifdef CONFIG_SMP
 
-static u8  cpu_evtchn[NR_EVENT_CHANNELS];
-static u32 cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/32];
+static u8 cpu_evtchn[NR_EVENT_CHANNELS];
+static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
 
 #define active_evtchns(cpu,sh,idx)             \
        ((sh)->evtchn_pending[idx] &            \
@@ -136,7 +137,7 @@
 /* NB. Interrupts are disabled on entry. */
 asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
 {
-       u32     l1, l2;
+       unsigned long  l1, l2;
        unsigned int   l1i, l2i, port;
        int            irq, cpu = smp_processor_id();
        shared_info_t *s = HYPERVISOR_shared_info;
@@ -148,13 +149,13 @@
        l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
        while (l1 != 0) {
                l1i = __ffs(l1);
-               l1 &= ~(1 << l1i);
+               l1 &= ~(1UL << l1i);
         
                while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
                        l2i = __ffs(l2);
-                       l2 &= ~(1 << l2i);
+                       l2 &= ~(1UL << l2i);
             
-                       port = (l1i << 5) + l2i;
+                       port = (l1i * BITS_PER_LONG) + l2i;
                        if ((irq = evtchn_to_irq[port]) != -1)
                                do_IRQ(irq, regs);
                        else
@@ -178,11 +179,10 @@
        return irq;
 }
 
-int bind_virq_to_irq(int virq)
+int bind_virq_to_irq(int virq, int cpu)
 {
        evtchn_op_t op = { .cmd = EVTCHNOP_bind_virq };
        int evtchn, irq;
-       int cpu = smp_processor_id();
 
        spin_lock(&irq_mapping_update_lock);
 
@@ -209,10 +209,9 @@
 }
 EXPORT_SYMBOL(bind_virq_to_irq);
 
-void unbind_virq_from_irq(int virq)
+void unbind_virq_from_irq(int virq, int cpu)
 {
        evtchn_op_t op = { .cmd = EVTCHNOP_close };
-       int cpu    = smp_processor_id();
        int irq    = per_cpu(virq_to_irq, cpu)[virq];
        int evtchn = irq_to_evtchn[irq];
 
@@ -240,11 +239,10 @@
 }
 EXPORT_SYMBOL(unbind_virq_from_irq);
 
-int bind_ipi_to_irq(int ipi)
+int bind_ipi_to_irq(int ipi, int cpu)
 {
        evtchn_op_t op = { .cmd = EVTCHNOP_bind_ipi };
        int evtchn, irq;
-       int cpu = smp_processor_id();
 
        spin_lock(&irq_mapping_update_lock);
 
@@ -272,10 +270,9 @@
 }
 EXPORT_SYMBOL(bind_ipi_to_irq);
 
-void unbind_ipi_from_irq(int ipi)
+void unbind_ipi_from_irq(int ipi, int cpu)
 {
        evtchn_op_t op = { .cmd = EVTCHNOP_close };
-       int cpu    = smp_processor_id();
        int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
        int irq    = evtchn_to_irq[evtchn];
 
@@ -748,6 +745,13 @@
        {
                irq_bindcount[pirq_to_irq(i)] = 1;
 
+#ifdef RTC_IRQ
+               /* If not domain 0, force our RTC driver to fail its probe. */
+               if ((i == RTC_IRQ) &&
+                   !(xen_start_info->flags & SIF_INITDOMAIN))
+                       continue;
+#endif
+
                irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
                irq_desc[pirq_to_irq(i)].action  = 0;
                irq_desc[pirq_to_irq(i)].depth   = 1;
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/kernel/reboot.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c     Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c     Mon Oct 24 15:08:13 2005
@@ -26,7 +26,6 @@
 // the distinction when we return the reason code to them.
 #define SHUTDOWN_HALT      4
 
-
 void machine_restart(char * __unused)
 {
        /* We really want to get pending console data out before we die. */
@@ -60,6 +59,8 @@
 
 /* Ignore multiple shutdown requests. */
 static int shutting_down = SHUTDOWN_INVALID;
+static void __shutdown_handler(void *unused);
+static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
 
 #ifndef CONFIG_HOTPLUG_CPU
 #define cpu_down(x) (-EOPNOTSUPP)
@@ -243,40 +244,46 @@
        return 0;
 }
 
-static struct task_struct *kthread_create_on_cpu(int (*f)(void *arg),
-                                                void *arg,
-                                                const char *name,
-                                                int cpu)
+static int kthread_create_on_cpu(int (*f)(void *arg),
+                                void *arg,
+                                const char *name,
+                                int cpu)
 {
        struct task_struct *p;
        p = kthread_create(f, arg, name);
+       if (IS_ERR(p))
+               return PTR_ERR(p);
        kthread_bind(p, cpu);
        wake_up_process(p);
-       return p;
+       return 0;
 }
 
 static void __shutdown_handler(void *unused)
 {
        int err;
 
-       if (shutting_down != SHUTDOWN_SUSPEND) {
+       if (shutting_down != SHUTDOWN_SUSPEND)
                err = kernel_thread(shutdown_process, NULL,
                                    CLONE_FS | CLONE_FILES);
-               if ( err < 0 )
-                       printk(KERN_ALERT "Error creating shutdown "
-                              "process!\n");
-       } else {
-               kthread_create_on_cpu(__do_suspend, NULL, "suspender", 0);
+       else
+               err = kthread_create_on_cpu(__do_suspend, NULL, "suspend", 0);
+
+       if ( err < 0 ) {
+               printk(KERN_WARNING "Error creating shutdown process (%d): "
+                      "retrying...\n", -err);
+               schedule_delayed_work(&shutdown_work, HZ/2);
        }
 }
 
 static void shutdown_handler(struct xenbus_watch *watch,
                             const char **vec, unsigned int len)
 {
-       static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
        char *str;
        struct xenbus_transaction *xbt;
        int err;
+
+       if (shutting_down != SHUTDOWN_INVALID)
+               goto out;
 
  again:
        xbt = xenbus_transaction_start();
@@ -312,6 +319,7 @@
 
        kfree(str);
 
+ out:
        if (shutting_down != SHUTDOWN_INVALID)
                schedule_work(&shutdown_work);
 }
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile      Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile      Mon Oct 24 15:08:13 2005
@@ -25,7 +25,7 @@
 c-obj-$(CONFIG_X86_MSR)                += msr.o
 obj-$(CONFIG_MICROCODE)                += microcode.o
 obj-$(CONFIG_X86_CPUID)                += cpuid.o
-obj-$(CONFIG_SMP)              += smp.o smpboot.o
+obj-$(CONFIG_SMP)              += smp.o
 obj-$(CONFIG_X86_LOCAL_APIC)   += apic.o
 c-obj-$(CONFIG_X86_LOCAL_APIC) += nmi.o
 obj-$(CONFIG_X86_IO_APIC)      += io_apic.o mpparse.o
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/x86_64/kernel/e820.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/e820.c        Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/e820.c        Mon Oct 24 15:08:13 2005
@@ -519,7 +519,7 @@
        e820_print_map(who);
 }
 
-#else  /* CONFIX_XEN */
+#else  /* CONFIG_XEN */
 
 extern unsigned long xen_override_max_pfn;
 extern union xen_start_info_union xen_start_info_union;
@@ -528,10 +528,13 @@
 {
         unsigned long max_end_pfn = xen_start_info->nr_pages;
 
-       if ( xen_override_max_pfn <  max_end_pfn)
+       if ( xen_override_max_pfn < max_end_pfn)
                xen_override_max_pfn = max_end_pfn;
-       
-        return xen_override_max_pfn;
+
+       /* 8MB slack, to make up for address space allocations in backends. */
+       xen_override_max_pfn += 8 << (20 - PAGE_SHIFT);
+
+       return xen_override_max_pfn;
 }
 
 void __init e820_reserve_resources(void) 
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c       Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c       Mon Oct 24 15:08:13 2005
@@ -429,7 +429,7 @@
 static void __init contig_initmem_init(void)
 {
         unsigned long bootmap_size = init_bootmem(start_pfn, end_pfn);
-        free_bootmem(0, end_pfn << PAGE_SHIFT);   
+        free_bootmem(0, xen_start_info->nr_pages << PAGE_SHIFT);
         reserve_bootmem(HIGH_MEMORY,
                         (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1)
                         - HIGH_MEMORY);
@@ -733,20 +733,22 @@
 #ifdef CONFIG_XEN
        {
                int i, j, k, fpp;
+
                /* Make sure we have a large enough P->M table. */
-               if (end_pfn > xen_start_info->nr_pages) {
-                       phys_to_machine_mapping = alloc_bootmem(
-                               end_pfn * sizeof(unsigned long));
-                       memset(phys_to_machine_mapping, ~0,
-                              end_pfn * sizeof(unsigned long));
-                       memcpy(phys_to_machine_mapping,
-                              (unsigned long *)xen_start_info->mfn_list,
-                              xen_start_info->nr_pages * sizeof(unsigned long));
-                       free_bootmem(
-                               __pa(xen_start_info->mfn_list), 
-                               PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
-                                               sizeof(unsigned long))));
-               }
+               phys_to_machine_mapping = alloc_bootmem(
+                       end_pfn * sizeof(unsigned long));
+               memset(phys_to_machine_mapping, ~0,
+                      end_pfn * sizeof(unsigned long));
+               memcpy(phys_to_machine_mapping,
+                      (unsigned long *)xen_start_info->mfn_list,
+                      xen_start_info->nr_pages * sizeof(unsigned long));
+               free_bootmem(
+                       __pa(xen_start_info->mfn_list), 
+                       PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
+                                       sizeof(unsigned long))));
+               make_pages_readonly((void *)xen_start_info->mfn_list,
+                                   PFN_UP(xen_start_info->nr_pages *
+                                          sizeof(unsigned long)));
 
                /* 
                 * Initialise the list of the frames that specify the list of 
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/x86_64/kernel/x8664_ksyms.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/x8664_ksyms.c Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/x8664_ksyms.c Mon Oct 24 15:08:13 2005
@@ -113,14 +113,11 @@
 
 EXPORT_SYMBOL(cpu_pda);
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(__write_lock_failed);
 EXPORT_SYMBOL(__read_lock_failed);
 
 EXPORT_SYMBOL(synchronize_irq);
 EXPORT_SYMBOL(smp_call_function);
-EXPORT_SYMBOL(cpu_callout_map);
 #endif
 
 #ifdef CONFIG_VT
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c
--- a/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c        Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c        Mon Oct 24 15:08:13 2005
@@ -502,7 +502,7 @@
                .extent_order = 0,
                .domid        = DOMID_SELF
        };
-       set_pte(pte, __pte_ma(0));
+       set_pte_at(&init_mm, addr, pte, __pte_ma(0));
        phys_to_machine_mapping[__pa(addr) >> PAGE_SHIFT] =
                INVALID_P2M_ENTRY;
        BUG_ON(HYPERVISOR_memory_op(
@@ -521,10 +521,9 @@
 
        scrub_pages(vstart, 1 << order);
 
+       balloon_lock(flags);
        BUG_ON(generic_page_range(
                &init_mm, vstart, PAGE_SIZE << order, dealloc_pte_fn, NULL));
-
-       balloon_lock(flags);
        current_pages -= 1UL << order;
        balloon_unlock(flags);
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c        Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c        Mon Oct 24 15:08:13 2005
@@ -494,10 +494,6 @@
        int i;
        struct page *page;
 
-       if (!(xen_start_info->flags & SIF_INITDOMAIN) &&
-           !(xen_start_info->flags & SIF_BLK_BE_DOMAIN))
-               return 0;
-
        blkif_interface_init();
 
        page = balloon_alloc_empty_page_range(MMAP_PAGES);
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c      Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c      Mon Oct 24 15:08:13 2005
@@ -725,21 +725,9 @@
        .suspend = blkfront_suspend,
 };
 
-static void __init init_blk_xenbus(void)
+static int __init xlblk_init(void)
 {
        xenbus_register_driver(&blkfront);
-}
-
-static int __init xlblk_init(void)
-{
-       if ((xen_start_info->flags & SIF_INITDOMAIN) ||
-           (xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
-               return 0;
-
-       IPRINTK("Initialising virtual block device driver\n");
-
-       init_blk_xenbus();
-
        return 0;
 }
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c
--- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c  Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c  Mon Oct 24 15:08:13 2005
@@ -861,11 +861,7 @@
 {
        int i, j, err;
        struct page *page;
-/*
-  if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
-  !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
-  return 0;
-*/
+
        blkif_interface_init();
 
        page = balloon_alloc_empty_page_range(MMAP_PAGES);
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/drivers/xen/console/console.c
--- a/linux-2.6-xen-sparse/drivers/xen/console/console.c        Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/console/console.c        Mon Oct 24 15:08:13 2005
@@ -768,7 +768,7 @@
 #endif
 
        if (xen_start_info->flags & SIF_INITDOMAIN) {
-               xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
+               xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
                (void)request_irq(xencons_priv_irq,
                                  xencons_priv_interrupt, 0, "console", NULL);
        } else {
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c
--- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c   Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c   Mon Oct 24 15:08:13 2005
@@ -34,14 +34,18 @@
 {
        int sent = 0;
        struct xencons_interface *intf = xencons_interface();
+       XENCONS_RING_IDX cons, prod;
 
-       while ((sent < len) &&
-              (intf->out_prod - intf->out_cons) < sizeof(intf->out)) {
-               intf->out[MASK_XENCONS_IDX(intf->out_prod, intf->out)] =
-                       data[sent];
-               intf->out_prod++;
-               sent++;
-       }
+       cons = intf->out_cons;
+       prod = intf->out_prod;
+       mb();
+       BUG_ON((prod - cons) > sizeof(intf->out));
+
+       while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
+               intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
+
+       wmb();
+       intf->out_prod = prod;
 
        /* Use evtchn: this is called early, before irq is set up. */
        notify_remote_via_evtchn(xen_start_info->console_evtchn);
@@ -52,15 +56,22 @@
 static irqreturn_t handle_input(int irq, void *unused, struct pt_regs *regs)
 {
        struct xencons_interface *intf = xencons_interface();
+       XENCONS_RING_IDX cons, prod;
 
-       while (intf->in_cons != intf->in_prod) {
+       cons = intf->in_cons;
+       prod = intf->in_prod;
+       mb();
+       BUG_ON((prod - cons) > sizeof(intf->in));
+
+       while (cons != prod) {
                if (xencons_receiver != NULL)
                        xencons_receiver(
-                               intf->in + MASK_XENCONS_IDX(intf->in_cons,
-                                                           intf->in),
+                               intf->in + MASK_XENCONS_IDX(cons++, intf->in),
                                1, regs);
-               intf->in_cons++;
        }
+
+       wmb();
+       intf->in_cons = cons;
 
        return IRQ_HANDLED;
 }
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/drivers/xen/netback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c      Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c      Mon Oct 24 15:08:13 2005
@@ -58,7 +58,7 @@
        dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
        if (dev == NULL) {
                DPRINTK("Could not create netif: out of memory\n");
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
 
        netif = netdev_priv(dev);
@@ -104,7 +104,7 @@
                DPRINTK("Could not register new net device %s: err=%d\n",
                        dev->name, err);
                free_netdev(dev);
-               return NULL;
+               return ERR_PTR(err);
        }
 
        DPRINTK("Successfully created netif\n");
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Mon Oct 24 15:08:13 2005
@@ -781,12 +781,6 @@
        int i;
        struct page *page;
 
-       if (!(xen_start_info->flags & SIF_NET_BE_DOMAIN) &&
-           !(xen_start_info->flags & SIF_INITDOMAIN))
-               return 0;
-
-       IPRINTK("Initialising Xen netif backend.\n");
-
        /* We can increase reservation by this much in net_rx_action(). */
        balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
 
@@ -817,7 +811,7 @@
 
        netif_xenbus_init();
 
-       (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
+       (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG, 0),
                          netif_be_dbg, SA_SHIRQ, 
                          "net-be-dbg", &netif_be_dbg);
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c Mon Oct 24 15:08:13 2005
@@ -28,10 +28,6 @@
        netif_t *netif;
 
        long int frontend_id;
-#if 0
-       long int pdev;
-       long int readonly;
-#endif
 
        /* watch back end for changes */
        struct xenbus_watch backend_watch;
@@ -155,14 +151,6 @@
                        return;
                }
 
-#if 0
-               err = vbd_create(be->netif, handle, be->pdev, be->readonly);
-               if (err) {
-                       xenbus_dev_error(dev, err, "creating vbd structure");
-                       return;
-               }
-#endif
-
                kobject_hotplug(&dev->dev.kobj, KOBJ_ONLINE);
 
                /* Pass in NULL node to skip exist test. */
@@ -173,33 +161,26 @@
 static int netback_hotplug(struct xenbus_device *xdev, char **envp,
                           int num_envp, char *buffer, int buffer_size)
 {
-       struct backend_info *be;
-       netif_t *netif;
-       char **key, *val;
+       struct backend_info *be = xdev->data;
+       netif_t *netif = be->netif;
        int i = 0, length = 0;
-       static char *env_vars[] = { "script", "domain", "mac", "bridge", "ip",
-                                   NULL };
-
-       be = xdev->data;
-       netif = be->netif;
+
+       char *val = xenbus_read(NULL, xdev->nodename, "script", NULL);
+       if (IS_ERR(val)) {
+               int err = PTR_ERR(val);
+               xenbus_dev_error(xdev, err, "reading script");
+               return err;
+       }
+       else {
+               add_hotplug_env_var(envp, num_envp, &i,
+                                   buffer, buffer_size, &length,
+                                   "script=%s", val);
+               kfree(val);
+       }
 
        add_hotplug_env_var(envp, num_envp, &i,
                            buffer, buffer_size, &length,
                            "vif=%s", netif->dev->name);
-
-       key = env_vars;
-       while (*key != NULL) {
-               val = xenbus_read(NULL, xdev->nodename, *key, NULL);
-               if (!IS_ERR(val)) {
-                       char buf[strlen(*key) + 4];
-                       sprintf(buf, "%s=%%s", *key);
-                       add_hotplug_env_var(envp, num_envp, &i,
-                                           buffer, buffer_size, &length,
-                                           buf, val);
-                       kfree(val);
-               }
-               key++;
-       }
 
        envp[i] = NULL;
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c
--- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c        Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c        Mon Oct 24 15:08:13 2005
@@ -25,8 +25,6 @@
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
-#include <asm-xen/xen-public/xen.h>
-#include <asm/hypervisor.h>
 #include <asm-xen/linux-public/privcmd.h>
 #include <asm/hypervisor.h>
 #include <asm-xen/xen-public/xen.h>
@@ -219,41 +217,6 @@
        }
        break;
 
-       case IOCTL_PRIVCMD_INITDOMAIN_STORE: {
-               extern int do_xenbus_probe(void*);
-               unsigned long page;
-
-               if (xen_start_info->store_evtchn != 0) {
-                       ret = xen_start_info->store_mfn;
-                       break;
-               }
-
-               /* Allocate page. */
-               page = get_zeroed_page(GFP_KERNEL);
-               if (!page) {
-                       ret = -ENOMEM;
-                       break;
-               }
-
-               /* We don't refcnt properly, so set reserved on page.
-                * (this allocation is permanent) */
-               SetPageReserved(virt_to_page(page));
-
-               /* Initial connect. Setup channel and page. */
-               xen_start_info->store_evtchn = data;
-               xen_start_info->store_mfn =
-                       pfn_to_mfn(virt_to_phys((void *)page) >>
-                                  PAGE_SHIFT);
-               ret = xen_start_info->store_mfn;
-
-               /* 
-               ** Complete initialization of xenbus (viz. set up the 
-               ** connection to xenstored now that it has started). 
-               */
-               kthread_run(do_xenbus_probe, NULL, "xenbus_probe");
-       }
-       break;
-
        default:
                ret = -EINVAL;
                break;
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c
--- a/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c        Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c        Mon Oct 24 15:08:13 2005
@@ -17,6 +17,7 @@
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/miscdevice.h>
+#include <linux/poll.h>
 #include <asm/uaccess.h>
 #include <asm-xen/xenbus.h>
 #include <asm-xen/xen-public/grant_table.h>
@@ -680,9 +681,14 @@
 }
 
 static unsigned int
-vtpm_op_poll(struct file *file, struct poll_table_struct *pst)
-{
-       return 0;
+vtpm_op_poll(struct file *file, struct poll_table_struct *pts)
+{
+       unsigned int flags = POLLOUT | POLLWRNORM;
+       poll_wait(file, &dataex.wait_queue, pts);
+       if (!list_empty(&dataex.pending_pak)) {
+               flags |= POLLIN | POLLRDNORM;
+       }
+       return flags;
 }
 
 static struct file_operations vtpm_ops = {
@@ -1070,11 +1076,6 @@
 tpmback_init(void)
 {
        int rc;
-       if (!(xen_start_info->flags & SIF_TPM_BE_DOMAIN) &&
-           !(xen_start_info->flags & SIF_INITDOMAIN)) {
-               printk(KERN_ALERT "Neither TPM-BE Domain nor INIT domain!\n");
-               return 0;
-       }
 
        if ((rc = misc_register(&ibmvtpms_miscdevice)) != 0) {
                printk(KERN_ALERT "Could not register misc device for TPM BE.\n");
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c      Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c      Mon Oct 24 15:08:13 2005
@@ -39,6 +39,7 @@
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/tpmfe.h>
+#include <linux/err.h>
 
 #include <asm/semaphore.h>
 #include <asm/io.h>
@@ -372,7 +373,7 @@
        info->watch.callback = watch_for_status;
        err = register_xenbus_watch(&info->watch);
        if (err) {
-               message = "registering watch on backend";
+               xenbus_dev_error(dev, err, "registering watch on backend");
                goto destroy_tpmring;
        }
 
@@ -398,6 +399,8 @@
        int err;
        struct tpmfront_info *info;
        int handle;
+       int len = max(XS_WATCH_PATH, XS_WATCH_TOKEN) + 1;
+       const char *vec[len];
 
        err = xenbus_scanf(NULL, dev->nodename,
                           "handle", "%i", &handle);
@@ -426,6 +429,10 @@
                dev->data = NULL;
                return err;
        }
+
+       vec[XS_WATCH_PATH]  = info->watch.node;
+       vec[XS_WATCH_TOKEN] = NULL;
+       watch_for_status(&info->watch, vec, len);
 
        return 0;
 }
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c    Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c    Mon Oct 24 15:08:13 2005
@@ -33,164 +33,139 @@
 #include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <linux/err.h>
+#include <asm-xen/xenbus.h>
 #include "xenbus_comms.h"
 
-#define RINGBUF_DATASIZE ((PAGE_SIZE / 2) - sizeof(struct ringbuf_head))
-struct ringbuf_head
-{
-       u32 write; /* Next place to write to */
-       u32 read; /* Next place to read from */
-       u8 flags;
-       char buf[0];
-} __attribute__((packed));
-
-static int xenbus_irq;
+static int xenbus_irq      = 0;
+
+extern void xenbus_probe(void *); 
+extern int xenstored_ready; 
+static DECLARE_WORK(probe_work, xenbus_probe, NULL);
 
 DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
 
-static inline struct ringbuf_head *outbuf(void)
+static inline struct xenstore_domain_interface *xenstore_domain_interface(void)
 {
        return mfn_to_virt(xen_start_info->store_mfn);
 }
 
-static inline struct ringbuf_head *inbuf(void)
-{
-       return mfn_to_virt(xen_start_info->store_mfn) + PAGE_SIZE/2;
-}
-
 static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs)
 {
+       if(unlikely(xenstored_ready == 0)) {
+               xenstored_ready = 1; 
+               schedule_work(&probe_work); 
+       } 
+
        wake_up(&xb_waitq);
        return IRQ_HANDLED;
 }
 
-static int check_buffer(const struct ringbuf_head *h)
-{
-       return (h->write < RINGBUF_DATASIZE && h->read < RINGBUF_DATASIZE);
-}
-
-/* We can't fill last byte: would look like empty buffer. */
-static void *get_output_chunk(const struct ringbuf_head *h,
-                             void *buf, u32 *len)
-{
-       u32 read_mark;
-
-       if (h->read == 0)
-               read_mark = RINGBUF_DATASIZE - 1;
-       else
-               read_mark = h->read - 1;
-
-       /* Here to the end of buffer, unless they haven't read some out. */
-       *len = RINGBUF_DATASIZE - h->write;
-       if (read_mark >= h->write)
-               *len = read_mark - h->write;
-       return buf + h->write;
-}
-
-static const void *get_input_chunk(const struct ringbuf_head *h,
-                                  const void *buf, u32 *len)
-{
-       /* Here to the end of buffer, unless they haven't written some. */
-       *len = RINGBUF_DATASIZE - h->read;
-       if (h->write >= h->read)
-               *len = h->write - h->read;
-       return buf + h->read;
-}
-
-static void update_output_chunk(struct ringbuf_head *h, u32 len)
-{
-       h->write += len;
-       if (h->write == RINGBUF_DATASIZE)
-               h->write = 0;
-}
-
-static void update_input_chunk(struct ringbuf_head *h, u32 len)
-{
-       h->read += len;
-       if (h->read == RINGBUF_DATASIZE)
-               h->read = 0;
-}
-
-static int output_avail(struct ringbuf_head *out)
-{
-       unsigned int avail;
-
-       get_output_chunk(out, out->buf, &avail);
-       return avail != 0;
+static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
+{
+       return ((prod - cons) <= XENSTORE_RING_SIZE);
+}
+
+static void *get_output_chunk(XENSTORE_RING_IDX cons,
+                             XENSTORE_RING_IDX prod,
+                             char *buf, uint32_t *len)
+{
+       *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
+       if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
+               *len = XENSTORE_RING_SIZE - (prod - cons);
+       return buf + MASK_XENSTORE_IDX(prod);
+}
+
+static const void *get_input_chunk(XENSTORE_RING_IDX cons,
+                                  XENSTORE_RING_IDX prod,
+                                  const char *buf, uint32_t *len)
+{
+       *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
+       if ((prod - cons) < *len)
+               *len = prod - cons;
+       return buf + MASK_XENSTORE_IDX(cons);
 }
 
 int xb_write(const void *data, unsigned len)
 {
-       struct ringbuf_head h;
-       struct ringbuf_head *out = outbuf();
-
-       do {
+       struct xenstore_domain_interface *intf = xenstore_domain_interface();
+       XENSTORE_RING_IDX cons, prod;
+
+       while (len != 0) {
                void *dst;
                unsigned int avail;
 
-               wait_event_interruptible(xb_waitq, output_avail(out));
-
+               wait_event_interruptible(xb_waitq,
+                                        (intf->req_prod - intf->req_cons) !=
+                                        XENSTORE_RING_SIZE);
+
+               /* Read indexes, then verify. */
+               cons = intf->req_cons;
+               prod = intf->req_prod;
                mb();
-               h = *out;
-               if (!check_buffer(&h))
+               if (!check_indexes(cons, prod))
                        return -EIO;
 
-               dst = get_output_chunk(&h, out->buf, &avail);
+               dst = get_output_chunk(cons, prod, intf->req, &avail);
                if (avail == 0)
                        continue;
                if (avail > len)
                        avail = len;
+
                memcpy(dst, data, avail);
                data += avail;
                len -= avail;
-               update_output_chunk(out, avail);
+
+               /* Other side must not see new header until data is there. */
+               wmb();
+               intf->req_prod += avail;
+
+               /* This implies mb() before other side sees interrupt. */
                notify_remote_via_evtchn(xen_start_info->store_evtchn);
-       } while (len != 0);
+       }
 
        return 0;
 }
 
-int xs_input_avail(void)
-{
-       unsigned int avail;
-       struct ringbuf_head *in = inbuf();
-
-       get_input_chunk(in, in->buf, &avail);
-       return avail != 0;
-}
-
 int xb_read(void *data, unsigned len)
 {
-       struct ringbuf_head h;
-       struct ringbuf_head *in = inbuf();
-       int was_full;
+       struct xenstore_domain_interface *intf = xenstore_domain_interface();
+       XENSTORE_RING_IDX cons, prod;
 
        while (len != 0) {
                unsigned int avail;
                const char *src;
 
-               wait_event_interruptible(xb_waitq, xs_input_avail());
-
+               wait_event_interruptible(xb_waitq,
+                                        intf->rsp_cons != intf->rsp_prod);
+
+               /* Read indexes, then verify. */
+               cons = intf->rsp_cons;
+               prod = intf->rsp_prod;
                mb();
-               h = *in;
-               if (!check_buffer(&h))
+               if (!check_indexes(cons, prod))
                        return -EIO;
 
-               src = get_input_chunk(&h, in->buf, &avail);
+               src = get_input_chunk(cons, prod, intf->rsp, &avail);
                if (avail == 0)
                        continue;
                if (avail > len)
                        avail = len;
-               was_full = !output_avail(&h);
+
+               /* We must read header before we read data. */
+               rmb();
 
                memcpy(data, src, avail);
                data += avail;
                len -= avail;
-               update_input_chunk(in, avail);
+
+               /* Other side must not see free space until we've copied out */
+               mb();
+               intf->rsp_cons += avail;
+
                pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
-               /* If it was full, tell them we've taken some. */
-               if (was_full)
-                       notify_remote_via_evtchn(xen_start_info->store_evtchn);
+
+               /* Implies mb(): they will see new header. */
+               notify_remote_via_evtchn(xen_start_info->store_evtchn);
        }
 
        return 0;
@@ -203,10 +178,6 @@
 
        if (xenbus_irq)
                unbind_evtchn_from_irqhandler(xenbus_irq, &xb_waitq);
-       xenbus_irq = 0;
-
-       if (!xen_start_info->store_evtchn)
-               return 0;
 
        err = bind_evtchn_to_irqhandler(
                xen_start_info->store_evtchn, wake_waiting,
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c    Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c    Mon Oct 24 15:08:13 2005
@@ -27,17 +27,28 @@
  */
 #define DEBUG
 
-#include <asm/hypervisor.h>
-#include <asm-xen/xenbus.h>
-#include <asm-xen/balloon.h>
 #include <linux/kernel.h>
 #include <linux/err.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
 #include <linux/fcntl.h>
-#include <stdarg.h>
+#include <linux/mm.h>
 #include <linux/notifier.h>
+#include <linux/kthread.h>
+
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/hypervisor.h>
+#include <asm-xen/xenbus.h>
+#include <asm-xen/xen_proc.h>
+#include <asm-xen/balloon.h>
+#include <asm-xen/evtchn.h>
+#include <asm-xen/linux-public/evtchn.h>
+
 #include "xenbus_comms.h"
+
+extern struct semaphore xenwatch_mutex;
 
 #define streq(a, b) (strcmp((a), (b)) == 0)
 
@@ -229,13 +240,18 @@
 static int xenbus_register_driver_common(struct xenbus_driver *drv,
                                         struct xen_bus_type *bus)
 {
+       int ret;
+
        drv->driver.name = drv->name;
        drv->driver.bus = &bus->bus;
        drv->driver.owner = drv->owner;
        drv->driver.probe = xenbus_dev_probe;
        drv->driver.remove = xenbus_dev_remove;
 
-       return driver_register(&drv->driver);
+       down(&xenwatch_mutex);
+       ret = driver_register(&drv->driver);
+       up(&xenwatch_mutex);
+       return ret;
 }
 
 int xenbus_register_driver(struct xenbus_driver *drv)
@@ -627,15 +643,19 @@
        bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, resume_dev);
 }
 
+
+/* A flag to determine if xenstored is 'ready' (i.e. has started) */
+int xenstored_ready = 0; 
+
+
 int register_xenstore_notifier(struct notifier_block *nb)
 {
        int ret = 0;
 
-       if (xen_start_info->store_evtchn) {
+        if(xenstored_ready > 0) 
                ret = nb->notifier_call(nb, 0, NULL);
-       } else {
+       else 
                notifier_chain_register(&xenstore_chain, nb);
-       }
 
        return ret;
 }
@@ -647,22 +667,11 @@
 }
 EXPORT_SYMBOL(unregister_xenstore_notifier);
 
-/* 
-** Called either from below xenbus_probe_init() initcall (for domUs) 
-** or, for dom0, from a thread created in privcmd/privcmd.c (after 
-** the user-space tools have invoked initDomainStore()) 
-*/
-int do_xenbus_probe(void *unused)
-{
-       int err = 0;
-
-       /* Initialize the interface to xenstore. */
-       err = xs_init();
-       if (err) {
-               printk("XENBUS: Error initializing xenstore comms:"
-                      " %i\n", err);
-               return err;
-       }
+
+
+void xenbus_probe(void *unused)
+{
+       BUG_ON((xenstored_ready <= 0)); 
 
        /* Enumerate devices in xenstore. */
        xenbus_probe_devices(&xenbus_frontend);
@@ -675,27 +684,101 @@
        /* Notify others that xenstore is up */
        notifier_call_chain(&xenstore_chain, 0, 0);
 
-       return 0;
-}
+       return;
+}
+
+
+static struct proc_dir_entry *xsd_mfn_intf;
+static struct proc_dir_entry *xsd_port_intf;
+
+
+static int xsd_mfn_read(char *page, char **start, off_t off,
+                        int count, int *eof, void *data)
+{
+       int len; 
+       len  = sprintf(page, "%ld", xen_start_info->store_mfn); 
+       *eof = 1; 
+       return len; 
+}
+
+static int xsd_port_read(char *page, char **start, off_t off,
+                        int count, int *eof, void *data)
+{
+       int len; 
+
+       len  = sprintf(page, "%d", xen_start_info->store_evtchn); 
+       *eof = 1; 
+       return len; 
+}
+
 
 static int __init xenbus_probe_init(void)
 {
-       if (xen_init() < 0)
+       int err = 0;
+       /* 
+       ** Domain0 doesn't have a store_evtchn or store_mfn yet. 
+       */
+       int dom0 = (xen_start_info->store_evtchn == 0);
+
+       printk("xenbus_probe_init\n");
+
+       if (xen_init() < 0) {
+               printk("xen_init failed\n");
                return -ENODEV;
-
+       }
+
+       /* Register ourselves with the kernel bus & device subsystems */
        bus_register(&xenbus_frontend.bus);
        bus_register(&xenbus_backend.bus);
        device_register(&xenbus_frontend.dev);
        device_register(&xenbus_backend.dev);
 
-       /* 
-       ** Domain0 doesn't have a store_evtchn yet - this will
-       ** be set up later by xend invoking initDomainStore() 
-       */
-       if (!xen_start_info->store_evtchn)
-               return 0;
-
-       do_xenbus_probe(NULL);
+       if (dom0) {
+
+               unsigned long page;
+               evtchn_op_t op = { 0 };
+
+
+               /* Allocate page. */
+               page = get_zeroed_page(GFP_KERNEL);
+               if (!page) 
+                       return -ENOMEM; 
+
+               /* We don't refcnt properly, so set reserved on page.
+                * (this allocation is permanent) */
+               SetPageReserved(virt_to_page(page));
+
+               xen_start_info->store_mfn =
+                       pfn_to_mfn(virt_to_phys((void *)page) >>
+                                  PAGE_SHIFT);
+               
+               /* Next allocate a local port which xenstored can bind to */
+               op.cmd = EVTCHNOP_alloc_unbound;
+               op.u.alloc_unbound.dom        = DOMID_SELF;
+               op.u.alloc_unbound.remote_dom = 0; 
+
+               BUG_ON(HYPERVISOR_event_channel_op(&op)); 
+               xen_start_info->store_evtchn = op.u.alloc_unbound.port;
+
+               /* And finally publish the above info in /proc/xen */
+               if((xsd_mfn_intf = create_xen_proc_entry("xsd_mfn", 0400)))
+                       xsd_mfn_intf->read_proc = xsd_mfn_read; 
+               if((xsd_port_intf = create_xen_proc_entry("xsd_port", 0400)))
+                       xsd_port_intf->read_proc = xsd_port_read;
+       }
+
+       /* Initialize the interface to xenstore. */
+       err = xs_init(); 
+       if (err) {
+               printk("XENBUS: Error initializing xenstore comms: %i\n", err);
+               return err; 
+       }
+
+       if (!dom0) {
+               xenstored_ready = 1;
+               xenbus_probe(NULL);
+       }
+
        return 0;
 }
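
For dom0 the hunk above has to bootstrap the store itself: it allocates and reserves a page for the xenstore ring, asks the hypervisor for an unbound event channel with EVTCHNOP_alloc_unbound, and publishes the resulting MFN and port through the two read-only proc entries so the user-space store daemon can find them. A minimal user-space sketch of picking those values up follows; it assumes create_xen_proc_entry() places the files at /proc/xen/xsd_mfn and /proc/xen/xsd_port, which is not shown in this hunk.

    /* Sketch only: a user-space consumer of the proc entries above. */
    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long read_proc_ulong(const char *path)
    {
        FILE *f = fopen(path, "r");
        unsigned long val;

        if (f == NULL || fscanf(f, "%lu", &val) != 1) {
            perror(path);
            exit(1);
        }
        fclose(f);
        return val;
    }

    int main(void)
    {
        /* Paths assume the usual /proc/xen location for xen proc entries. */
        unsigned long mfn  = read_proc_ulong("/proc/xen/xsd_mfn");
        unsigned long port = read_proc_ulong("/proc/xen/xsd_port");

        printf("xenstore ring: mfn=%lu evtchn port=%lu\n", mfn, port);
        return 0;
    }
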
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c       Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c       Mon Oct 24 15:08:13 2005
@@ -92,7 +92,7 @@
  * carrying out work.
  */
 static pid_t xenwatch_pid;
-static DECLARE_MUTEX(xenwatch_mutex);
+/* static */ DECLARE_MUTEX(xenwatch_mutex);
 static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
 
 static int get_error(const char *errorstring)
@@ -516,17 +516,38 @@
 }
 EXPORT_SYMBOL(xenbus_printf);
 
+/**
+ * Return the path to the error node for the given device, or NULL on failure.
+ * If the value returned is non-NULL, then it is the caller's to kfree.
+ */
+static char *error_path(struct xenbus_device *dev)
+{
+       char *path_buffer = kmalloc(strlen("error/") + strlen(dev->nodename) +
+                                   1, GFP_KERNEL);
+       if (path_buffer == NULL) {
+               return NULL;
+       }
+
+       strcpy(path_buffer, "error/");
+       strcpy(path_buffer + strlen("error/"), dev->nodename);
+
+       return path_buffer;
+}
+
 /* Report a (negative) errno into the store, with explanation. */
 void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
 {
        va_list ap;
        int ret;
        unsigned int len;
-       char *printf_buffer;
+       char *printf_buffer = NULL, *path_buffer = NULL;
 
        printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
-       if (printf_buffer == NULL)
+       if (printf_buffer == NULL) {
+               printk("xenbus: failed to write error node for %s (%d): %d\n",
+                      dev->nodename, err, errno);
                goto fail;
+       }
 
        len = sprintf(printf_buffer, "%i ", -err);
        va_start(ap, fmt);
@@ -535,15 +556,26 @@
 
        BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
        dev->has_error = 1;
-       if (xenbus_write(NULL, dev->nodename, "error", printf_buffer) != 0)
+
+       path_buffer = error_path(dev);
+
+       if (path_buffer == NULL) {
+               printk("xenbus: failed to write error node for %s (%s): %d\n",
+                      dev->nodename, printf_buffer, errno);
                goto fail;
-
-       kfree(printf_buffer);
-       return;
-
- fail:
-       printk("xenbus: failed to write error node for %s (%s)\n",
-              dev->nodename, printf_buffer);
+       }
+
+       if (xenbus_write(NULL, path_buffer, "error", printf_buffer) != 0) {
+               printk("xenbus: failed to write error node for %s (%s)\n",
+                      dev->nodename, printf_buffer);
+               goto fail;
+       }
+
+fail:
+       if (printf_buffer)
+               kfree(printf_buffer);
+       if (path_buffer)
+               kfree(path_buffer);
 }
 EXPORT_SYMBOL(xenbus_dev_error);
 
@@ -551,11 +583,21 @@
 void xenbus_dev_ok(struct xenbus_device *dev)
 {
        if (dev->has_error) {
-               if (xenbus_rm(NULL, dev->nodename, "error") != 0)
+               char *path_buffer = error_path(dev);
+
+               if (path_buffer == NULL) {
+                       printk("xenbus: failed to clear error node for %s: "
+                              "%d\n", dev->nodename, errno);
+                       return;
+               }
+
+               if (xenbus_rm(NULL, path_buffer, "error") != 0)
                        printk("xenbus: failed to clear error node for %s\n",
                               dev->nodename);
                else
                        dev->has_error = 0;
+
+               kfree(path_buffer);
        }
 }
 EXPORT_SYMBOL(xenbus_dev_ok);
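
With the error_path() helper above, a device's error report now lives under a separate "error/<nodename>" subtree in the store instead of directly under the device node. A short sketch of how a driver would use the pair of helpers (illustration only, not taken from this changeset):

    /* Illustration only: report a failure, then clear it once recovered. */
    static void example_report_status(struct xenbus_device *dev, int err)
    {
            if (err)
                    /* written below error/<nodename> via error_path() */
                    xenbus_dev_error(dev, err, "ring setup failed");
            else
                    /* removes the node again if one was written */
                    xenbus_dev_ok(dev);
    }
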
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypervisor.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypervisor.h        Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypervisor.h        Mon Oct 24 15:08:13 2005
@@ -49,6 +49,8 @@
 #  define pud_t pgd_t
 # endif
 #endif
+
+extern shared_info_t *HYPERVISOR_shared_info;
 
 /* arch/xen/i386/kernel/setup.c */
 extern start_info_t *xen_start_info;
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h   Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h   Mon Oct 24 15:08:13 2005
@@ -455,6 +455,11 @@
                             unsigned long size, 
                             pgprot_t prot,
                             domid_t  domid);
+int direct_kernel_remap_pfn_range(unsigned long address, 
+                                 unsigned long mfn,
+                                 unsigned long size, 
+                                 pgprot_t prot,
+                                 domid_t  domid);
 int create_lookup_pte_addr(struct mm_struct *mm,
                            unsigned long address,
                            unsigned long *ptep);
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/include/asm-xen/asm-i386/setup.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/setup.h     Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/setup.h     Mon Oct 24 15:08:13 2005
@@ -8,7 +8,7 @@
 
 #define PFN_UP(x)      (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
 #define PFN_DOWN(x)    ((x) >> PAGE_SHIFT)
-#define PFN_PHYS(x)    ((x) << PAGE_SHIFT)
+#define PFN_PHYS(x)    ((unsigned long long)(x) << PAGE_SHIFT)
 
 /*
  * Reserved space for vmalloc and iomap - defined in asm/page.h
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/include/asm-xen/asm-i386/system.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/system.h    Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/system.h    Mon Oct 24 15:08:13 2005
@@ -497,22 +497,11 @@
  * includes these barriers, for example.
  */
 
-/*
- * Don't use smp_processor_id() in preemptible code: debug builds will barf.
- * It's okay in these cases as we only read the upcall mask in preemptible
- * regions, which is always safe.
- */
-#ifdef CONFIG_SMP
-#define __this_cpu()   __smp_processor_id()
-#else
-#define __this_cpu()   0
-#endif
-
 #define __cli()                                                         \
 do {                                                                   \
        vcpu_info_t *_vcpu;                                             \
        preempt_disable();                                              \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        preempt_enable_no_resched();                                    \
        barrier();                                                      \
@@ -523,7 +512,7 @@
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        preempt_disable();                                              \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 0;                                  \
        barrier(); /* unmask then check (avoid races) */                \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
@@ -534,8 +523,10 @@
 #define __save_flags(x)                                                 \
 do {                                                                   \
        vcpu_info_t *_vcpu;                                             \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       preempt_disable();                                              \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        (x) = _vcpu->evtchn_upcall_mask;                                \
+       preempt_enable();                                               \
 } while (0)
 
 #define __restore_flags(x)                                             \
@@ -543,7 +534,7 @@
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        preempt_disable();                                              \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
                barrier(); /* unmask then check (avoid races) */        \
                if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
@@ -559,7 +550,7 @@
 do {                                                                   \
        vcpu_info_t *_vcpu;                                             \
        preempt_disable();                                              \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        (x) = _vcpu->evtchn_upcall_mask;                                \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        preempt_enable_no_resched();                                    \
@@ -572,8 +563,15 @@
 #define local_irq_disable()    __cli()
 #define local_irq_enable()     __sti()
 
+/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
 #define irqs_disabled()                                                 \
-       HYPERVISOR_shared_info->vcpu_data[__this_cpu()].evtchn_upcall_mask
+({     int ___x;                                                       \
+       vcpu_info_t *_vcpu;                                             \
+       preempt_disable();                                              \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+       ___x = (_vcpu->evtchn_upcall_mask != 0);                        \
+       preempt_enable_no_resched();                                    \
+       ___x; })
 
 /*
  * disable hlt during certain critical i/o operations
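
The hunks above drop the __this_cpu() wrapper: every access to the per-vcpu upcall mask now uses smp_processor_id() under preempt_disable(), so __save_flags() and irqs_disabled() no longer race with the task migrating to another vcpu on preemptible kernels. Call sites keep the usual pattern; a minimal sketch, assuming local_irq_save()/local_irq_restore() resolve to __save_and_cli()/__restore_flags() as in the generic headers:

    static void example_critical_section(void)
    {
            unsigned long flags;

            local_irq_save(flags);      /* masks event-channel upcalls for this vcpu */
            /* ... touch state shared with the evtchn upcall handler ... */
            local_irq_restore(flags);   /* re-checks evtchn_upcall_pending on unmask */
    }
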
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypervisor.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypervisor.h        Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypervisor.h        Mon Oct 24 15:08:13 2005
@@ -39,6 +39,7 @@
 #include <asm/ptrace.h>
 #include <asm/page.h>
 
+extern shared_info_t *HYPERVISOR_shared_info;
 extern start_info_t *xen_start_info;
 
 void force_evtchn_callback(void);
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h Mon Oct 24 15:08:13 2005
@@ -533,6 +533,12 @@
                             pgprot_t prot,
                             domid_t  domid);
 
+int direct_kernel_remap_pfn_range(unsigned long address, 
+                                 unsigned long mfn,
+                                 unsigned long size, 
+                                 pgprot_t prot,
+                                 domid_t  domid);
+
 int create_lookup_pte_addr(struct mm_struct *mm,
                            unsigned long address,
                            unsigned long *ptep);
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/smp.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/smp.h     Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/smp.h     Mon Oct 24 15:08:13 2005
@@ -34,7 +34,6 @@
 extern cpumask_t cpu_present_mask;
 extern cpumask_t cpu_possible_map;
 extern cpumask_t cpu_online_map;
-extern cpumask_t cpu_callout_map;
 
 /*
  * Private routines/data
@@ -52,8 +51,8 @@
 void smp_stop_cpu(void);
 extern cpumask_t cpu_sibling_map[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
-extern u8 phys_proc_id[NR_CPUS];
-extern u8 cpu_core_id[NR_CPUS];
+extern int phys_proc_id[NR_CPUS];
+extern int cpu_core_id[NR_CPUS];
 
 #define SMP_TRAMPOLINE_BASE 0x6000
 
@@ -65,7 +64,7 @@
 
 static inline int num_booting_cpus(void)
 {
-       return cpus_weight(cpu_callout_map);
+       return cpus_weight(cpu_possible_map);
 }
 
 #define __smp_processor_id() read_pda(cpunumber)
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/system.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/system.h  Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/system.h  Mon Oct 24 15:08:13 2005
@@ -321,22 +321,11 @@
  * includes these barriers, for example.
  */
 
-/*
- * Don't use smp_processor_id() in preemptible code: debug builds will barf.
- * It's okay in these cases as we only read the upcall mask in preemptible
- * regions, which is always safe.
- */
-#ifdef CONFIG_SMP
-#define __this_cpu()   __smp_processor_id()
-#else
-#define __this_cpu()   0
-#endif
-
 #define __cli()                                                         \
 do {                                                                   \
        vcpu_info_t *_vcpu;                                             \
        preempt_disable();                                              \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        preempt_enable_no_resched();                                    \
        barrier();                                                      \
@@ -347,7 +336,7 @@
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        preempt_disable();                                              \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 0;                                  \
        barrier(); /* unmask then check (avoid races) */                \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
@@ -358,8 +347,10 @@
 #define __save_flags(x)                                                 \
 do {                                                                   \
        vcpu_info_t *_vcpu;                                             \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       preempt_disable();                                              \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        (x) = _vcpu->evtchn_upcall_mask;                                \
+       preempt_enable();                                               \
 } while (0)
 
 #define __restore_flags(x)                                             \
@@ -367,7 +358,7 @@
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        preempt_disable();                                              \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
                barrier(); /* unmask then check (avoid races) */        \
                if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
@@ -383,7 +374,7 @@
 do {                                                                   \
        vcpu_info_t *_vcpu;                                             \
        preempt_disable();                                              \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        (x) = _vcpu->evtchn_upcall_mask;                                \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        preempt_enable_no_resched();                                    \
@@ -398,8 +389,15 @@
 #define local_irq_disable()    __cli()
 #define local_irq_enable()     __sti()
 
+/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
 #define irqs_disabled()                                                 \
-       HYPERVISOR_shared_info->vcpu_data[__this_cpu()].evtchn_upcall_mask
+({     int ___x;                                                       \
+       vcpu_info_t *_vcpu;                                             \
+       preempt_disable();                                              \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+       ___x = (_vcpu->evtchn_upcall_mask != 0);                        \
+       preempt_enable_no_resched();                                    \
+       ___x; })
 
 /*
  * disable hlt during certain critical i/o operations
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/include/asm-xen/evtchn.h
--- a/linux-2.6-xen-sparse/include/asm-xen/evtchn.h     Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/evtchn.h     Mon Oct 24 15:08:13 2005
@@ -44,12 +44,12 @@
  */
 
 /* Dynamically bind a VIRQ source to Linux IRQ space. */
-extern int  bind_virq_to_irq(int virq);
-extern void unbind_virq_from_irq(int virq);
+extern int  bind_virq_to_irq(int virq, int cpu);
+extern void unbind_virq_from_irq(int virq, int cpu);
 
 /* Dynamically bind an IPI source to Linux IRQ space. */
-extern int  bind_ipi_to_irq(int ipi);
-extern void unbind_ipi_from_irq(int ipi);
+extern int  bind_ipi_to_irq(int ipi, int cpu);
+extern void unbind_ipi_from_irq(int ipi, int cpu);
 
 /*
  * Dynamically bind an event-channel port to an IRQ-like callback handler.
@@ -99,8 +99,9 @@
         * like a real IO-APIC we 'lose the interrupt edge' if the channel is
         * masked.
         */
-       if (synch_test_bit         (port,    &s->evtchn_pending[0]) && 
-           !synch_test_and_set_bit(port>>5, &vcpu_info->evtchn_pending_sel)) {
+       if (synch_test_bit(port, &s->evtchn_pending[0]) && 
+           !synch_test_and_set_bit(port / BITS_PER_LONG,
+                                   &vcpu_info->evtchn_pending_sel)) {
                vcpu_info->evtchn_upcall_pending = 1;
                if (!vcpu_info->evtchn_upcall_mask)
                        force_evtchn_callback();
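
The selector arithmetic above is the substantive part of this hunk: evtchn_pending_sel carries one bit per word of evtchn_pending[], so the word index has to be derived from BITS_PER_LONG rather than the old hard-coded port>>5, which was only correct for 32-bit longs. In other words (sketch only):

    /* Sketch: decomposing an event-channel port on a 64-bit guest. */
    static inline unsigned int evtchn_word(unsigned int port)
    {
            return port / BITS_PER_LONG;  /* bit in evtchn_pending_sel, word in evtchn_pending[] */
    }

    static inline unsigned int evtchn_bit(unsigned int port)
    {
            return port % BITS_PER_LONG;  /* bit within evtchn_pending[word] */
    }
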
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/include/asm-xen/linux-public/privcmd.h
--- a/linux-2.6-xen-sparse/include/asm-xen/linux-public/privcmd.h       Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/linux-public/privcmd.h       Mon Oct 24 15:08:13 2005
@@ -76,8 +76,6 @@
        _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
 #define IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN                  \
        _IOC(_IOC_READ, 'P', 4, sizeof(unsigned long))
-#define IOCTL_PRIVCMD_INITDOMAIN_STORE                         \
-       _IOC(_IOC_READ, 'P', 5, 0)
 
 #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 patches/linux-2.6.12/net-csum.patch
--- a/patches/linux-2.6.12/net-csum.patch       Fri Oct 21 19:58:39 2005
+++ b/patches/linux-2.6.12/net-csum.patch       Mon Oct 24 15:08:13 2005
@@ -9,3 +9,48 @@
            && csum_tcpudp_magic(iph->saddr, iph->daddr, udplen, IPPROTO_UDP,
                                 skb->ip_summed == CHECKSUM_HW ? skb->csum
                                 : skb_checksum(skb, iph->ihl*4, udplen, 0))) {
+
+--- ../xen-unstable.hg/linux-2.6.12-xen0/net/ipv4/netfilter/ip_nat_proto_udp.c 2005-06-17 14:48:29.000000000 -0500
++++ linux-2.6-xen-sparse/net/ipv4/netfilter/ip_nat_proto_udp.c 2005-10-14 15:17:53.000000000 -0500
+@@ -112,11 +112,19 @@ udp_manip_pkt(struct sk_buff **pskb,
+               newport = tuple->dst.u.udp.port;
+               portptr = &hdr->dest;
+       }
+-      if (hdr->check) /* 0 is a special case meaning no checksum */
+-              hdr->check = ip_nat_cheat_check(~oldip, newip,
++      
++      if (hdr->check) { /* 0 is a special case meaning no checksum */
++              if ((*pskb)->proto_csum_blank) {
++                      hdr->check = ip_nat_cheat_check(oldip, ~newip, 
++                                      ip_nat_cheat_check(*portptr ^ 0xFFFF, 
++                                              newport, hdr->check));
++              } else {
++                      hdr->check = ip_nat_cheat_check(~oldip, newip,
+                                       ip_nat_cheat_check(*portptr ^ 0xFFFF,
+                                                          newport,
+                                                          hdr->check));
++              }
++      }
+       *portptr = newport;
+       return 1;
+ }
+--- ../xen-unstable.hg/linux-2.6.12-xen0/net/ipv4/netfilter/ip_nat_proto_tcp.c 2005-06-17 14:48:29.000000000 -0500
++++ linux-2.6-xen-sparse/net/ipv4/netfilter/ip_nat_proto_tcp.c 2005-10-14 16:41:20.000000000 -0500
+@@ -127,10 +127,16 @@ tcp_manip_pkt(struct sk_buff **pskb,
+       if (hdrsize < sizeof(*hdr))
+               return 1;
+ 
+-      hdr->check = ip_nat_cheat_check(~oldip, newip,
++      if ((*pskb)->proto_csum_blank) {
++              hdr->check = ip_nat_cheat_check(oldip, ~newip,
++                              ip_nat_cheat_check(oldport ^ 0xFFFF,
++                                      newport, hdr->check));
++      } else { 
++              hdr->check = ip_nat_cheat_check(~oldip, newip,
+                                       ip_nat_cheat_check(oldport ^ 0xFFFF,
+                                                          newport,
+                                                          hdr->check));
++      }
+       return 1;
+ }
+ 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/block-async.c
--- a/tools/blktap/parallax/block-async.c       Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/block-async.c       Mon Oct 24 15:08:13 2005
@@ -31,11 +31,11 @@
  */
 
 struct read_args {
-    u64 addr;
+    uint64_t addr;
 };
 
 struct write_args {
-    u64   addr;
+    uint64_t   addr;
     char *block;
 };
 
@@ -94,7 +94,7 @@
                
 } 
 
-void block_read(u64 addr, io_cb_t cb, void *param)
+void block_read(uint64_t addr, io_cb_t cb, void *param)
 {
     struct pending_io_req *req;
     
@@ -113,7 +113,7 @@
 }
 
 
-void block_write(u64 addr, char *block, io_cb_t cb, void *param)
+void block_write(uint64_t addr, char *block, io_cb_t cb, void *param)
 {
     struct pending_io_req *req;
     
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/block-async.h
--- a/tools/blktap/parallax/block-async.h       Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/block-async.h       Mon Oct 24 15:08:13 2005
@@ -14,7 +14,7 @@
 {
     enum {IO_ADDR_T, IO_BLOCK_T, IO_INT_T} type;
     union {
-        u64   a;
+        uint64_t   a;
         char *b;
         int   i;
     } u;
@@ -38,8 +38,8 @@
 };
 void radix_lock_init(struct radix_lock *r);
 
-void block_read(u64 addr, io_cb_t cb, void *param);
-void block_write(u64 addr, char *block, io_cb_t cb, void *param);
+void block_read(uint64_t addr, io_cb_t cb, void *param);
+void block_write(uint64_t addr, char *block, io_cb_t cb, void *param);
 void block_alloc(char *block, io_cb_t cb, void *param);
 void block_rlock(struct radix_lock *r, int row, io_cb_t cb, void *param);
 void block_wlock(struct radix_lock *r, int row, io_cb_t cb, void *param);
@@ -47,7 +47,7 @@
 void block_wunlock(struct radix_lock *r, int row, io_cb_t cb, void *param);
 void init_block_async(void);
 
-static inline u64 IO_ADDR(struct io_ret r)
+static inline uint64_t IO_ADDR(struct io_ret r)
 {
     assert(r.type == IO_ADDR_T);
     return r.u.a;
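
The remaining blktap/parallax hunks are mechanical: the private u64/u32 typedefs give way to the C99 <stdint.h> types. One thing the changeset does not touch is the printf formats, which stay on glibc's "%Ld"; for the new types the portable spelling comes from <inttypes.h>, e.g. (illustration only):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t id = UINT64_C(0x0fee0fee0fee0fee);   /* e.g. FREEBLOCK_MAGIC */

        printf("block id = %" PRIu64 " (0x%" PRIx64 ")\n", id, id);
        return 0;
    }
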
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/blockstore.c
--- a/tools/blktap/parallax/blockstore.c        Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/blockstore.c        Mon Oct 24 15:08:13 2005
@@ -132,9 +132,9 @@
 #define ENTER_LUID_CR pthread_mutex_lock(&ptmutex_luid)
 #define LEAVE_LUID_CR pthread_mutex_unlock(&ptmutex_luid)
 
-static u64 luid_cnt = 0x1000ULL;
-u64 new_luid(void) {
-    u64 luid;
+static uint64_t luid_cnt = 0x1000ULL;
+uint64_t new_luid(void) {
+    uint64_t luid;
     ENTER_LUID_CR;
     luid = luid_cnt++;
     LEAVE_LUID_CR;
@@ -539,7 +539,7 @@
  * Reading                                                                   *
  *****************************************************************************/
 
-void *readblock_indiv(int server, u64 id) {
+void *readblock_indiv(int server, uint64_t id) {
     void *block;
     bsq_t *qe;
     int len, rc;
@@ -616,9 +616,9 @@
  *
  *   @return: pointer to block, NULL on error
  */
-void *readblock(u64 id) {
+void *readblock(uint64_t id) {
     int map = (int)BSID_MAP(id);
-    u64 xid;
+    uint64_t xid;
     static int i = CLUSTER_MAX_REPLICAS - 1;
     void *block = NULL;
 
@@ -670,7 +670,7 @@
  * Writing                                                                   *
  *****************************************************************************/
 
-bsq_t *writeblock_indiv(int server, u64 id, void *block) {
+bsq_t *writeblock_indiv(int server, uint64_t id, void *block) {
 
     bsq_t *qe;
     int len;
@@ -709,7 +709,7 @@
  *
  *   @return: zero on success, -1 on failure
  */
-int writeblock(u64 id, void *block) {
+int writeblock(uint64_t id, void *block) {
     
     int map = (int)BSID_MAP(id);
     int rep0 = bsclusters[map].servers[0];
@@ -805,11 +805,11 @@
  *
  *   @return: new id of block on disk
  */
-u64 allocblock(void *block) {
+uint64_t allocblock(void *block) {
     return allocblock_hint(block, 0);
 }
 
-bsq_t *allocblock_hint_indiv(int server, void *block, u64 hint) {
+bsq_t *allocblock_hint_indiv(int server, void *block, uint64_t hint) {
     bsq_t *qe;
     int len;
 
@@ -846,14 +846,14 @@
  *
  *   @return: new id of block on disk
  */
-u64 allocblock_hint(void *block, u64 hint) {
+uint64_t allocblock_hint(void *block, uint64_t hint) {
     int map = (int)hint;
     int rep0 = bsclusters[map].servers[0];
     int rep1 = bsclusters[map].servers[1];
     int rep2 = bsclusters[map].servers[2];
     bsq_t *reqs[3];
     int rc;
-    u64 id0, id1, id2;
+    uint64_t id0, id1, id2;
 
     reqs[0] = reqs[1] = reqs[2] = NULL;
 
@@ -938,7 +938,7 @@
  *   @return: pointer to block, NULL on error
  */
 
-void *readblock(u64 id) {
+void *readblock(uint64_t id) {
     void *block;
     int block_fp;
    
@@ -980,7 +980,7 @@
  *
  *   @return: zero on success, -1 on failure
  */
-int writeblock(u64 id, void *block) {
+int writeblock(uint64_t id, void *block) {
     
     int block_fp;
     
@@ -1014,8 +1014,8 @@
  *   @return: new id of block on disk
  */
 
-u64 allocblock(void *block) {
-    u64 lb;
+uint64_t allocblock(void *block) {
+    uint64_t lb;
     off64_t pos;
     int block_fp;
     
@@ -1057,7 +1057,7 @@
  *
  *   @return: new id of block on disk
  */
-u64 allocblock_hint(void *block, u64 hint) {
+uint64_t allocblock_hint(void *block, uint64_t hint) {
     return allocblock(block);
 }
 
@@ -1109,7 +1109,7 @@
     return fb;
 }
 
-void releaseblock(u64 id)
+void releaseblock(uint64_t id)
 {
     blockstore_super_t *bs_super;
     freeblock_t *fl_current;
@@ -1154,7 +1154,7 @@
 {
     blockstore_super_t *bs_super;
     freeblock_t *fb;
-    u64 total = 0, next;
+    uint64_t total = 0, next;
     
     bs_super = (blockstore_super_t *) readblock(BLOCKSTORE_SUPER);
     
@@ -1205,7 +1205,7 @@
 {
     int i;
     blockstore_super_t *bs_super;
-    u64 ret;
+    uint64_t ret;
     int block_fp;
     
 #ifdef BLOCKSTORE_REMOTE
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/blockstore.h
--- a/tools/blktap/parallax/blockstore.h        Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/blockstore.h        Mon Oct 24 15:08:13 2005
@@ -21,33 +21,33 @@
 #define SECTOR_SHIFT   9 
 #endif
 
-#define FREEBLOCK_SIZE  (BLOCK_SIZE / sizeof(u64)) - (3 * sizeof(u64))
+#define FREEBLOCK_SIZE  (BLOCK_SIZE / sizeof(uint64_t)) - (3 * sizeof(uint64_t))
 #define FREEBLOCK_MAGIC 0x0fee0fee0fee0feeULL
 
 typedef struct {
-    u64 magic;
-    u64 next;
-    u64 count;
-    u64 list[FREEBLOCK_SIZE];
+    uint64_t magic;
+    uint64_t next;
+    uint64_t count;
+    uint64_t list[FREEBLOCK_SIZE];
 } freeblock_t; 
 
 #define BLOCKSTORE_MAGIC 0xaaaaaaa00aaaaaaaULL
 #define BLOCKSTORE_SUPER 1ULL
 
 typedef struct {
-    u64 magic;
-    u64 freelist_full;
-    u64 freelist_current;
+    uint64_t magic;
+    uint64_t freelist_full;
+    uint64_t freelist_current;
 } blockstore_super_t;
 
 extern void *newblock();
-extern void *readblock(u64 id);
-extern u64 allocblock(void *block);
-extern u64 allocblock_hint(void *block, u64 hint);
-extern int writeblock(u64 id, void *block);
+extern void *readblock(uint64_t id);
+extern uint64_t allocblock(void *block);
+extern uint64_t allocblock_hint(void *block, uint64_t hint);
+extern int writeblock(uint64_t id, void *block);
 
 /* Add this blockid to a freelist, to be recycled by the allocator. */
-extern void releaseblock(u64 id);
+extern void releaseblock(uint64_t id);
 
 /* this is a memory free() operation for block-sized allocations */
 extern void freeblock(void *block);
@@ -55,17 +55,17 @@
 
 /* debug for freelist. */
 void freelist_count(int print_each);
-#define ALLOCFAIL (((u64)(-1)))
+#define ALLOCFAIL (((uint64_t)(-1)))
 
 /* Distribution
  */
 #define BLOCKSTORED_PORT 9346
 
 struct bshdr_t_struct {
-    u32            operation;
-    u32            flags;
-    u64            id;
-    u64            luid;
+    uint32_t            operation;
+    uint32_t            flags;
+    uint64_t            id;
+    uint64_t            luid;
 } __attribute__ ((packed));
 typedef struct bshdr_t_struct bshdr_t;
 
@@ -76,9 +76,9 @@
 
 typedef struct bsmsg_t_struct bsmsg_t;
 
-#define MSGBUFSIZE_OP    sizeof(u32)
-#define MSGBUFSIZE_FLAGS (sizeof(u32) + sizeof(u32))
-#define MSGBUFSIZE_ID    (sizeof(u32) + sizeof(u32) + sizeof(u64) + sizeof(u64))
+#define MSGBUFSIZE_OP    sizeof(uint32_t)
+#define MSGBUFSIZE_FLAGS (sizeof(uint32_t) + sizeof(uint32_t))
+#define MSGBUFSIZE_ID    (sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint64_t) + sizeof(uint64_t))
 #define MSGBUFSIZE_BLOCK sizeof(bsmsg_t)
 
 #define BSOP_READBLOCK  0x01
@@ -113,9 +113,9 @@
 #define BSID_REPLICA2(_id) (((_id)>>40)&0xfffffULL)
 #define BSID_MAP(_id)      (((_id)>>60)&0xfULL)
 
-#define BSID(_map, _rep0, _rep1, _rep2) ((((u64)(_map))<<60) | \
-                                         (((u64)(_rep2))<<40) | \
-                                         (((u64)(_rep1))<<20) | ((u64)(_rep0)))
+#define BSID(_map, _rep0, _rep1, _rep2) ((((uint64_t)(_map))<<60) | \
+                                         (((uint64_t)(_rep2))<<40) | \
+                                         (((uint64_t)(_rep1))<<20) | ((uint64_t)(_rep0)))
 
 typedef struct bsserver_t_struct {
     char              *hostname;
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/blockstored.c
--- a/tools/blktap/parallax/blockstored.c       Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/blockstored.c       Mon Oct 24 15:08:13 2005
@@ -21,9 +21,9 @@
 
 //#define BSDEBUG
 
-int readblock_into(u64 id, void *block);
-
-int open_socket(u16 port) {
+int readblock_into(uint64_t id, void *block);
+
+int open_socket(uint16_t port) {
     
     struct sockaddr_in sn;
     int sock;
@@ -75,7 +75,7 @@
         int rc, len;
         struct sockaddr_in from;
         size_t slen = sizeof(from);
-        u64 bid;
+        uint64_t bid;
 
         len = recvfrom(bssock, (void *)&msgbuf, sizeof(msgbuf), 0,
                        (struct sockaddr *)&from, &slen);
@@ -155,7 +155,7 @@
  *   @return: 0 if OK, other on error
  */
 
-int readblock_into(u64 id, void *block) {
+int readblock_into(uint64_t id, void *block) {
     if (lseek64(block_fp, ((off64_t) id - 1LL) * BLOCK_SIZE, SEEK_SET) < 0) {
         printf ("%Ld\n", (id - 1) * BLOCK_SIZE);
         perror("readblock lseek");
@@ -175,7 +175,7 @@
  *
  *   @return: zero on success, -1 on failure
  */
-int writeblock(u64 id, void *block) {
+int writeblock(uint64_t id, void *block) {
     if (lseek64(block_fp, ((off64_t) id - 1LL) * BLOCK_SIZE, SEEK_SET) < 0) {
         perror("writeblock lseek");
         return -1;
@@ -193,10 +193,10 @@
  *
  *   @return: new id of block on disk
  */
-static u64 lastblock = 0;
-
-u64 allocblock(void *block) {
-    u64 lb;
+static uint64_t lastblock = 0;
+
+uint64_t allocblock(void *block) {
+    uint64_t lb;
     off64_t pos;
 
     retry:
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/bstest.c
--- a/tools/blktap/parallax/bstest.c    Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/bstest.c    Mon Oct 24 15:08:13 2005
@@ -22,7 +22,7 @@
 #include <errno.h>
 #include "blockstore.h"
 
-int direct(char *host, u32 op, u64 id, int len) {
+int direct(char *host, uint32_t op, uint64_t id, int len) {
     struct sockaddr_in sn, peer;
     int sock;
     bsmsg_t msgbuf;
@@ -99,8 +99,8 @@
 
 int main (int argc, char **argv) {
 
-    u32 op = 0;
-    u64 id = 0;
+    uint32_t op = 0;
+    uint64_t id = 0;
     int len = 0, rc;
     void *block;
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/parallax.c
--- a/tools/blktap/parallax/parallax.c  Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/parallax.c  Mon Oct 24 15:08:13 2005
@@ -330,8 +330,8 @@
 struct cb_param {
     pending_t *pent;
     int       segment;
-    u64       sector; 
-    u64       vblock; /* for debug printing -- can be removed. */
+    uint64_t       sector; 
+    uint64_t       vblock; /* for debug printing -- can be removed. */
 };
 
 static void read_cb(struct io_ret r, void *in_param)
@@ -393,9 +393,9 @@
 int parallax_read(blkif_request_t *req, blkif_t *blkif)
 {
     blkif_response_t *rsp;
-    u64 vblock, gblock;
+    uint64_t vblock, gblock;
     vdi_t *vdi;
-    u64 sector;
+    uint64_t sector;
     int i;
     char *dpage, *spage;
     pending_t *pent;
@@ -475,9 +475,9 @@
 int parallax_write(blkif_request_t *req, blkif_t *blkif)
 {
     blkif_response_t *rsp;
-    u64 sector;
+    uint64_t sector;
     int i, writable = 0;
-    u64 vblock, gblock;
+    uint64_t vblock, gblock;
     char *spage;
     unsigned long size, offset, start;
     vdi_t *vdi;
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/radix.c
--- a/tools/blktap/parallax/radix.c     Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/radix.c     Mon Oct 24 15:08:13 2005
@@ -33,7 +33,7 @@
 
 typedef struct rcache_st {
     radix_tree_node  *node;
-    u64               id;
+    uint64_t               id;
     struct rcache_st *hash_next;
     struct rcache_st *cache_next;
     struct rcache_st *cache_prev;
@@ -55,7 +55,7 @@
 }
     
 
-void rcache_write(u64 id, radix_tree_node *node)
+void rcache_write(uint64_t id, radix_tree_node *node)
 {
     rcache_t *r, *tmp, **curs;
     
@@ -135,7 +135,7 @@
     pthread_mutex_unlock(&rcache_mutex);
 }
 
-radix_tree_node *rcache_read(u64 id)
+radix_tree_node *rcache_read(uint64_t id)
 {
     rcache_t *r, *tmp;
     radix_tree_node *node = NULL;
@@ -181,7 +181,7 @@
 }
 
 
-void *rc_readblock(u64 id)
+void *rc_readblock(uint64_t id)
 {
     void *ret;
     
@@ -197,9 +197,9 @@
     return(ret);
 }
 
-u64 rc_allocblock(void *block)
-{
-    u64 ret;
+uint64_t rc_allocblock(void *block)
+{
+    uint64_t ret;
     
     ret = allocblock(block);
     
@@ -209,7 +209,7 @@
     return(ret);
 }
 
-int rc_writeblock(u64 id, void *block)
+int rc_writeblock(uint64_t id, void *block)
 {
     int ret;
     
@@ -233,9 +233,9 @@
  * whether or not the block is writable, including the return
  * values of update and snapshot
  */
-u64 lookup(int height, u64 root, u64 key);
-u64 update(int height, u64 root, u64 key, u64 val);
-u64 snapshot(u64 root);
+uint64_t lookup(int height, uint64_t root, uint64_t key);
+uint64_t update(int height, uint64_t root, uint64_t key, uint64_t val);
+uint64_t snapshot(uint64_t root);
 
 /**
  * cloneblock: clone an existing block in memory
@@ -264,9 +264,9 @@
  *   @return: value on success, zero on error
  */
 
-u64 lookup(int height, u64 root, u64 key) {
+uint64_t lookup(int height, uint64_t root, uint64_t key) {
     radix_tree_node node;
-    u64 mask = ONE;
+    uint64_t mask = ONE;
     
     assert(key >> height == 0);
 
@@ -275,7 +275,7 @@
 
     /* now carve off equal sized chunks at each step */
     for (;;) {
-        u64 oldroot;
+        uint64_t oldroot;
 
 #ifdef DEBUG
         printf("lookup: height=%3d root=%3Ld offset=%3d%s\n", height, root,
@@ -314,9 +314,9 @@
  *   @returns: (possibly new) root id on success (with LSB=1), 0 on failure
  */
 
-u64 update(int height, u64 root, u64 key, u64 val) {
+uint64_t update(int height, uint64_t root, uint64_t key, uint64_t val) {
     int offset;
-    u64 child;
+    uint64_t child;
     radix_tree_node node;
     
     /* base case--return val */
@@ -390,7 +390,7 @@
  *
  *   @return: new root node, 0 on error
  */
-u64 snapshot(u64 root) {
+uint64_t snapshot(uint64_t root) {
     radix_tree_node node, newnode;
 
     if ((node = rc_readblock(getid(root))) == NULL)
@@ -418,7 +418,7 @@
  * child are okay...)
  */
 
-int collapse(int height, u64 proot, u64 croot)
+int collapse(int height, uint64_t proot, uint64_t croot)
 {
     int i, numlinks, ret, total = 0;
     radix_tree_node pnode, cnode;
@@ -480,7 +480,7 @@
 }
 
 
-void print_root(u64 root, int height, FILE *dot_f)
+void print_root(uint64_t root, int height, FILE *dot_f)
 {
     FILE *f;
     int i;
@@ -558,9 +558,9 @@
 #ifdef RADIX_STANDALONE
 
 int main(int argc, char **argv) {
-    u64 key = ZERO, val = ZERO;
-    u64 root = writable(2ULL);
-    u64 p = ZERO, c = ZERO;
+    uint64_t key = ZERO, val = ZERO;
+    uint64_t root = writable(2ULL);
+    uint64_t p = ZERO, c = ZERO;
     int v;
     char buff[4096];
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/radix.h
--- a/tools/blktap/parallax/radix.h     Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/radix.h     Mon Oct 24 15:08:13 2005
@@ -24,7 +24,7 @@
 #define RADIX_TREE_MAP_MASK 0x1ff
 #define RADIX_TREE_MAP_ENTRIES 512
 
-typedef u64 *radix_tree_node;
+typedef uint64_t *radix_tree_node;
 
 
 /*
@@ -33,11 +33,11 @@
  * whether or not the block is writable, including the return
  * values of update and snapshot
  */
-u64 lookup(int height, u64 root, u64 key);
-u64 update(int height, u64 root, u64 key, u64 val);
-u64 snapshot(u64 root);
-int collapse(int height, u64 proot, u64 croot);
-int isprivate(int height, u64 root, u64 key);
+uint64_t lookup(int height, uint64_t root, uint64_t key);
+uint64_t update(int height, uint64_t root, uint64_t key, uint64_t val);
+uint64_t snapshot(uint64_t root);
+int collapse(int height, uint64_t proot, uint64_t croot);
+int isprivate(int height, uint64_t root, uint64_t key);
 
 
 void __rcache_init(void);
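
For reference, the prototypes above are used the way the RADIX_STANDALONE test at the bottom of radix.c uses them: roots carry a writable flag in their LSB and update() may hand back a new root. A rough sketch, in which the height value and the key/value ids are made up for illustration:

    static void radix_example(void)
    {
            uint64_t root = writable(2ULL);    /* as in radix.c's standalone main() */
            uint64_t key  = 0x123ULL;
            uint64_t val  = 0x456ULL;

            root = update(27, root, key, val); /* returns the (possibly new) root, LSB set */
            printf("lookup -> %lld\n", (long long)lookup(27, root, key));
    }
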
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/requests-async.c
--- a/tools/blktap/parallax/requests-async.c    Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/requests-async.c    Mon Oct 24 15:08:13 2005
@@ -27,14 +27,14 @@
 #endif
 
 struct block_info {
-    u32        crc;
-    u32        unused;
+    uint32_t        crc;
+    uint32_t        unused;
 };
 
 struct io_req {
     enum { IO_OP_READ, IO_OP_WRITE } op;
-    u64        root;
-    u64        vaddr;
+    uint64_t        root;
+    uint64_t        vaddr;
     int        state;
     io_cb_t    cb;
     void      *param;
@@ -44,7 +44,7 @@
     struct io_ret     retval;/* holds the return while we unlock. */
     char             *block; /* the block to write */
     radix_tree_node   radix[3];
-    u64               radix_addr[3];
+    uint64_t               radix_addr[3];
     struct block_info bi;
 };
 
@@ -129,7 +129,7 @@
 static void read_cb(struct io_ret ret, void *param);
 static void write_cb(struct io_ret ret, void *param);
 
-int vdi_read(vdi_t *vdi, u64 vaddr, io_cb_t cb, void *param)
+int vdi_read(vdi_t *vdi, uint64_t vaddr, io_cb_t cb, void *param)
 {
     struct io_req *req;
 
@@ -156,7 +156,7 @@
 }
 
 
-int   vdi_write(vdi_t *vdi, u64 vaddr, char *block, 
+int   vdi_write(vdi_t *vdi, uint64_t vaddr, char *block, 
                 io_cb_t cb, void *param)
 {
     struct io_req *req;
@@ -177,8 +177,8 @@
     req->block  = block;
     /* Todo: add a pseodoheader to the block to include some location   */
     /* information in the CRC as well.                                  */
-    req->bi.crc = (u32) crc32(0L, Z_NULL, 0); 
-    req->bi.crc = (u32) crc32(req->bi.crc, block, BLOCK_SIZE); 
+    req->bi.crc = (uint32_t) crc32(0L, Z_NULL, 0); 
+    req->bi.crc = (uint32_t) crc32(req->bi.crc, block, BLOCK_SIZE); 
     req->bi.unused = 0xdeadbeef;
 
     req->cb     = cb;
@@ -196,7 +196,7 @@
 {
     struct io_req *req = (struct io_req *)param;
     radix_tree_node node;
-    u64 idx;
+    uint64_t idx;
     char *block;
     void *req_param;
 
@@ -268,15 +268,15 @@
     }
     case READ_DATA:
     {
-        u32 crc;
+        uint32_t crc;
 
         DPRINTF("READ_DATA\n");
         block = IO_BLOCK(ret);
         if (block == NULL) goto fail;
 
         /* crc check */
-        crc = (u32) crc32(0L, Z_NULL, 0); 
-        crc = (u32) crc32(crc, block, BLOCK_SIZE); 
+        crc = (uint32_t) crc32(0L, Z_NULL, 0); 
+        crc = (uint32_t) crc32(crc, block, BLOCK_SIZE); 
         if (crc != req->bi.crc) {
             /* TODO: add a retry loop here.                          */
             /* Do this after the cache is added -- make sure to      */
@@ -359,7 +359,7 @@
 {
     struct io_req *req = (struct io_req *)param;
     radix_tree_node node;
-    u64 a, addr;
+    uint64_t a, addr;
     void *req_param;
     struct block_info *bi;
 
@@ -721,7 +721,7 @@
     }
 }
 
-char *vdi_read_s(vdi_t *vdi, u64 vaddr)
+char *vdi_read_s(vdi_t *vdi, uint64_t vaddr)
 {
     pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
     char *block = NULL;
@@ -742,7 +742,7 @@
 }
 
 
-int vdi_write_s(vdi_t *vdi, u64 vaddr, char *block)
+int vdi_write_s(vdi_t *vdi, uint64_t vaddr, char *block)
 {
     pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
     int ret, result;
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/requests-async.h
--- a/tools/blktap/parallax/requests-async.h    Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/requests-async.h    Mon Oct 24 15:08:13 2005
@@ -10,18 +10,18 @@
 #define getid(x) (((x)>>1)&0x7fffffffffffffffLLU)
 #define iswritable(x) (((x) & 1LLU) != 0)
 #define writable(x) (((x) << 1) | 1LLU)
-#define readonly(x) ((u64)((x) << 1))
+#define readonly(x) ((uint64_t)((x) << 1))
 */
 
 #define VADDR_MASK 0x0000000003ffffffLLU /* 26-bits = 256Gig */
 #define VALID_VADDR(x) (((x) & VADDR_MASK) == (x))
 
-int vdi_read (vdi_t *vdi, u64 vaddr, io_cb_t cb, void *param);
-int vdi_write(vdi_t *vdi, u64 vaddr, char *block, io_cb_t cb, void *param);
+int vdi_read (vdi_t *vdi, uint64_t vaddr, io_cb_t cb, void *param);
+int   vdi_write(vdi_t *vdi, uint64_t vaddr, char *block, io_cb_t cb, void *param);
              
 /* synchronous versions: */
-char *vdi_read_s (vdi_t *vdi, u64 vaddr);
-int   vdi_write_s(vdi_t *vdi, u64 vaddr, char *block);
+char *vdi_read_s (vdi_t *vdi, uint64_t vaddr);
+int   vdi_write_s(vdi_t *vdi, uint64_t vaddr, char *block);
 
 #define ERR_BAD_VADDR  -1
 #define ERR_NOMEM      -2
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/snaplog.c
--- a/tools/blktap/parallax/snaplog.c   Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/snaplog.c   Mon Oct 24 15:08:13 2005
@@ -24,7 +24,7 @@
 
 
 
-snap_block_t *snap_get_block(u64 block)
+snap_block_t *snap_get_block(uint64_t block)
 {
     snap_block_t *blk = (snap_block_t *)readblock(block);
     
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/snaplog.h
--- a/tools/blktap/parallax/snaplog.h   Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/snaplog.h   Mon Oct 24 15:08:13 2005
@@ -13,12 +13,12 @@
 #define __SNAPLOG_H__
 
 typedef struct snap_id {
-    u64            block;
+    uint64_t            block;
     unsigned int   index;
 } snap_id_t;
 
 typedef struct snap_rec {
-    u64            radix_root;
+    uint64_t            radix_root;
     struct timeval timestamp;
     /* flags: */
     unsigned       deleted:1;
@@ -38,7 +38,7 @@
 static const snap_id_t null_snap_id = { 0, 0 }; 
 
 typedef struct snap_block_hdr {
-    u64            magic;
+    uint64_t            magic;
     snap_id_t      parent_block; /* parent block within this chain */
     snap_id_t      fork_block;   /* where this log was forked */
     unsigned       log_entries;  /* total entries since forking */
@@ -56,6 +56,6 @@
 } snap_block_t;
     
 
-snap_block_t *snap_get_block(u64 block);
+snap_block_t *snap_get_block(uint64_t block);
 
 #endif /* __SNAPLOG_H__ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/vdi.c
--- a/tools/blktap/parallax/vdi.c       Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/vdi.c       Mon Oct 24 15:08:13 2005
@@ -132,9 +132,9 @@
 /* vdi_get and vdi_put currently act more like alloc/free -- they don't 
  * do refcount-based allocation.  
  */
-vdi_t *vdi_get(u64 vdi_id)
-{
-    u64 vdi_blk;
+vdi_t *vdi_get(uint64_t vdi_id)
+{
+    uint64_t vdi_blk;
     vdi_t *vdi;
     
     vdi_blk = lookup(VDI_REG_HEIGHT, VDI_RADIX_ROOT, vdi_id);
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/vdi.h
--- a/tools/blktap/parallax/vdi.h       Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/vdi.h       Mon Oct 24 15:08:13 2005
@@ -21,9 +21,9 @@
 
 
 typedef struct vdi {
-    u64         id;               /* unique vdi id -- used by the registry   */
-    u64         block;            /* block where this vdi lives (also unique)*/
-    u64         radix_root;       /* radix root node for block mappings      */
+    uint64_t         id;               /* unique vdi id -- used by the registry   */
+    uint64_t         block;            /* block where this vdi lives (also unique)*/
+    uint64_t         radix_root;       /* radix root node for block mappings      */
     snap_id_t   snap;             /* next snapshot slot for this VDI         */
     struct vdi *next;             /* used to hash-chain in blkif.            */
     blkif_vdev_t vdevice;         /* currently mounted as...                 */
@@ -34,19 +34,19 @@
 #define VDI_REG_MAGIC   0xff00ff0bb0ff00ffLL
 
 typedef struct vdi_registry {
-    u64     magic;
-    u64     nr_vdis;
+    uint64_t     magic;
+    uint64_t     nr_vdis;
 } vdi_registry_t;
 
 
 int __init_vdi(void);
 
-vdi_t *vdi_get(u64 vdi_id);
+vdi_t *vdi_get(uint64_t vdi_id);
 void vdi_put(vdi_t *vdi);
 vdi_registry_t *get_vdi_registry(void);
 vdi_t *vdi_create(snap_id_t *parent_snap, char *name);
-u64 vdi_lookup_block(vdi_t *vdi, u64 vdi_block, int *writable);
-void vdi_update_block(vdi_t *vdi, u64 vdi_block, u64 g_block);
+uint64_t vdi_lookup_block(vdi_t *vdi, uint64_t vdi_block, int *writable);
+void vdi_update_block(vdi_t *vdi, uint64_t vdi_block, uint64_t g_block);
 void vdi_snapshot(vdi_t *vdi);
 
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/vdi_create.c
--- a/tools/blktap/parallax/vdi_create.c        Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/vdi_create.c        Mon Oct 24 15:08:13 2005
@@ -33,7 +33,7 @@
     name[VDI_NAME_SZ] = '\0';    
     
     if ( argc > 3 ) {
-        id.block   = (u64)          atoll(argv[2]);
+        id.block   = (uint64_t)          atoll(argv[2]);
         id.index   = (unsigned int) atol (argv[3]);
         from_snap  = 1;
     }
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/vdi_fill.c
--- a/tools/blktap/parallax/vdi_fill.c  Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/vdi_fill.c  Mon Oct 24 15:08:13 2005
@@ -22,13 +22,13 @@
 int main(int argc, char *argv[])
 {
     vdi_t       *vdi;
-    u64          id;
+    uint64_t          id;
     int          fd;
     struct stat  st;
-    u64          tot_size;
+    uint64_t          tot_size;
     char         spage[BLOCK_SIZE];
     char        *dpage;
-    u64          vblock = 0, count=0;
+    uint64_t          vblock = 0, count=0;
     
     __init_blockstore();
     init_block_async();
@@ -39,7 +39,7 @@
         exit(-1);
     }
         
-    id = (u64) atoll(argv[1]);
+    id = (uint64_t) atoll(argv[1]);
     
     vdi = vdi_get( id );
     
@@ -60,7 +60,7 @@
         exit(-1);
     }
     
-    tot_size = (u64) st.st_size;
+    tot_size = (uint64_t) st.st_size;
     printf("Filling VDI %Ld with %Ld bytes.\n", id, tot_size);
     
     printf("%011Ld blocks total\n", tot_size / BLOCK_SIZE);    
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/vdi_snap.c
--- a/tools/blktap/parallax/vdi_snap.c  Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/vdi_snap.c  Mon Oct 24 15:08:13 2005
@@ -17,7 +17,7 @@
 int main(int argc, char *argv[])
 {
     vdi_t  *vdi;
-    u64     id;
+    uint64_t     id;
     
     __init_blockstore();
     __init_vdi();
@@ -27,7 +27,7 @@
         exit(-1);
     }
     
-    id = (u64) atoll(argv[1]);
+    id = (uint64_t) atoll(argv[1]);
     
     vdi = vdi_get(id);
     
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/vdi_snap_delete.c
--- a/tools/blktap/parallax/vdi_snap_delete.c   Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/vdi_snap_delete.c   Mon Oct 24 15:08:13 2005
@@ -34,7 +34,7 @@
         exit(-1);
     }
     
-    id.block   = (u64)          atoll(argv[1]);
+    id.block   = (uint64_t)          atoll(argv[1]);
     id.index   = (unsigned int) atol (argv[2]);
     
     c_id = id;
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/vdi_snap_list.c
--- a/tools/blktap/parallax/vdi_snap_list.c     Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/vdi_snap_list.c     Mon Oct 24 15:08:13 2005
@@ -18,7 +18,7 @@
 int main(int argc, char *argv[])
 {
     vdi_t        *vdi;
-    u64           id;
+    uint64_t           id;
     int           i, max_snaps = -1;
     snap_block_t *blk;
     snap_id_t     sid;
@@ -32,7 +32,7 @@
         exit(-1);
     }
     
-    id = (u64) atoll(argv[1]);
+    id = (uint64_t) atoll(argv[1]);
     
     if ( argc > 2 ) {
         max_snaps = atoi(argv[2]);
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/vdi_unittest.c
--- a/tools/blktap/parallax/vdi_unittest.c      Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/vdi_unittest.c      Mon Oct 24 15:08:13 2005
@@ -39,9 +39,9 @@
     zero_page = newblock();
 }
 
-inline u64 make_vaddr(u64 L1, u64 L2, u64 L3)
+inline uint64_t make_vaddr(uint64_t L1, uint64_t L2, uint64_t L3)
 {
-    u64 ret = L1;
+    uint64_t ret = L1;
 
     ret = (ret << 9) | L2;
     ret = (ret << 9) | L3;
@@ -49,9 +49,9 @@
     return ret;
 }
 
-void touch_block(vdi_t *vdi, u64 L1, u64 L2, u64 L3)
+void touch_block(vdi_t *vdi, uint64_t L1, uint64_t L2, uint64_t L3)
 {
-    u64 vaddr;
+    uint64_t vaddr;
     char *page = pages[next_page++];
     char *rpage = NULL;
 
@@ -76,9 +76,9 @@
     freeblock(rpage);
 }
 
-void test_block(vdi_t *vdi, u64 L1, u64 L2, u64 L3, char *page)
+void test_block(vdi_t *vdi, uint64_t L1, uint64_t L2, uint64_t L3, char *page)
 {
-    u64 vaddr;
+    uint64_t vaddr;
     char *rpage = NULL;
 
     printf("TEST  (%3Lu, %3Lu, %3Lu)\n", L1, L2, L3);
@@ -103,7 +103,7 @@
 
 void coverage_test(vdi_t *vdi)
 {
-    u64 vaddr;
+    uint64_t vaddr;
     int i, j, k;
 
     /* Do a series of writes and reads to test all paths through the 
@@ -155,13 +155,13 @@
 int main(int argc, char *argv[])
 {
     vdi_t       *vdi;
-    u64          id;
+    uint64_t          id;
     int          fd;
     struct stat  st;
-    u64          tot_size;
+    uint64_t          tot_size;
     char         spage[BLOCK_SIZE];
     char        *dpage;
-    u64          vblock = 0, count=0;
+    uint64_t          vblock = 0, count=0;
     
     __init_blockstore();
     init_block_async();
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/parallax/vdi_validate.c
--- a/tools/blktap/parallax/vdi_validate.c      Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/parallax/vdi_validate.c      Mon Oct 24 15:08:13 2005
@@ -23,13 +23,13 @@
 int main(int argc, char *argv[])
 {
     vdi_t       *vdi;
-    u64          id;
+    uint64_t          id;
     int          fd;
     struct stat  st;
-    u64          tot_size;
+    uint64_t          tot_size;
     char         spage[BLOCK_SIZE], *dpage;
     char        *vpage;
-    u64          vblock = 0, count=0;
+    uint64_t          vblock = 0, count=0;
     
     __init_blockstore();
     init_block_async();
@@ -40,7 +40,7 @@
         exit(-1);
     }
         
-    id = (u64) atoll(argv[1]);
+    id = (uint64_t) atoll(argv[1]);
     
     vdi = vdi_get( id );
     
@@ -61,7 +61,7 @@
         exit(-1);
     }
     
-    tot_size = (u64) st.st_size;
+    tot_size = (uint64_t) st.st_size;
     printf("Testing VDI %Ld (%Ld bytes).\n", id, tot_size);
     
     printf("           ");
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/blktap/ublkback/ublkbacklib.c
--- a/tools/blktap/ublkback/ublkbacklib.c       Fri Oct 21 19:58:39 2005
+++ b/tools/blktap/ublkback/ublkbacklib.c       Mon Oct 24 15:08:13 2005
@@ -112,7 +112,7 @@
 typedef struct image {
     /* These need to turn into an array/rbtree for multi-disk support. */
     int  fd;
-    u64  fsid;
+    uint64_t  fsid;
     blkif_vdev_t   vdevice;
     long int size;
     long int secsize;
@@ -190,7 +190,7 @@
   }
 */
     if (image->size == 0)
-        image->size =((u64) 16836057);
+        image->size =((uint64_t) 16836057);
     image->secsize = 512;
     image->info = 0;
 
@@ -215,7 +215,7 @@
 int ublkback_request(blkif_t *blkif, blkif_request_t *req, int batch_done)
 {
     int fd;
-    u64 sector;
+    uint64_t sector;
     char *spage, *dpage;
     int ret, i, idx;
     blkif_response_t *rsp;
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/console/daemon/io.c
--- a/tools/console/daemon/io.c Fri Oct 21 19:58:39 2005
+++ b/tools/console/daemon/io.c Mon Oct 24 15:08:13 2005
@@ -79,44 +79,43 @@
 static void buffer_append(struct domain *dom)
 {
        struct buffer *buffer = &dom->buffer;
-       size_t size;
-       XENCONS_RING_IDX oldcons;
-       int notify = 0;
+       XENCONS_RING_IDX cons, prod, size;
        struct xencons_interface *intf = dom->interface;
 
-       while ((size = (intf->out_prod - intf->out_cons)) != 0) {
-               notify = 1;
-
-               if ((buffer->capacity - buffer->size) < size) {
-                       buffer->capacity += (size + 1024);
-                       buffer->data = realloc(buffer->data, buffer->capacity);
-                       if (buffer->data == NULL) {
-                               dolog(LOG_ERR, "Memory allocation failed");
-                               exit(ENOMEM);
-                       }
-               }
-
-               oldcons = intf->out_cons;
-               while ((intf->out_cons - oldcons) < size) {
-                       buffer->data[buffer->size] = intf->out[
-                               MASK_XENCONS_IDX(intf->out_cons, intf->out)];
-                       buffer->size++;
-                       intf->out_cons++;
-               }
-
-               if (buffer->max_capacity &&
-                   buffer->size > buffer->max_capacity) {
-                       memmove(buffer->data + (buffer->size -
-                                               buffer->max_capacity),
-                               buffer->data, buffer->max_capacity);
-                       buffer->data = realloc(buffer->data,
-                                              buffer->max_capacity);
-                       buffer->capacity = buffer->max_capacity;
-               }
-       }
-
-       if (notify)
-               evtchn_notify(dom);
+       cons = intf->out_cons;
+       prod = intf->out_prod;
+       mb();
+
+       size = prod - cons;
+       if ((size == 0) || (size > sizeof(intf->out)))
+               return;
+
+       if ((buffer->capacity - buffer->size) < size) {
+               buffer->capacity += (size + 1024);
+               buffer->data = realloc(buffer->data, buffer->capacity);
+               if (buffer->data == NULL) {
+                       dolog(LOG_ERR, "Memory allocation failed");
+                       exit(ENOMEM);
+               }
+       }
+
+       while (cons != prod)
+               buffer->data[buffer->size++] = intf->out[
+                       MASK_XENCONS_IDX(cons++, intf->out)];
+
+       mb();
+       intf->out_cons = cons;
+       evtchn_notify(dom);
+
+       if (buffer->max_capacity &&
+           buffer->size > buffer->max_capacity) {
+               memmove(buffer->data + (buffer->size -
+                                       buffer->max_capacity),
+                       buffer->data, buffer->max_capacity);
+               buffer->data = realloc(buffer->data,
+                                      buffer->max_capacity);
+               buffer->capacity = buffer->max_capacity;
+       }
 }
 
 static bool buffer_empty(struct buffer *buffer)
@@ -164,6 +163,16 @@
                        tcsetattr(master, TCSAFLUSH, &term);
                }
 
+               success = asprintf(&path, "%s/limit", dom->conspath) != -1;
+               if (!success)
+                       goto out;
+               data = xs_read(xs, NULL, path, &len);
+               if (data) {
+                       dom->buffer.max_capacity = strtoul(data, 0, 0);
+                       free(data);
+               }
+               free(path);
+
                success = asprintf(&path, "%s/tty", dom->conspath) != -1;
                if (!success)
                        goto out;
@@ -171,16 +180,6 @@
                free(path);
                if (!success)
                        goto out;
-
-               success = asprintf(&path, "%s/limit", dom->conspath) != -1;
-               if (!success)
-                       goto out;
-               data = xs_read(xs, NULL, path, &len);
-               if (data) {
-                       dom->buffer.max_capacity = strtoul(data, 0, 0);
-                       free(data);
-               }
-               free(path);
        }
 
        return master;
@@ -270,6 +269,18 @@
        }
        dom->local_port = rc;
 
+       if (dom->tty_fd == -1) {
+               dom->tty_fd = domain_create_tty(dom);
+
+               if (dom->tty_fd == -1) {
+                       err = errno;
+                       close(dom->evtchn_fd);
+                       dom->evtchn_fd = -1;
+                       dom->local_port = -1;
+                       goto out;
+               }
+       }
+
  out:
        return err;
 }
@@ -302,10 +313,7 @@
        }
 
        dom->domid = domid;
-
        dom->conspath = xs_get_domain_path(xs, dom->domid);
-       if (dom->conspath == NULL)
-               goto out;
        s = realloc(dom->conspath, strlen(dom->conspath) +
                    strlen("/console") + 1);
        if (s == NULL)
@@ -313,7 +321,7 @@
        dom->conspath = s;
        strcat(dom->conspath, "/console");
 
-       dom->tty_fd = domain_create_tty(dom);
+       dom->tty_fd = -1;
        dom->is_dead = false;
        dom->buffer.data = 0;
        dom->buffer.size = 0;
@@ -419,10 +427,14 @@
        char msg[80];
        int i;
        struct xencons_interface *intf = dom->interface;
-       XENCONS_RING_IDX filled = intf->in_prod - intf->in_cons;
-
-       if (sizeof(intf->in) > filled)
-               len = sizeof(intf->in) - filled;
+       XENCONS_RING_IDX cons, prod;
+
+       cons = intf->in_cons;
+       prod = intf->in_prod;
+       mb();
+
+       if (sizeof(intf->in) > (prod - cons))
+               len = sizeof(intf->in) - (prod - cons);
        if (len > sizeof(msg))
                len = sizeof(msg);
 
@@ -441,10 +453,11 @@
                }
        } else if (domain_is_valid(dom->domid)) {
                for (i = 0; i < len; i++) {
-                       intf->in[MASK_XENCONS_IDX(intf->in_prod, intf->in)] =
+                       intf->in[MASK_XENCONS_IDX(prod++, intf->in)] =
                                msg[i];
-                       intf->in_prod++;
-               }
+               }
+               wmb();
+               intf->in_prod = prod;
                evtchn_notify(dom);
        } else {
                close(dom->tty_fd);
@@ -474,7 +487,7 @@
 
 static void handle_ring_read(struct domain *dom)
 {
-       u16 v;
+       uint16_t v;
 
        if (!read_sync(dom->evtchn_fd, &v, sizeof(v)))
                return;
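
The buffer_append() rewrite above follows the usual single-producer/single-consumer ring discipline: snapshot the consumer and producer indices, issue a memory barrier, reject a fill level larger than the ring itself, copy bytes out through the index mask, then barrier again before publishing the new consumer index and notifying the other end. The fragment below is only a minimal sketch of that pattern for reference; the demo_ring type, MASK() and barrier() are simplified stand-ins (not the real xencons_interface, MASK_XENCONS_IDX() or mb()), and ring_consume() is an invented name.

#include <stdint.h>
#include <stddef.h>

#define RING_SIZE 1024                      /* must be a power of two */
#define MASK(idx) ((idx) & (RING_SIZE - 1)) /* stand-in for MASK_XENCONS_IDX() */
#define barrier() __sync_synchronize()      /* stand-in for mb()/wmb() */

struct demo_ring {                          /* simplified xencons-style ring */
    char out[RING_SIZE];
    volatile uint32_t out_cons, out_prod;   /* free-running indices */
};

/* Drain everything currently in the ring into dst; returns bytes copied. */
static size_t ring_consume(struct demo_ring *r, char *dst, size_t dst_len)
{
    uint32_t cons = r->out_cons;
    uint32_t prod = r->out_prod;
    barrier();                              /* read indices before the data */

    uint32_t size = prod - cons;
    if (size == 0 || size > RING_SIZE)      /* empty or corrupt: do nothing */
        return 0;
    if (size > dst_len)
        size = dst_len;

    for (uint32_t i = 0; i < size; i++)
        dst[i] = r->out[MASK(cons++)];

    barrier();                              /* copy completes before publishing */
    r->out_cons = cons;                     /* hand the space back to the producer */
    /* a real implementation would notify the producer here (evtchn_notify) */
    return size;
}
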
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/debugger/gdb/gdb-6.2.1-xen-sparse/gdb/gdbserver/linux-xen-low.c
--- a/tools/debugger/gdb/gdb-6.2.1-xen-sparse/gdb/gdbserver/linux-xen-low.c     Fri Oct 21 19:58:39 2005
+++ b/tools/debugger/gdb/gdb-6.2.1-xen-sparse/gdb/gdbserver/linux-xen-low.c     Mon Oct 24 15:08:13 2005
@@ -38,7 +38,7 @@
 #include <xenctrl.h>
 #define TRACE_ENTER /* printf("enter %s\n", __FUNCTION__) */
 
-long (*myptrace)(int xc_handle, enum __ptrace_request, u32, long, long);
+long (*myptrace)(int xc_handle, enum __ptrace_request, uint32_t, long, long);
 int (*myxcwait)(int xc_handle, int domain, int *status, int options) ;
 static int xc_handle;
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/debugger/libxendebug/xendebug.c
--- a/tools/debugger/libxendebug/xendebug.c     Fri Oct 21 19:58:39 2005
+++ b/tools/debugger/libxendebug/xendebug.c     Mon Oct 24 15:08:13 2005
@@ -41,8 +41,8 @@
 {
     struct list_head list;
     unsigned long address;
-    u32 domain;
-    u8 old_value;                             /* old value for software bkpt */
+    uint32_t domain;
+    uint8_t old_value;                             /* old value for software bkpt */
 } bwcpoint_t, *bwcpoint_p;
 
 static bwcpoint_t bwcpoint_list;
@@ -52,7 +52,7 @@
 typedef struct domain_context                 /* local cache of domain state */
 {
     struct list_head     list;
-    u32                  domid;
+    uint32_t                  domid;
     boolean              valid[MAX_VIRT_CPUS];
     vcpu_guest_context_t context[MAX_VIRT_CPUS];
 
@@ -92,7 +92,7 @@
 /**************/
 
 static domain_context_p
-xendebug_domain_context_search (u32 domid)
+xendebug_domain_context_search (uint32_t domid)
 {
     struct list_head *entry;
     domain_context_p  ctxt;
@@ -107,7 +107,7 @@
 }
 
 static __inline__ domain_context_p
-xendebug_get_context (int xc_handle, u32 domid, u32 vcpu)
+xendebug_get_context (int xc_handle, uint32_t domid, uint32_t vcpu)
 {
     int rc;
     domain_context_p ctxt;
@@ -130,7 +130,7 @@
 }
 
 static __inline__ int
-xendebug_set_context (int xc_handle, domain_context_p ctxt, u32 vcpu)
+xendebug_set_context (int xc_handle, domain_context_p ctxt, uint32_t vcpu)
 {
     dom0_op_t op;
     int rc;
@@ -157,8 +157,8 @@
 
 int
 xendebug_attach(int xc_handle,
-                u32 domid,
-                u32 vcpu)
+                uint32_t domid,
+                uint32_t vcpu)
 {
     domain_context_p ctxt;
 
@@ -176,8 +176,8 @@
 
 int
 xendebug_detach(int xc_handle,
-                u32 domid,
-                u32 vcpu)
+                uint32_t domid,
+                uint32_t vcpu)
 {
     domain_context_p ctxt;
     
@@ -197,8 +197,8 @@
 
 int
 xendebug_read_registers(int xc_handle,
-                        u32 domid,
-                        u32 vcpu,
+                        uint32_t domid,
+                        uint32_t vcpu,
                         cpu_user_regs_t **regs)
 {
     domain_context_p ctxt;
@@ -218,8 +218,8 @@
 
 int
 xendebug_read_fpregisters (int xc_handle,
-                           u32 domid,
-                           u32 vcpu,
+                           uint32_t domid,
+                           uint32_t vcpu,
                            char **regs)
 {
     domain_context_p ctxt;
@@ -239,8 +239,8 @@
 
 int
 xendebug_write_registers(int xc_handle,
-                         u32 domid,
-                         u32 vcpu,
+                         uint32_t domid,
+                         uint32_t vcpu,
                          cpu_user_regs_t *regs)
 {
     domain_context_p ctxt;
@@ -260,8 +260,8 @@
 
 int
 xendebug_step(int xc_handle,
-              u32 domid,
-              u32 vcpu)
+              uint32_t domid,
+              uint32_t vcpu)
 {
     domain_context_p ctxt;
     int rc;
@@ -282,8 +282,8 @@
 
 int
 xendebug_continue(int xc_handle,
-                  u32 domid,
-                  u32 vcpu)
+                  uint32_t domid,
+                  uint32_t vcpu)
 {
     domain_context_p ctxt;
     int rc;
@@ -310,8 +310,8 @@
 
 /* access to one page */
 static int
-xendebug_memory_page (domain_context_p ctxt, int xc_handle, u32 vcpu,
-                      int protection, unsigned long address, int length, u8 *buffer)
+xendebug_memory_page (domain_context_p ctxt, int xc_handle, uint32_t vcpu,
+                      int protection, unsigned long address, int length, uint8_t *buffer)
 {
     vcpu_guest_context_t *vcpu_ctxt = &ctxt->context[vcpu];
     unsigned long pde, page;
@@ -406,8 +406,8 @@
 
 /* divide a memory operation into accesses to individual pages */
 static int
-xendebug_memory_op (domain_context_p ctxt, int xc_handle, u32 vcpu,
-                    int protection, unsigned long address, int length, u8 *buffer)
+xendebug_memory_op (domain_context_p ctxt, int xc_handle, uint32_t vcpu,
+                    int protection, unsigned long address, int length, uint8_t *buffer)
 {
     int      remain;              /* number of bytes to touch past this page */
     int      bytes   = 0;
@@ -429,11 +429,11 @@
 
 int
 xendebug_read_memory(int xc_handle,
-                     u32 domid,
-                     u32 vcpu,
+                     uint32_t domid,
+                     uint32_t vcpu,
                      unsigned long address,
-                     u32 length,
-                     u8 *data)
+                     uint32_t length,
+                     uint8_t *data)
 {
     domain_context_p ctxt;
 
@@ -449,11 +449,11 @@
 
 int
 xendebug_write_memory(int xc_handle,
-                      u32 domid,
-                      u32 vcpu,
+                      uint32_t domid,
+                      uint32_t vcpu,
                       unsigned long address,
-                      u32 length,
-                      u8 *data)
+                      uint32_t length,
+                      uint8_t *data)
 {
     domain_context_p ctxt;
 
@@ -469,13 +469,13 @@
 
 int
 xendebug_insert_memory_breakpoint(int xc_handle,
-                                  u32 domid,
-                                  u32 vcpu,
+                                  uint32_t domid,
+                                  uint32_t vcpu,
                                   unsigned long address,
-                                  u32 length)
+                                  uint32_t length)
 {
     bwcpoint_p bkpt;
-    u8 breakpoint_opcode = 0xcc;
+    uint8_t breakpoint_opcode = 0xcc;
 
     printf("insert breakpoint %d:%lx %d\n",
             domid, address, length);
@@ -515,10 +515,10 @@
 
 int
 xendebug_remove_memory_breakpoint(int xc_handle,
-                                  u32 domid,
-                                  u32 vcpu,
+                                  uint32_t domid,
+                                  uint32_t vcpu,
                                   unsigned long address,
-                                  u32 length)
+                                  uint32_t length)
 {
     bwcpoint_p bkpt = NULL;
 
@@ -552,7 +552,7 @@
 xendebug_query_domain_stop(int xc_handle, int *dom_list, int dom_list_size)
 {
     xc_dominfo_t *info;
-    u32 first_dom = 0;
+    uint32_t first_dom = 0;
     int max_doms = 1024;
     int nr_doms, loop;
     int count = 0;
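
xendebug_insert_memory_breakpoint() above (and its pdb counterpart later in this patch) uses the standard software-breakpoint technique: remember the byte at the target address, overwrite it with the x86 int3 opcode 0xcc, and write the saved byte back when the breakpoint is removed. The sketch below shows only that save/patch/restore bookkeeping; guest_mem, mem_read(), mem_write() and sw_bkpt are made-up stand-ins for the libxendebug memory accessors and bwcpoint_t, used here purely for illustration.

#include <stdint.h>
#include <string.h>

#define INT3 0xcc                 /* x86 software-breakpoint opcode */

/* A toy 4 KiB "guest" image so the example is self-contained; in libxendebug
 * the accesses go through xendebug_read_memory()/xendebug_write_memory(). */
static uint8_t guest_mem[4096];

static int mem_read(unsigned long addr, uint8_t *buf, uint32_t len)
{
    if (addr + len > sizeof(guest_mem)) return -1;
    memcpy(buf, &guest_mem[addr], len);
    return 0;
}

static int mem_write(unsigned long addr, const uint8_t *buf, uint32_t len)
{
    if (addr + len > sizeof(guest_mem)) return -1;
    memcpy(&guest_mem[addr], buf, len);
    return 0;
}

struct sw_bkpt {                  /* cut-down analogue of bwcpoint_t */
    unsigned long address;
    uint8_t       old_value;      /* byte displaced by INT3 */
    int           active;
};

static int bkpt_insert(struct sw_bkpt *b, unsigned long address)
{
    uint8_t opcode = INT3;

    if (mem_read(address, &b->old_value, 1))    /* 1. save the original byte */
        return -1;
    if (mem_write(address, &opcode, 1))         /* 2. patch in int3 */
        return -1;
    b->address = address;
    b->active = 1;
    return 0;
}

static int bkpt_remove(struct sw_bkpt *b)
{
    if (!b->active || mem_write(b->address, &b->old_value, 1))
        return -1;                              /* 3. restore on removal */
    b->active = 0;
    return 0;
}
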
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/debugger/libxendebug/xendebug.h
--- a/tools/debugger/libxendebug/xendebug.h     Fri Oct 21 19:58:39 2005
+++ b/tools/debugger/libxendebug/xendebug.h     Mon Oct 24 15:08:13 2005
@@ -12,63 +12,63 @@
 #include <xenctrl.h>
 
 int xendebug_attach(int xc_handle,
-                   u32 domid,
-                   u32 vcpu);
+                   uint32_t domid,
+                   uint32_t vcpu);
 
 int xendebug_detach(int xc_handle,
-                   u32 domid,
-                   u32 vcpu);
+                   uint32_t domid,
+                   uint32_t vcpu);
 
 int xendebug_read_registers(int xc_handle,
-                           u32 domid,
-                           u32 vcpu,
+                           uint32_t domid,
+                           uint32_t vcpu,
                            cpu_user_regs_t **regs);
 
 int xendebug_read_fpregisters (int xc_handle,
-                              u32 domid,
-                              u32 vcpu,
+                              uint32_t domid,
+                              uint32_t vcpu,
                               char **regs);
 
 int xendebug_write_registers(int xc_handle,
-                            u32 domid,
-                            u32 vcpu,
+                            uint32_t domid,
+                            uint32_t vcpu,
                             cpu_user_regs_t *regs);
 
 int xendebug_step(int xc_handle,
-                 u32 domid,
-                 u32 vcpu);
+                 uint32_t domid,
+                 uint32_t vcpu);
 
 int xendebug_continue(int xc_handle,
-                     u32 domid,
-                     u32 vcpu);
+                     uint32_t domid,
+                     uint32_t vcpu);
 
 int xendebug_read_memory(int xc_handle,
-                        u32 domid,
-                        u32 vcpu,
+                        uint32_t domid,
+                        uint32_t vcpu,
                         unsigned long address,
-                        u32 length,
-                        u8 *data);
+                        uint32_t length,
+                        uint8_t *data);
 
 
 int xendebug_write_memory(int xc_handle,
-                         u32 domid,
-                         u32 vcpu,
+                         uint32_t domid,
+                         uint32_t vcpu,
                          unsigned long address,
-                         u32 length,
-                         u8 *data);
+                         uint32_t length,
+                         uint8_t *data);
 
 
 int xendebug_insert_memory_breakpoint(int xc_handle,
-                                     u32 domid,
-                                     u32 vcpu,
+                                     uint32_t domid,
+                                     uint32_t vcpu,
                                      unsigned long address,
-                                     u32 length);
+                                     uint32_t length);
 
 int xendebug_remove_memory_breakpoint(int xc_handle,
-                                     u32 domid,
-                                     u32 vcpu,
+                                     uint32_t domid,
+                                     uint32_t vcpu,
                                      unsigned long address,
-                                     u32 length);
+                                     uint32_t length);
 
 int xendebug_query_domain_stop(int xc_handle,
                               int *dom_list, 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/debugger/pdb/linux-2.6-module/debug.c
--- a/tools/debugger/pdb/linux-2.6-module/debug.c       Fri Oct 21 19:58:39 2005
+++ b/tools/debugger/pdb/linux-2.6-module/debug.c       Mon Oct 24 15:08:13 2005
@@ -29,13 +29,13 @@
     unsigned long address;
     int length;
 
-    u8  type;                                                     /* BWC_??? */
-    u8  mode;                   /* for BWC_PAGE, the current protection mode */
-    u32 process;
-    u8  error;                /* error occured when enabling: don't disable. */
+    uint8_t  type;                                                     /* BWC_??? */
+    uint8_t  mode;                   /* for BWC_PAGE, the current protection mode */
+    uint32_t process;
+    uint8_t  error;                /* error occured when enabling: don't disable. */
 
     /* original values */
-    u8    orig_bkpt;                               /* single byte breakpoint */
+    uint8_t    orig_bkpt;                               /* single byte breakpoint */
     pte_t orig_pte;
 
     struct list_head watchpt_read_list;     /* read watchpoints on this page */
@@ -109,7 +109,7 @@
 }
 
 bwcpoint_p
-pdb_search_watchpoint (u32 process, unsigned long address)
+pdb_search_watchpoint (uint32_t process, unsigned long address)
 {
     bwcpoint_p bwc_watch = (bwcpoint_p) 0;
     bwcpoint_p bwc_entry = (bwcpoint_p) 0;
@@ -150,7 +150,7 @@
 int
 pdb_suspend (struct task_struct *target)
 {
-    u32 rc = 0;
+    uint32_t rc = 0;
 
     force_sig(SIGSTOP, target);                    /* force_sig_specific ??? */
 
@@ -364,11 +364,11 @@
 
 int
 pdb_insert_memory_breakpoint (struct task_struct *target, 
-                              unsigned long address, u32 length)
+                              unsigned long address, uint32_t length)
 {
     int rc = 0;
     bwcpoint_p bkpt;
-    u8 breakpoint_opcode = 0xcc;
+    uint8_t breakpoint_opcode = 0xcc;
 
     printk("insert breakpoint %d:%lx len: %d\n", target->pid, address, length);
 
@@ -399,7 +399,7 @@
 
 int
 pdb_remove_memory_breakpoint (struct task_struct *target,
-                              unsigned long address, u32 length)
+                              unsigned long address, uint32_t length)
 {
     int rc = 0;
     bwcpoint_p bkpt = NULL;
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/debugger/pdb/linux-2.6-module/pdb_debug.h
--- a/tools/debugger/pdb/linux-2.6-module/pdb_debug.h   Fri Oct 21 19:58:39 2005
+++ b/tools/debugger/pdb/linux-2.6-module/pdb_debug.h   Mon Oct 24 15:08:13 2005
@@ -18,9 +18,9 @@
 int pdb_step (struct task_struct *target);
 
 int pdb_insert_memory_breakpoint (struct task_struct *target, 
-                                  unsigned long address, u32 length);
+                                  unsigned long address, uint32_t length);
 int pdb_remove_memory_breakpoint (struct task_struct *target,
-                                  unsigned long address, u32 length);
+                                  unsigned long address, uint32_t length);
 int pdb_insert_watchpoint (struct task_struct *target,
                            pdb_op_watchpt_p watchpt);
 int pdb_remove_watchpoint (struct task_struct *target,
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/debugger/pdb/linux-2.6-module/pdb_module.h
--- a/tools/debugger/pdb/linux-2.6-module/pdb_module.h  Fri Oct 21 19:58:39 2005
+++ b/tools/debugger/pdb/linux-2.6-module/pdb_module.h  Mon Oct 24 15:08:13 2005
@@ -9,7 +9,7 @@
 #define PDB_OPCODE_ATTACH 2
 typedef struct pdb_op_attach
 {
-    u32  domain;
+    uint32_t  domain;
 } pdb_op_attach_t, *pdb_op_attach_p;
 
 #define PDB_OPCODE_DETACH 3
@@ -17,43 +17,43 @@
 #define PDB_OPCODE_RD_REG 4
 typedef struct pdb_op_rd_reg
 {
-    u32 reg;
-    u32 value;
+    uint32_t reg;
+    uint32_t value;
 } pdb_op_rd_reg_t, *pdb_op_rd_reg_p;
 
 #define PDB_OPCODE_RD_REGS 5
 typedef struct pdb_op_rd_regs
 {
-    u32 reg[GDB_REGISTER_FRAME_SIZE];
+    uint32_t reg[GDB_REGISTER_FRAME_SIZE];
 } pdb_op_rd_regs_t, *pdb_op_rd_regs_p;
 
 #define PDB_OPCODE_WR_REG 6
 typedef struct pdb_op_wr_reg
 {
-    u32 reg;
-    u32 value;
+    uint32_t reg;
+    uint32_t value;
 } pdb_op_wr_reg_t, *pdb_op_wr_reg_p;
 
 #define PDB_OPCODE_RD_MEM 7
 typedef struct pdb_op_rd_mem_req
 {
-    u32 address;
-    u32 length;
+    uint32_t address;
+    uint32_t length;
 } pdb_op_rd_mem_req_t, *pdb_op_rd_mem_req_p;
 
 typedef struct pdb_op_rd_mem_resp
 {
-    u32 address;
-    u32 length;
-    u8  data[1024];
+    uint32_t address;
+    uint32_t length;
+    uint8_t  data[1024];
 } pdb_op_rd_mem_resp_t, *pdb_op_rd_mem_resp_p;
 
 #define PDB_OPCODE_WR_MEM 8
 typedef struct pdb_op_wr_mem
 {
-    u32 address;
-    u32 length;
-    u8  data[1024];                                             /* arbitrary */
+    uint32_t address;
+    uint32_t length;
+    uint8_t  data[1024];                                             /* arbitrary */
 } pdb_op_wr_mem_t, *pdb_op_wr_mem_p;
 
 #define PDB_OPCODE_CONTINUE 9
@@ -63,8 +63,8 @@
 #define PDB_OPCODE_CLR_BKPT 12
 typedef struct pdb_op_bkpt
 {
-    u32 address;
-    u32 length;
+    uint32_t address;
+    uint32_t length;
 } pdb_op_bkpt_t, *pdb_op_bkpt_p;
 
 #define PDB_OPCODE_SET_WATCHPT 13
@@ -79,16 +79,16 @@
 #define BWC_WATCH_WRITE  102
 #define BWC_WATCH_READ   103
 #define BWC_WATCH_ACCESS 104
-    u32 type;
-    u32 address;
-    u32 length;
+    uint32_t type;
+    uint32_t address;
+    uint32_t length;
 } pdb_op_watchpt_t, *pdb_op_watchpt_p;
 
 
 typedef struct 
 {
-    u8   operation;       /* PDB_OPCODE_???      */
-    u32  process;
+    uint8_t   operation;       /* PDB_OPCODE_???      */
+    uint32_t  process;
     union
     {
         pdb_op_attach_t     attach;
@@ -107,10 +107,10 @@
 #define PDB_RESPONSE_ERROR -1
 
 typedef struct {
-    u8   operation;       /* copied from request */
-    u32  domain;          
-    u32  process;
-    s16  status;          /* PDB_RESPONSE_???    */
+    uint8_t  operation;       /* copied from request */
+    uint32_t domain;          
+    uint32_t process;
+    int16_t  status;          /* PDB_RESPONSE_???    */
     union
     {
         pdb_op_rd_reg_t      rd_reg;
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/debugger/pdb/pdb_caml_domain.c
--- a/tools/debugger/pdb/pdb_caml_domain.c      Fri Oct 21 19:58:39 2005
+++ b/tools/debugger/pdb/pdb_caml_domain.c      Mon Oct 24 15:08:13 2005
@@ -204,7 +204,7 @@
     int loop;
     char *buffer;
     unsigned long my_address = Int32_val(address);
-    u32 my_length = Int_val(length);
+    uint32_t my_length = Int_val(length);
 
     printf ("(pdb) read memory\n");
 
@@ -260,7 +260,7 @@
 
     char buffer[4096];  /* a big buffer */
     unsigned long  my_address;
-    u32 length = 0;
+    uint32_t length = 0;
 
     printf ("(pdb) write memory\n");
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/debugger/pdb/pdb_caml_evtchn.c
--- a/tools/debugger/pdb/pdb_caml_evtchn.c      Fri Oct 21 19:58:39 2005
+++ b/tools/debugger/pdb/pdb_caml_evtchn.c      Mon Oct 24 15:08:13 2005
@@ -125,7 +125,7 @@
 {
     CAMLparam1(fd);
 
-    u16 v;
+    uint16_t v;
     int bytes;
     int rc = -1;
     int myfd = Int_val(fd);
@@ -168,7 +168,7 @@
     CAMLparam1(fd);
 
     int myfd = Int_val(fd);
-    u16 myidx = Int_val(idx);
+    uint16_t myidx = Int_val(idx);
 
     (void)write(myfd, &myidx, sizeof(myidx));
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/debugger/pdb/pdb_caml_process.c
--- a/tools/debugger/pdb/pdb_caml_process.c     Fri Oct 21 19:58:39 2005
+++ b/tools/debugger/pdb/pdb_caml_process.c     Mon Oct 24 15:08:13 2005
@@ -404,7 +404,7 @@
 
     context_t ctx;
     pdb_request_t req;
-    u32 length = 0;
+    uint32_t length = 0;
 
     decode_context(&ctx, context);
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/debugger/pdb/pdb_caml_xc.c
--- a/tools/debugger/pdb/pdb_caml_xc.c  Fri Oct 21 19:58:39 2005
+++ b/tools/debugger/pdb/pdb_caml_xc.c  Mon Oct 24 15:08:13 2005
@@ -151,7 +151,7 @@
 }
 
 void *
-map_ring(u32 dom, unsigned long mfn )
+map_ring(uint32_t dom, unsigned long mfn )
 {
     return xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                 PROT_READ | PROT_WRITE, mfn);
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/debugger/pdb/pdb_caml_xcs.c
--- a/tools/debugger/pdb/pdb_caml_xcs.c Fri Oct 21 19:58:39 2005
+++ b/tools/debugger/pdb/pdb_caml_xcs.c Mon Oct 24 15:08:13 2005
@@ -36,7 +36,7 @@
 #include "pdb_module.h"
 #include "pdb_caml_xen.h"
 
-void *map_ring(u32 dom, unsigned long mfn );
+void *map_ring(uint32_t dom, unsigned long mfn );
 
 /*
  * xcs_initialize_ring : int -> int32 -> int32
@@ -180,7 +180,7 @@
     char *my_path = String_val(path);
     int my_msg_type = Int_val(msg_type);
     struct sockaddr_un addr;
-    u32 session_id = 0;
+    uint32_t session_id = 0;
     int data_fd;
     int ret, len;
     xcs_msg_t msg;
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/examples/Makefile
--- a/tools/examples/Makefile   Fri Oct 21 19:58:39 2005
+++ b/tools/examples/Makefile   Mon Oct 24 15:08:13 2005
@@ -24,7 +24,7 @@
 XEN_SCRIPTS += network-nat vif-nat
 XEN_SCRIPTS += block
 XEN_SCRIPTS += block-enbd
-XEN_SCRIPTS += xen-hotplug-common.sh
+XEN_SCRIPTS += xen-hotplug-common.sh xen-network-common.sh vif-common.sh
 
 XEN_HOTPLUG_DIR = /etc/hotplug
 XEN_HOTPLUG_SCRIPTS = xen-backend.agent
@@ -32,7 +32,9 @@
 UDEV_RULES_DIR = /etc/udev/rules.d
 UDEV_RULES = xen-backend.rules
 
-ifeq ($(findstring $(DISTDIR),$(DESTDIR)),$(DISTDIR))
+DI = $(shell readlink -f $(DISTDIR))
+DE = $(shell readlink -f $(DESTDIR))
+ifeq ($(findstring $(DI),$(DE)),$(DI))
 HOTPLUGS=install-hotplug install-udev
 else
 ifeq ($(shell [ -x /sbin/udev ] && [ ! -z `udev -V` ] && [ `/sbin/udev -V` -ge 059 ] && echo 1),1)
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/examples/network-bridge
--- a/tools/examples/network-bridge     Fri Oct 21 19:58:39 2005
+++ b/tools/examples/network-bridge     Mon Oct 24 15:08:13 2005
@@ -5,8 +5,8 @@
 # The script name to use is defined in /etc/xen/xend-config.sxp
 # in the network-script field.
 #
-# This script creates a bridge (default xen-br0), adds a device
-# (default eth0) to it, copies the IP addresses from the device
+# This script creates a bridge (default xenbr${vifnum}), adds a device
+# (default eth${vifnum}) to it, copies the IP addresses from the device
 # to the bridge and adjusts the routes accordingly.
 #
 # If all goes well, this should ensure that networking stays up.
@@ -20,9 +20,12 @@
 #
 # Vars:
 #
-# bridge     The bridge to use (default xen-br0).
-# netdev     The interface to add to the bridge (default eth0).
-# antispoof  Whether to use iptables to prevent spoofing (default yes).
+# vifnum     Virtual device number to use (default 0). Numbers >=1
+#            require the netback driver to have nloopbacks set to a
+#            higher value than its default of 1.
+# bridge     The bridge to use (default xenbr${vifnum}).
+# netdev     The interface to add to the bridge (default eth${vifnum}).
+# antispoof  Whether to use iptables to prevent spoofing (default no).
 #
 # start:
 # Creates the bridge and enslaves netdev to it.
@@ -39,16 +42,8 @@
 #
 #============================================================================
 
-# Gentoo doesn't have ifup/ifdown: define appropriate alternatives
-which ifup >& /dev/null
-if [ "$?" != 0 -a -e /etc/conf.d/net ]; then
-    ifup() {
-        /etc/init.d/net.$1 start
-    }
-    ifdown() {
-        /etc/init.d/net.$1 stop
-    }
-fi
+dir=$(dirname "$0")
+. "$dir/xen-network-common.sh"
 
 # Exit if anything goes wrong.
 set -e 
@@ -60,11 +55,35 @@
 # Pull variables in args in to environment.
 for arg ; do export "${arg}" ; done
 
-bridge=${bridge:-xen-br0}
-netdev=${netdev:-eth0}
+vifnum=${vifnum:-0}
+bridge=${bridge:-xenbr${vifnum}}
+netdev=${netdev:-eth${vifnum}}
 antispoof=${antispoof:-no}
 
-echo "*network $OP bridge=$bridge netdev=$netdev antispoof=$antispoof" >&2
+pdev="p${netdev}"
+vdev="veth${vifnum}"
+vif0="vif0.${vifnum}"
+
+legacy_mask_to_prefix() {
+    mask=$1
+    first=${mask%%.*}
+    second=${mask#*.}
+    third=${second#*.}
+    fourth=${third#*.}
+    second=${second%%.*}
+    third=${third%%.*}
+    declare -i INT FULLMASK BIT
+    INT=$((((($first*256)+$second)*256+$third)*256+$fourth))
+    FULLMASK=4294967295
+    BIT=1
+    for bit in `seq 32 -1 0`; do
+       if test $FULLMASK -eq $INT; then PREFIX=$bit; return; fi
+       FULLMASK=$(($FULLMASK-$BIT))
+       BIT=$((BIT*2))
+    done
+    echo "ERROR converting netmask $mask to prefix"
+    exit 1
+}
 
 # Usage: transfer_addrs src dst
 # Copy all IP addresses (including aliases) from device $src to device $dst.
@@ -99,6 +118,7 @@
 s@\([0-9]\+\.[0-9]\+\.[0-9]\+\.[0-9]\+\)/[0-9]\+@\1@
 s/${src}/dev ${src}/
 " | sh -e
+    ip link set dev ${dst} up
 }
 
 # Usage: transfer_routes src dst
@@ -131,11 +151,11 @@
 
     # Don't create the bridge if it already exists.
     if ! brctl show | grep -q ${bridge} ; then
-        brctl addbr ${bridge}
-        brctl stp ${bridge} off
-        brctl setfd ${bridge} 0
-    fi
-    ifconfig ${bridge} up
+       brctl addbr ${bridge}
+       brctl stp ${bridge} off
+       brctl setfd ${bridge} 0
+    fi
+    ip link set ${bridge} up
 }
 
 # Usage: add_to_bridge bridge dev
@@ -144,19 +164,17 @@
     local dev=$2
     # Don't add $dev to $bridge if it's already on a bridge.
     if ! brctl show | grep -q ${dev} ; then
-        brctl addif ${bridge} ${dev}
-    fi
-}
-
-# Usage: antispoofing dev bridge
+       brctl addif ${bridge} ${dev}
+    fi
+}
+
 # Set the default forwarding policy for $dev to drop.
 # Allow forwarding to the bridge.
 antispoofing () {
-    local dev=$1
-    local bridge=$2
-
     iptables -P FORWARD DROP
-    iptables -A FORWARD -m physdev --physdev-in ${dev} -j ACCEPT
+    iptables -F FORWARD
+    iptables -A FORWARD -m physdev --physdev-in ${pdev} -j ACCEPT
+    iptables -A FORWARD -m physdev --physdev-in ${vif0} -j ACCEPT
 }
 
 # Usage: show_status dev bridge
@@ -166,8 +184,10 @@
     local bridge=$2
     
     echo '============================================================'
-    ifconfig ${dev}
-    ifconfig ${bridge}
+    ip addr show ${dev}
+    ip addr show ${bridge}
+    echo ' '
+    brctl show ${bridge}
     echo ' '
     ip route list
     echo ' '
@@ -177,100 +197,113 @@
 
 op_start () {
     if [ "${bridge}" == "null" ] ; then
-        return
+       return
     fi
 
     create_bridge ${bridge}
 
-    if ifconfig 2>/dev/null | grep -q veth0 ; then
-        return
-    fi
-
-    if ifconfig veth0 2>/dev/null | grep -q veth0 ; then
-       mac=`ifconfig ${netdev} | grep HWadd | sed -e 's/.*\(..:..:..:..:..:..\).*/\1/'`
-       if ! ifdown ${netdev} ; then
-               # if ifup didn't work, see if we have an ip= on cmd line
-               if egrep 'ip=[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+:' /proc/cmdline ; 
-               then
-                        kip=`sed -e 
's!.*ip=\([0-9]\+\.[0-9]\+\.[0-9]\+\.[0-9]\+\):.*!\1!' /proc/cmdline`
-                        kmask=`sed -e 
's!.*ip=[^:]*:[^:]*:[^:]*:\([^:]*\):.*!\1!' /proc/cmdline` 
-                        kgate=`sed -e 's!.*ip=[^:]*:[^:]*:\([^:]*\):.*!\1!' 
/proc/cmdline`
-                       ifconfig ${netdev} 0.0.0.0 down
+    if ! ip link show 2>/dev/null | grep -q "^[0-9]*: ${vdev}"; then
+       return
+    fi
+
+    if ip link show ${vdev} 2>/dev/null >/dev/null; then
+       mac=`ip link show ${netdev} | grep 'link\/ether' | sed -e 's/.*ether \(..:..:..:..:..:..\).*/\1/'`
+       eval `/sbin/getcfg -d /etc/sysconfig/network/ -f ifcfg- -- ${netdev}`
+       transfer_addrs ${netdev} ${vdev}
+       if ! ifdown ${netdev}; then
+           # if ifup didn't work, see if we have an ip= on cmd line
+           if egrep 'ip=[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+:' /proc/cmdline; then
+               kip=`sed -e 's!.*ip=\([0-9]\+\.[0-9]\+\.[0-9]\+\.[0-9]\+\):.*!\1!' /proc/cmdline`
+               kmask=`sed -e 's!.*ip=[^:]*:[^:]*:[^:]*:\([^:]*\):.*!\1!' /proc/cmdline`
+               kgate=`sed -e 's!.*ip=[^:]*:[^:]*:\([^:]*\):.*!\1!' /proc/cmdline`
+               ip link set ${netdev} down
+               ip addr flush ${netdev}
+           fi
+       fi
+       ip link set ${netdev} name ${pdev}
+       ip link set ${vdev} name ${netdev}
+       ip link set ${pdev} down arp off
+       ip link set ${pdev} addr fe:ff:ff:ff:ff:ff
+       ip addr flush ${pdev}
+       ip link set ${netdev} addr ${mac} arp on
+       add_to_bridge ${bridge} ${vif0}
+       add_to_bridge ${bridge} ${pdev}
+       ip link set ${bridge} up
+       ip link set ${vif0} up
+       ip link set ${pdev} up 
+       if ! ifup ${HWD_CONFIG_0} ${netdev} ; then
+           if [ ${kip} ] ; then
+               # use the addresses we grocked from /proc/cmdline
+               if [ -z "${kmask}" ]; then 
+                   PREFIX=32; 
+               else 
+                   legacy_mask_to_prefix ${kmask};
                fi
+               ip addr add ${kip}/${PREFIX} dev ${netdev}
+               ip link set dev ${netdev} up
+               [ ${kgate} ] && ip route add default via ${kgate}
+           fi
        fi
-       ip link set ${netdev} name p${netdev}
-       ip link set veth0 name ${netdev}
-       ifconfig p${netdev} 0.0.0.0 -arp down
-       ifconfig p${netdev} hw ether fe:ff:ff:ff:ff:ff
-       ifconfig ${netdev} hw ether ${mac}
-       add_to_bridge ${bridge} vif0.0
-       add_to_bridge ${bridge} p${netdev}
-       ip link set ${bridge} up
-       ip link set vif0.0 up
-       ip link set p${netdev} up
-       if ! ifup ${netdev} ; then
-               if [ ${kip} ] ; then
-                       # use the addresses we grocked from /proc/cmdline       
-                       ifconfig ${netdev} ${kip} 
-                       [ ${kmask} ] && ifconfig ${netdev} netmask ${kmask} 
-                       ifconfig ${netdev} up
-                       [ ${kgate} ] && ip route add default via ${kgate}       
-               fi
-        fi
     else
-       # old style without veth0
-       transfer_addrs ${netdev} ${bridge}
-        transfer_routes ${netdev} ${bridge}
-    fi
-    
+       # old style without ${vdev}
+       transfer_addrs  ${netdev} ${bridge}
+       transfer_routes ${netdev} ${bridge}
+    fi
+
     if [ ${antispoof} == 'yes' ] ; then
-        antispoofing ${netdev} ${bridge}
+       antispoofing
     fi
 }
 
 op_stop () {
-    if [ "${bridge}" == "null" ] ; then
-        return
-    fi
-
-    if ifconfig peth0 2>/dev/null | grep -q peth0 ; then
-
-        ifconfig vif0.0 down
-        mac=`ifconfig eth0 | grep HWadd | \
-            sed -e 's/.*\(..:..:..:..:..:..\).*/\1/'`
-        ifconfig ${netdev} 0.0.0.0 down
-        ifconfig ${netdev} hw ether fe:ff:ff:ff:ff:ff
-
-        ifconfig p${netdev} down
-        ifconfig p${netdev} hw ether ${mac} arp 
-        brctl delif ${bridge} p${netdev}
-
-        ip link set eth0 name veth0
-        ip link set peth0 name eth0
-        ifconfig ${bridge} down
-        brctl delbr ${bridge}
-        ifup eth0
+    if [ "${bridge}" == "null" ]; then
+       return
+    fi
+    if ! ip link show ${bridge} >/dev/null 2>&1; then
+       return
+    fi
+
+    if ip link show ${pdev} 2>/dev/null >/dev/null; then
+       ip link set dev ${vif0} down
+       mac=`ip link show ${netdev} | grep 'link\/ether' | sed -e 's/.*ether \(..:..:..:..:..:..\).*/\1/'`
+       transfer_addrs ${netdev} ${pdev}
+       ifdown ${netdev}
+       ip link set ${netdev} down arp off
+       ip link set ${netdev} addr fe:ff:ff:ff:ff:ff
+       ip link set ${pdev} down
+       ip addr flush ${netdev}
+       ip link set ${pdev} addr ${mac} arp on
+
+       brctl delif ${bridge} ${pdev}
+       brctl delif ${bridge} ${vif0}
+       ip link set ${bridge} down
+
+       ip link set ${netdev} name ${vdev}
+       ip link set ${pdev} name ${netdev}
+       ifup ${netdev}
 
     else
-        transfer_routes ${bridge} ${netdev}
-    fi
+       transfer_routes ${bridge} ${netdev}
+       ip link set ${bridge} down
+    fi
+    brctl delbr ${bridge}
 }
 
 case ${OP} in
     start)
-        op_start
-        ;;
+       op_start
+       ;;
     
     stop)
-        op_stop
-        ;;
+       op_stop
+       ;;
 
     status)
-        show_status ${netdev} ${bridge}
-       ;;
+       show_status ${netdev} ${bridge}
+       ;;
 
     *)
-       echo 'Unknown command: ' ${OP} >&2
-       echo 'Valid commands are: start, stop, status' >&2
-       exit 1
+       echo 'Unknown command: ' ${OP} >&2
+       echo 'Valid commands are: start, stop, status' >&2
+       exit 1
 esac
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/examples/vif-bridge
--- a/tools/examples/vif-bridge Fri Oct 21 19:58:39 2005
+++ b/tools/examples/vif-bridge Mon Oct 24 15:08:13 2005
@@ -3,22 +3,19 @@
 # /etc/xen/vif-bridge
 #
 # Script for configuring a vif in bridged mode.
-# Xend calls a vif script when bringing a vif up or down.
-# This script is the default - but it can be configured for each vif.
-#
-# Example invocation:
-#
-# vif-bridge up domain=VM1 vif=vif1.0 bridge=xen-br0 ip="128.232.38.45/28 10.10.10.55/24"
-#
+# The hotplugging system will call this script if it is specified either in
+# the device configuration given to Xend, or the default Xend configuration
+# in /etc/xen/xend-config.sxp.  If the script is specified in neither of those
+# places, then this script is the default.
 #
 # Usage:
-# vif-bridge (up|down) {VAR=VAL}*
+# vif-bridge (up|down)
 #
-# Vars:
+# Environment vars:
+# vif         vif interface name (required).
+# XENBUS_PATH path to this device's details in the XenStore (required).
 #
-# domain  name of the domain the interface is on (required).
-# vif     vif interface name (required).
-# mac     vif MAC address (required).
+# Read from the store:
 # bridge  bridge to add the vif to (required).
 # ip      list of IP networks for the vif, space-separated (optional).
 #
@@ -31,64 +28,30 @@
 # rules for its ip addresses (if any).
 #============================================================================
 
-# Exit if anything goes wrong
-set -e 
-export PATH=/sbin:/bin:/usr/bin:/usr/sbin:$PATH
+dir=$(dirname "$0")
+. "$dir/vif-common.sh"
 
-echo "*vif-bridge $*" >&2
+bridge=$(xenstore_read "$XENBUS_PATH/bridge")
 
-# Operation name.
-OP=$1
-shift
+case "$command" in
+    up)
+        if brctl show "$bridge" | grep "$vif" >&/dev/null
+        then
+          log debug "$vif already attached to $bridge"
+          exit 0
+        fi
 
-# Pull variables in args into environment
-for arg ; do export "${arg}" ; done
+        brctl addif "$bridge" "$vif" ||
+          fatal "brctl addif $bridge $vif failed"
 
-# Required parameters. Fail if not set.
-domain=${domain:?}
-vif=${vif:?}
-mac=${mac:?}
-bridge=${bridge:?}
-
-# Optional parameters. Set defaults.
-ip=${ip:-''}   # default to null (do nothing)
-
-# Are we going up or down?
-case $OP in
-    up)
-        brcmd='addif'
-        iptcmd='-A'
+        ifconfig "$vif" up || fatal "ifconfig $vif up failed"
         ;;
     down)
-        brcmd='delif'
-        iptcmd='-D'
-        ;;
-    *)
-        echo 'Invalid command: ' $OP >&2
-        echo 'Valid commands are: up, down' >&2
-        exit 1
+        # vifs are auto-removed from bridge.
+        ifconfig "$vif" down || fatal "ifconfig $vif down failed"
         ;;
 esac
 
-# Don't do anything if the bridge is "null".
-if [ "${bridge}" == "null" ] ; then
-    exit
-fi
+handle_iptable
 
-# Add vif to bridge. vifs are auto-removed from bridge.
-if [ "${brcmd}" == "addif" ] ; then
-    brctl ${brcmd} ${bridge} ${vif}
-fi
-ifconfig ${vif} $OP
-
-if [ "${ip}" ] ; then
-
-    # If we've been given a list of IP networks, allow pkts with these src addrs.
-    for addr in ${ip} ; do
-        iptables ${iptcmd} FORWARD -m physdev --physdev-in ${vif} -s ${addr} -j ACCEPT
-    done 
-
-    # Always allow us to talk to a DHCP server anyhow.
-    iptables ${iptcmd} FORWARD -m physdev --physdev-in ${vif} -p udp --sport 68 --dport 67 -j ACCEPT
-fi
-
+log debug "vif-bridge operation for $vif successful."
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/examples/vif-nat
--- a/tools/examples/vif-nat    Fri Oct 21 19:58:39 2005
+++ b/tools/examples/vif-nat    Mon Oct 24 15:08:13 2005
@@ -3,64 +3,54 @@
 # /etc/xen/vif-nat
 #
 # Script for configuring a vif in routed-nat mode.
-# Xend calls a vif script when bringing a vif up or down.
-# This script is the default - but it can be configured for each vif.
-#
-# Example invocation:
-#
-# vif-nat up domain=VM1 vif=vif1.0 ip="192.168.0.10/31"
+# The hotplugging system will call this script if it is specified either in
+# the device configuration given to Xend, or the default Xend configuration
+# in /etc/xen/xend-config.sxp.  If the script is specified in neither of those
+# places, then vif-bridge is the default.
 #
 # Usage:
-# vif-nat (up|down) {VAR=VAL}*
+# vif-nat (up|down)
 #
-# Vars:
+# Environment vars:
+# vif         vif interface name (required).
+# XENBUS_PATH path to this device's details in the XenStore (required).
 #
-# domain  name of the domain the interface is on (required).
-# vif     vif interface name (required).
-# ip      list of IP networks for the vif, space-separated (required).
+# Read from the store:
+# ip      list of IP networks for the vif, space-separated (default given in
+#         this script).
 #============================================================================
 
-# Exit if anything goes wrong
-set -e 
-export PATH=/sbin:/bin:/usr/bin:/usr/sbin:$PATH
-echo "*vif-nat $*" >&2
+dir=$(dirname "$0")
+. "$dir/vif-common.sh"
 
-# Operation name.
-OP=$1
-shift
+if [ "$ip" == "" ]
+then
+  ip='169.254.1.1/24'
+fi
 
-# Pull variables in args into environment
-for arg ; do export "${arg}" ; done
+#determine ip address and netmask 
+vif_ip=`echo ${ip} | awk -F/ '{print $1}'`
+bits=`echo ${ip} | awk -F/ '{print $2}'`
+intmask=$(( ((0xFFFFFFFF << ((32 - $bits)))) & 0xFFFFFFFF ))
+netmask=$(( (($intmask & 0xFF000000)) >> 24 ))
+netmask=$netmask.$(( (($intmask & 0x00FF0000)) >> 16 ))
+netmask=$netmask.$(( (($intmask & 0x0000FF00)) >> 8 ))
+netmask=$netmask.$(( $intmask & 0x000000FF ))
 
-# Required parameters. Fail if not set.
-domain=${domain:?}
-vif=${vif:?}
-ip=${ip:?} 
+main_ip=$(ip addr show eth0 | sed -e '/inet /!d;s/^.*inet \([^\s*]\)\s.*$/\1/')
 
-# strip /netmask
-vif_ip=`echo ${ip} | awk -F/ '{print $1}'`
-
-main_ip=`ifconfig eth0 | grep "inet addr:" | sed -e 's/.*inet addr:\(\w\w*\.\w\w*\.\w\w*\.\w\w*\).*/\1/'`
-
-# Are we going up or down?
-case $OP in
+case "$command" in
     up)
-        ifconfig ${vif} ${vif_ip} netmask 255.255.255.0 up
+        ifconfig ${vif} ${vif_ip} netmask ${netmask} up
         echo 1 >/proc/sys/net/ipv4/conf/${vif}/proxy_arp
-        iptcmd='-A'
         ipcmd='a'
         ;;
     down)
         ifconfig ${vif} down
-        iptcmd='-D'
         ipcmd='d'
-        ;;
-    *)
-        echo 'Invalid command: ' $OP >&2
-        echo 'Valid commands are: up, down' >&2
-        exit 1
         ;;
 esac
 
 ip r ${ipcmd} ${ip} dev ${vif} src ${main_ip}
-#    iptables ${iptcmd} FORWARD -m physdev --physdev-in ${vif} -p udp --sport 68 --dport 67 -j ACCEPT
+
+handle_iptable
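
vif-nat above goes the other way: it expands the prefix length found in the vif's ip entry into a dotted-quad netmask with shell arithmetic (0xFFFFFFFF shifted left by 32 - bits, then split into octets) before handing it to ifconfig. The equivalent computation in C might look like the sketch below; prefix_to_netmask() is an invented name, shown only to make the bit manipulation explicit.

#include <stdio.h>
#include <stdint.h>

/* Expand a CIDR prefix length (0..32) into a dotted-quad netmask string,
 * mirroring the arithmetic vif-nat performs in shell. buf must hold 16 bytes. */
static const char *prefix_to_netmask(int bits, char *buf)
{
    uint32_t mask;

    if (bits <= 0)
        mask = 0;
    else if (bits >= 32)
        mask = 0xffffffffu;
    else
        mask = 0xffffffffu << (32 - bits);   /* high 'bits' bits set */

    snprintf(buf, 16, "%u.%u.%u.%u",
             (unsigned)((mask >> 24) & 0xff), (unsigned)((mask >> 16) & 0xff),
             (unsigned)((mask >> 8) & 0xff), (unsigned)(mask & 0xff));
    return buf;
}

With bits=24 this yields 255.255.255.0, matching what the script passes as the netmask argument.
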
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/examples/vif-route
--- a/tools/examples/vif-route  Fri Oct 21 19:58:39 2005
+++ b/tools/examples/vif-route  Mon Oct 24 15:08:13 2005
@@ -3,74 +3,46 @@
 # /etc/xen/vif-route
 #
 # Script for configuring a vif in routed mode.
-# Xend calls a vif script when bringing a vif up or down.
-# This script is the default - but it can be configured for each vif.
-#
-# Example invocation:
-#
-# vif-route up domain=VM1 vif=vif1.0 ip="128.232.38.45/28 10.10.10.55/24"
+# The hotplugging system will call this script if it is specified either in
+# the device configuration given to Xend, or the default Xend configuration
+# in /etc/xen/xend-config.sxp.  If the script is specified in neither of those
+# places, then vif-bridge is the default.
 #
 # Usage:
-# vif-route (up|down) {VAR=VAL}*
+# vif-route (up|down)
 #
-# Vars:
+# Environment vars:
+# vif         vif interface name (required).
+# XENBUS_PATH path to this device's details in the XenStore (required).
 #
-# domain  name of the domain the interface is on (required).
-# vif     vif interface name (required).
-# mac     vif MAC address (required).
-# ip      list of IP networks for the vif, space-separated (optional).
+# Read from the store:
+# ip      list of IP networks for the vif, space-separated (default given in
+#         this script).
 #============================================================================
 
-# Exit if anything goes wrong
-set -e 
-export PATH=/sbin:/bin:/usr/bin:/usr/sbin:$PATH
-echo "*vif-route $*" >&2
+dir=$(dirname "$0")
+. "$dir/vif-common.sh"
 
-# Operation name.
-OP=$1
-shift
+main_ip=$(ip addr show eth0 | sed -e '/inet /!d;s/^.*inet \([^\s*]\)\s.*$/\1/')
 
-# Pull variables in args into environment
-for arg ; do export "${arg}" ; done
-
-# Required parameters. Fail if not set.
-domain=${domain:?}
-vif=${vif:?}
-mac=${mac:?}
-
-# Optional parameters. Set defaults.
-ip=${ip:-''}   # default to null (do nothing)
-
-main_ip=`ifconfig eth0 | grep "inet addr:" | sed -e 's/.*inet addr:\(\w\w*\.\w\w*\.\w\w*\.\w\w*\).*/\1/'`
-
-# Are we going up or down?
-case $OP in
+case "$command" in
     up)
-        ifconfig ${vif} 169.254.1.0 netmask 255.255.255.255 up
+        ifconfig ${vif} ${main_ip} netmask 255.255.255.255 up
         echo 1 >/proc/sys/net/ipv4/conf/${vif}/proxy_arp
-        iptcmd='-A'
         ipcmd='a'
         ;;
     down)
-        ifconfig ${vif} down
-        iptcmd='-D'
+        ifdown ${vif}
         ipcmd='d'
-        ;;
-    *)
-        echo 'Invalid command: ' $OP >&2
-        echo 'Valid commands are: up, down' >&2
-        exit 1
         ;;
 esac
 
 if [ "${ip}" ] ; then
-
-    # If we've been given a list of IP networks, allow pkts with these src addrs.
+    # If we've been given a list of IP addresses, then add routes from dom0 to
+    # the guest using those addresses.
     for addr in ${ip} ; do
       ip r ${ipcmd} ${addr} dev ${vif} src ${main_ip}
-#      iptables ${iptcmd} FORWARD -m physdev --physdev-in ${vif} -s ${addr} -j ACCEPT
     done 
+fi
 
-    # Always allow us to talk to a DHCP server anyhow.
-#    iptables ${iptcmd} FORWARD -m physdev --physdev-in ${vif} -p udp --sport 68 --dport 67 -j ACCEPT
-fi
+handle_iptable
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/examples/xen-backend.agent
--- a/tools/examples/xen-backend.agent  Fri Oct 21 19:58:39 2005
+++ b/tools/examples/xen-backend.agent  Mon Oct 24 15:08:13 2005
@@ -1,9 +1,4 @@
 #! /bin/sh
-
-#ACTION=add
-#DEVPATH=/devices/xen-backend/vif-1-0
-#PHYSDEVDRIVER=vif
-#XENBUS_TYPE=vif
 
 PATH=/etc/xen/scripts:$PATH
 
@@ -20,12 +15,16 @@
       vbd)
        /etc/xen/scripts/block unbind
         ;;
+      vif)
+        [ -n "$script" ] && $script down
+        ;;
     esac
     # remove device backend store entries
-    xenstore-rm "$XENBUS_PATH"
+    xenstore-rm -t "$XENBUS_PATH"
+    xenstore-rm -t "error/$XENBUS_PATH"
     ;;
   online)
-    case "$PHYSDEVDRIVER" in
+    case "$XENBUS_TYPE" in
       vif)
         [ -n "$script" ] && $script up
         ;;
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/examples/xen-backend.rules
--- a/tools/examples/xen-backend.rules  Fri Oct 21 19:58:39 2005
+++ b/tools/examples/xen-backend.rules  Mon Oct 24 15:08:13 2005
@@ -1,4 +1,4 @@
 SUBSYSTEM=="xen-backend", KERNEL=="vbd*", ACTION=="add", RUN+="/etc/xen/scripts/block bind"
 SUBSYSTEM=="xen-backend", KERNEL=="vbd*", ACTION=="remove", RUN+="/etc/xen/scripts/block unbind"
-SUBSYSTEM=="xen-backend", KERNEL=="vif*", ENV{PHYSDEVDRIVER}=="vif", ACTION=="online", RUN+="$env{script} up"
-SUBSYSTEM=="xen-backend", ACTION=="remove", RUN+="/usr/bin/xenstore-rm $env{XENBUS_PATH}"
+SUBSYSTEM=="xen-backend", KERNEL=="vif*", ACTION=="online", RUN+="$env{script} up"
+SUBSYSTEM=="xen-backend", ACTION=="remove", RUN+="/usr/bin/xenstore-rm -t $env{XENBUS_PATH}"
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/examples/xen-hotplug-common.sh
--- a/tools/examples/xen-hotplug-common.sh      Fri Oct 21 19:58:39 2005
+++ b/tools/examples/xen-hotplug-common.sh      Mon Oct 24 15:08:13 2005
@@ -1,6 +1,8 @@
 set -e
 
-export PATH=/sbin:/bin:/usr/bin:/usr/sbin:$PATH
+export PATH="/sbin:/bin:/usr/bin:/usr/sbin:$PATH"
+export LANG="POSIX"
+unset $(set | grep ^LC_ | cut -d= -f1)
 
 log() {
   local level="$1"
@@ -8,19 +10,20 @@
   logger -p "daemon.$level" -- "$0:" "$@" || echo "$0 $@" >&2
 }
 
+fatal() {
+  log err "$@"
+  exit 1
+}
+
 xenstore_read() {
   local v=$(xenstore-read "$@" || true)
-  if [ "$v" == "" ]
-  then
-    log error "xenstore-read $@ failed."
-    exit 1
-  fi
+  [ "$v" != "" ] || fatal "xenstore-read $@ failed."
   echo "$v"
 }
 
 xenstore_write() {
   log debug "Writing $@ to xenstore."
-  xenstore-write "$@" || log error "Writing $@ to xenstore failed."
+  xenstore-write "$@" || log err "Writing $@ to xenstore failed."
 }
 
 log debug "$@" "XENBUS_PATH=$XENBUS_PATH"
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/examples/xend-config.sxp
--- a/tools/examples/xend-config.sxp    Fri Oct 21 19:58:39 2005
+++ b/tools/examples/xend-config.sxp    Mon Oct 24 15:08:13 2005
@@ -1,24 +1,52 @@
+# -*- sh -*-
+
+#
 # Xend configuration file.
+#
 
-# Port xend should use for the HTTP interface.
-(xend-port         8000)
+# This example configuration is appropriate for an installation that trusts
+# only localhost connections, and is otherwise fully functional, with a
+# bridged network configuration.
 
-# Port xend should use for the event interface.
-(xend-event-port   8001)
+# Commented out entries show the default for that entry, unless otherwise
+# specified.
 
-# Address xend should listen on for HTTP connections.
+#(logfile /var/log/xend.log)
+#(loglevel DEBUG)
+
+#(xend-http-server no)
+(xend-http-server yes)
+#(xend-unix-server yes)
+#(xend-relocation-server no)
+(xend-relocation-server yes)
+
+#(xend-unix-path /var/lib/xend/xend-socket)
+
+# Port xend should use for the HTTP interface, if xend-http-server is set.
+#(xend-port            8000)
+
+# Port xend should use for the event interface.  This interface is deprecated.
+#(xend-event-port      8001)
+
+# Port xend should use for the relocation interface, if xend-relocation-server
+# is set.
+#(xend-relocation-port 8002)
+
+# Address xend should listen on for HTTP connections, if xend-http-server is
+# set.
 # Specifying 'localhost' prevents remote connections.
-# Specifying the empty string '' allows all connections.
-(xend-address      'localhost')
+# Specifying the empty string '' (the default) allows all connections.
+#(xend-address '')
+(xend-address localhost)
 
-# The port xend should start from when allocating a port
-# for a domain console.
-(console-port-base 9600)
+# Address xend should listen on for relocation-socket connections, if
+# xend-relocation-server is set.
+# Meaning and default as for xend-address above.
+#(xend-relocation-address '')
+(xend-relocation-address localhost)
 
-# Address xend should listen on for console connections.
-# Specifying 'localhost' prevents remote connections.
-# Specifying the empty string '' allows all connections.
-(console-address   'localhost')
+# The limit (in kilobytes) on the size of the console buffer
+#(console-limit 1024)
 
 ## Use the following if VIF traffic is routed.
 # The script used to start/stop networking for xend.
@@ -30,19 +58,9 @@
 # The script used to start/stop networking for xend.
 (network-script    network-bridge)
 # The default bridge that virtual interfaces should be connected to.
-(vif-bridge        xen-br0)
+(vif-bridge        xenbr0)
 # The default script used to control virtual interfaces.
 (vif-script        vif-bridge)
-
-# Whether iptables should be set up to prevent IP spoofing for
-# virtual interfaces. Specify 'yes' or 'no'.
-(vif-antispoof     no)
-
-# Setup script for file-backed block devices
-(block-file block-file)
-
-# Setup script for enbd-backed block devices
-(block-enbd block-enbd)
 
 # Dom0 will balloon out when needed to free memory for domU.
 # dom0-min-mem is the lowest memory level (in MB) dom0 will get down to.
@@ -52,3 +70,6 @@
 # In SMP system, dom0 will use dom0-cpus # of CPUS
 # If dom0-cpus = 0, dom0 will take all cpus available
 (dom0-cpus 0)
+
+# Whether to enable core-dumps when domains crash.
+#(enable-dump no)
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/examples/xmexample.vmx
--- a/tools/examples/xmexample.vmx      Fri Oct 21 19:58:39 2005
+++ b/tools/examples/xmexample.vmx      Mon Oct 24 15:08:13 2005
@@ -35,7 +35,7 @@
 
 # Optionally define mac and/or bridge for the network interfaces.
 # Random MACs are assigned if not given.
-#vif = [ 'mac=aa:00:00:00:00:11, bridge=xen-br0' ]
+#vif = [ 'mac=aa:00:00:00:00:11, bridge=xenbr0' ]
 
 #----------------------------------------------------------------------------
 # Define the disk devices you want the domain to have access to, and
@@ -117,6 +117,11 @@
 #nographic=0
 
 
+#----------------------------------------------------------------------------
+# enable ne2000, default = 0(use pcnet)
+ne2000=0
+
+
 #-----------------------------------------------------------------------------
 #   enable audio support
 #enable-audio=1
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/examples/xmexample1
--- a/tools/examples/xmexample1 Fri Oct 21 19:58:39 2005
+++ b/tools/examples/xmexample1 Mon Oct 24 15:08:13 2005
@@ -36,7 +36,7 @@
 
 # Optionally define mac and/or bridge for the network interfaces.
 # Random MACs are assigned if not given.
-#vif = [ 'mac=aa:00:00:00:00:11, bridge=xen-br0' ]
+#vif = [ 'mac=aa:00:00:00:00:11, bridge=xenbr0' ]
 
 #----------------------------------------------------------------------------
 # Define the disk devices you want the domain to have access to, and
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/examples/xmexample2
--- a/tools/examples/xmexample2 Fri Oct 21 19:58:39 2005
+++ b/tools/examples/xmexample2 Mon Oct 24 15:08:13 2005
@@ -67,7 +67,7 @@
 
 # Optionally define mac and/or bridge for the network interfaces.
 # Random MACs are assigned if not given.
-#vif = [ 'mac=aa:00:00:00:00:11, bridge=xen-br0' ]
+#vif = [ 'mac=aa:00:00:00:00:11, bridge=xenbr0' ]
 
 #----------------------------------------------------------------------------
 # Define the disk devices you want the domain to have access to, and
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/firmware/acpi/acpi2_0.h
--- a/tools/firmware/acpi/acpi2_0.h     Fri Oct 21 19:58:39 2005
+++ b/tools/firmware/acpi/acpi2_0.h     Mon Oct 24 15:08:13 2005
@@ -18,7 +18,21 @@
 #ifndef _ACPI_2_0_H_
 #define _ACPI_2_0_H_
 
-#include "xenctrl.h"  // for u8, u16, u32, u64 definition
+typedef unsigned char  uint8_t;
+typedef   signed char  int8_t;
+typedef unsigned short uint16_t;
+typedef   signed short int16_t;
+typedef unsigned int   uint32_t;
+typedef   signed int   int32_t;
+#ifdef __i386__
+typedef unsigned long long uint64_t;
+typedef   signed long long int64_t;
+#else
+typedef unsigned long uint64_t;
+typedef   signed long int64_t;
+#endif
+
+#include <xen/xen.h>
 
 #pragma pack (1)
 
@@ -27,15 +41,15 @@
 //
 
 typedef struct {
-               u32                     Signature;
-               u32             Length;
-               u8                      Revision;
-               u8                      Checksum;
-               u8                      OemId[6];
-               u64                     OemTableId;
-               u32                     OemRevision;
-               u32                     CreatorId;
-               u32                     CreatorRevision;
+               uint32_t                        Signature;
+               uint32_t        Length;
+               uint8_t                 Revision;
+               uint8_t                 Checksum;
+               uint8_t                 OemId[6];
+               uint64_t                        OemTableId;
+               uint32_t                        OemRevision;
+               uint32_t                        CreatorId;
+               uint32_t                        CreatorRevision;
 } ACPI_TABLE_HEADER;
 
 
@@ -49,11 +63,11 @@
 // ACPI 2.0 Generic Address Space definition
 //
 typedef struct {
-               u8                            AddressSpaceId;
-               u8                            RegisterBitWidth;
-               u8                            RegisterBitOffset;
-               u8                            Reserved;
-               u64                           Address;
+               uint8_t                            AddressSpaceId;
+               uint8_t                            RegisterBitWidth;
+               uint8_t                            RegisterBitOffset;
+               uint8_t                            Reserved;
+               uint64_t                           Address;
 } ACPI_GENERIC_ADDRESS_STRUCTURE;
 
 //
@@ -70,11 +84,11 @@
 // Root System Description Pointer Structure in ACPI 1.0
 //
 typedef struct {
-               u64                           Signature;
-               u8                            Checksum;
-               u8                            OemId[6];
-               u8                            Reserved;
-               u32                           RsdtAddress;
+               uint64_t                           Signature;
+               uint8_t                            Checksum;
+               uint8_t                            OemId[6];
+               uint8_t                            Reserved;
+               uint32_t                           RsdtAddress;
 } ACPI_1_0_RSDP;
 
 
@@ -82,15 +96,15 @@
 // Root System Description Pointer Structure
 //
 typedef struct {
-               u64                           Signature;
-               u8                            Checksum;
-               u8                            OemId[6];
-               u8                            Revision;
-               u32                           RsdtAddress;
-               u32                           Length;
-               u64                           XsdtAddress;
-               u8                            ExtendedChecksum;
-               u8                            Reserved[3];
+               uint64_t                           Signature;
+               uint8_t                            Checksum;
+               uint8_t                            OemId[6];
+               uint8_t                            Revision;
+               uint32_t                           RsdtAddress;
+               uint32_t                           Length;
+               uint64_t                           XsdtAddress;
+               uint8_t                            ExtendedChecksum;
+               uint8_t                            Reserved[3];
 } ACPI_2_0_RSDP;
 
 
@@ -105,7 +119,7 @@
 
 typedef struct {
                ACPI_TABLE_HEADER Header;
-               u32 Entry[ACPI_MAX_NUM_TABLES];
+               uint32_t Entry[ACPI_MAX_NUM_TABLES];
 }ACPI_2_0_RSDT;
 
 //
@@ -120,7 +134,7 @@
 
 typedef struct _ACPI_2_0_XSDT{
                ACPI_TABLE_HEADER Header;
-               u64 Entry[ACPI_MAX_NUM_TABLES];
+               uint64_t Entry[ACPI_MAX_NUM_TABLES];
 }ACPI_2_0_XSDT;
 #define ACPI_2_0_XSDT_REVISION 0x01
 
@@ -130,49 +144,49 @@
 
 typedef struct  {
                ACPI_TABLE_HEADER               Header;
-               u32                                    FirmwareCtrl;
-               u32                                    Dsdt;
-               u8                                     Reserved0;
-               u8                                     PreferredPmProfile;
-               u16                                    SciInt;
-               u32                                    SmiCmd;
-               u8                                     AcpiEnable;
-               u8                                     AcpiDisable;
-               u8                                     S4BiosReq;
-               u8                                     PstateCnt;
-               u32                                    Pm1aEvtBlk;
-               u32                                    Pm1bEvtBlk;
-               u32                                    Pm1aCntBlk;
-               u32                                    Pm1bCntBlk;
-               u32                                    Pm2CntBlk;
-               u32                                    PmTmrBlk;
-               u32                                    Gpe0Blk;
-               u32                                    Gpe1Blk;
-               u8                                     Pm1EvtLen;
-               u8                                     Pm1CntLen;
-               u8                                     Pm2CntLen;
-               u8                                     PmTmrLen;
-               u8                                     Gpe0BlkLen;
-               u8                                     Gpe1BlkLen;
-               u8                                     Gpe1Base;
-               u8                                     CstCnt;
-               u16                                    PLvl2Lat;
-               u16                                    PLvl3Lat;
-               u16                                    FlushSize;
-               u16                                    FlushStride;
-               u8                                     DutyOffset;
-               u8                                     DutyWidth;
-               u8                                     DayAlrm;
-               u8                                     MonAlrm;
-               u8                                     Century;
-               u16                                    IaPcBootArch;
-               u8                                     Reserved1;
-               u32                                    Flags;
+               uint32_t                                    FirmwareCtrl;
+               uint32_t                                    Dsdt;
+               uint8_t                                     Reserved0;
+               uint8_t                                     PreferredPmProfile;
+               uint16_t                                    SciInt;
+               uint32_t                                    SmiCmd;
+               uint8_t                                     AcpiEnable;
+               uint8_t                                     AcpiDisable;
+               uint8_t                                     S4BiosReq;
+               uint8_t                                     PstateCnt;
+               uint32_t                                    Pm1aEvtBlk;
+               uint32_t                                    Pm1bEvtBlk;
+               uint32_t                                    Pm1aCntBlk;
+               uint32_t                                    Pm1bCntBlk;
+               uint32_t                                    Pm2CntBlk;
+               uint32_t                                    PmTmrBlk;
+               uint32_t                                    Gpe0Blk;
+               uint32_t                                    Gpe1Blk;
+               uint8_t                                     Pm1EvtLen;
+               uint8_t                                     Pm1CntLen;
+               uint8_t                                     Pm2CntLen;
+               uint8_t                                     PmTmrLen;
+               uint8_t                                     Gpe0BlkLen;
+               uint8_t                                     Gpe1BlkLen;
+               uint8_t                                     Gpe1Base;
+               uint8_t                                     CstCnt;
+               uint16_t                                    PLvl2Lat;
+               uint16_t                                    PLvl3Lat;
+               uint16_t                                    FlushSize;
+               uint16_t                                    FlushStride;
+               uint8_t                                     DutyOffset;
+               uint8_t                                     DutyWidth;
+               uint8_t                                     DayAlrm;
+               uint8_t                                     MonAlrm;
+               uint8_t                                     Century;
+               uint16_t                                    IaPcBootArch;
+               uint8_t                                     Reserved1;
+               uint32_t                                    Flags;
                ACPI_GENERIC_ADDRESS_STRUCTURE    ResetReg;
-               u8                                     ResetValue;
-               u8                                     Reserved2[3];
-               u64                                    XFirmwareCtrl;
-               u64                                    XDsdt;
+               uint8_t                                     ResetValue;
+               uint8_t                                     Reserved2[3];
+               uint64_t                                    XFirmwareCtrl;
+               uint64_t                                    XDsdt;
                ACPI_GENERIC_ADDRESS_STRUCTURE    XPm1aEvtBlk;
                ACPI_GENERIC_ADDRESS_STRUCTURE    XPm1bEvtBlk;
                ACPI_GENERIC_ADDRESS_STRUCTURE    XPm1aCntBlk;
@@ -212,15 +226,15 @@
 // Firmware ACPI Control Structure (FACS)
 //
 typedef struct {
-               u32                               Signature;
-               u32                               Length;
-               u32                               HardwareSignature;
-               u32                               FirmwareWakingVector;
-               u32                               GlobalLock;
-               u32                               Flags;
-               u64                               XFirmwareWakingVector;
-               u8                                Version;
-               u8                                Reserved[31];
+               uint32_t                               Signature;
+               uint32_t                               Length;
+               uint32_t                               HardwareSignature;
+               uint32_t                               FirmwareWakingVector;
+               uint32_t                               GlobalLock;
+               uint32_t                               Flags;
+               uint64_t                               XFirmwareWakingVector;
+               uint8_t                                Version;
+               uint8_t                                Reserved[31];
 } ACPI_2_0_FACS;
 
 #define ACPI_2_0_FACS_VERSION 0x01
@@ -230,8 +244,8 @@
 //
 typedef struct {
                ACPI_TABLE_HEADER                       Header;
-               u32                                     LocalApicAddress;
-               u32                                     Flags;
+               uint32_t                                     LocalApicAddress;
+               uint32_t                                     Flags;
 } ACPI_2_0_MADT;
 
 #define ACPI_2_0_MADT_REVISION 0x01
@@ -263,11 +277,11 @@
 //
 
 typedef struct {
-               u8                                             Type;
-               u8                                             Length;
-               u8                                             AcpiProcessorId;
-               u8                                             ApicId;
-               u32                                            Flags;
+               uint8_t                                             Type;
+               uint8_t                                             Length;
+               uint8_t                                             AcpiProcessorId;
+               uint8_t                                             ApicId;
+               uint32_t                                            Flags;
 } ACPI_LOCAL_APIC_STRUCTURE;
 
 //
@@ -281,12 +295,12 @@
 //
 
 typedef struct {
-               u8                                             Type;
-               u8                                             Length;
-               u8                                             IoApicId;
-               u8                                             Reserved;
-               u32                                            IoApicAddress;
-               u32                                            GlobalSystemInterruptBase;
+               uint8_t                                             Type;
+               uint8_t                                             Length;
+               uint8_t                                             IoApicId;
+               uint8_t                                             Reserved;
+               uint32_t                                            IoApicAddress;
+               uint32_t                                            GlobalSystemInterruptBase;
 } ACPI_IO_APIC_STRUCTURE;
 
 // Tabel Signature
@@ -312,6 +326,6 @@
 #define ACPI_TABLE_SIZE (2*1024)  //Currently 2K is enough
 
 void
-AcpiBuildTable(u8* buf);
+AcpiBuildTable(uint8_t* buf);
 
 #endif
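
Aside, illustrative only and not part of this changeset: the typedef block added above swaps the tools-only u8/u16/u32/u64 aliases for ISO-style fixed-width names so acpi2_0.h no longer pulls in xenctrl.h. Below is a minimal stand-alone sketch of what the packed header layout is expected to look like with those types; the 36-byte figure is an assumption based on the ACPI 2.0 table-header definition, and ACPI_TABLE_HEADER_SKETCH is a hypothetical name used here to avoid clashing with the real header.

/* Sketch only: packed ACPI table header built from fixed-width types,
 * mirroring the structure in the hunk above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#pragma pack(1)
typedef struct {
    uint32_t Signature;
    uint32_t Length;
    uint8_t  Revision;
    uint8_t  Checksum;
    uint8_t  OemId[6];
    uint64_t OemTableId;
    uint32_t OemRevision;
    uint32_t CreatorId;
    uint32_t CreatorRevision;
} ACPI_TABLE_HEADER_SKETCH;
#pragma pack()

int main(void)
{
    /* With pack(1) and fixed-width types the layout comes out the same
     * on i386 and x86_64, which is the point of the type change above. */
    assert(sizeof(ACPI_TABLE_HEADER_SKETCH) == 36);
    printf("header size = %zu bytes\n", sizeof(ACPI_TABLE_HEADER_SKETCH));
    return 0;
}
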
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/firmware/acpi/acpi_build.c
--- a/tools/firmware/acpi/acpi_build.c  Fri Oct 21 19:58:39 2005
+++ b/tools/firmware/acpi/acpi_build.c  Mon Oct 24 15:08:13 2005
@@ -37,21 +37,21 @@
                ACPI_MULTIPLE_APIC_DESCRIPTION_TABLE *Madt;
                ACPI_2_0_FACS *Facs;
                unsigned char* Dsdt;
-               u32 RsdpOffset;
-               u32 RsdtOffset;
-               u32 XsdtOffset;
-               u32 FadtOffset;
-               u32 MadtOffset;
-               u32 FacsOffset;
-               u32 DsdtOffset;
+               uint32_t RsdpOffset;
+               uint32_t RsdtOffset;
+               uint32_t XsdtOffset;
+               uint32_t FadtOffset;
+               uint32_t MadtOffset;
+               uint32_t FacsOffset;
+               uint32_t DsdtOffset;
 }ACPI_TABLE_ALL;
 
 static 
 void
 MemCopy(void* src, void* dst, int len){
 
-       u8* src0=src;
-       u8* dst0=dst;   
+       uint8_t* src0=src;
+       uint8_t* dst0=dst;      
 
        while(len--){
                *(dst0++)=*(src0++);
@@ -62,8 +62,8 @@
 void
 SetCheckSum(
   void*  Table, 
-  u32 ChecksumOffset,
-  u32 Length
+  uint32_t ChecksumOffset,
+  uint32_t Length
 )
 /*
  * Routine Description:
@@ -76,23 +76,23 @@
  *     Length:         Length of Table
  */
 {
-       u8 Sum = 0;  
-       u8 *Ptr;
+       uint8_t Sum = 0;  
+       uint8_t *Ptr;
 
        Ptr=Table;
        Ptr[ChecksumOffset]=0;
        while (Length--) {    
-               Sum = (u8)(Sum + (*Ptr++));
+               Sum = (uint8_t)(Sum + (*Ptr++));
        }
        
        Ptr = Table;
-       Ptr[ChecksumOffset] = (u8) (0xff - Sum + 1);
+       Ptr[ChecksumOffset] = (uint8_t) (0xff - Sum + 1);
 }
 
 //
 //  FIELD_OFFSET - returns the byte offset to a field within a structure
 //
-#define FIELD_OFFSET(TYPE,Field) ((u32)(&(((TYPE *) 0)->Field)))
+#define FIELD_OFFSET(TYPE,Field) ((uint32_t)(&(((TYPE *) 0)->Field)))
 
 static
 void
@@ -106,9 +106,9 @@
  */
 {    
        // RSDP Update  
-       table->Rsdp->RsdtAddress = (u32)(ACPI_PHYSICAL_ADDRESS+
+       table->Rsdp->RsdtAddress = (uint32_t)(ACPI_PHYSICAL_ADDRESS+
                                        table->RsdtOffset);
-       table->Rsdp->XsdtAddress = (u64)(ACPI_PHYSICAL_ADDRESS+
+       table->Rsdp->XsdtAddress = (uint64_t)(ACPI_PHYSICAL_ADDRESS+
                                        table->XsdtOffset);
        SetCheckSum(table->Rsdp,
                                        FIELD_OFFSET(ACPI_1_0_RSDP, Checksum),
@@ -122,37 +122,37 @@
 
        
        //RSDT Update
-       table->Rsdt->Entry[0] = (u32)(ACPI_PHYSICAL_ADDRESS + 
+       table->Rsdt->Entry[0] = (uint32_t)(ACPI_PHYSICAL_ADDRESS + 
                                        table->FadtOffset);     
-       table->Rsdt->Entry[1] = (u32)(ACPI_PHYSICAL_ADDRESS + 
+       table->Rsdt->Entry[1] = (uint32_t)(ACPI_PHYSICAL_ADDRESS + 
                                        table->MadtOffset);
        table->Rsdt->Header.Length = sizeof (ACPI_TABLE_HEADER) +
-                                       2*sizeof(u32);
+                                       2*sizeof(uint32_t);
        SetCheckSum(table->Rsdt,
                                        FIELD_OFFSET(ACPI_TABLE_HEADER, Checksum),
                                        table->Rsdt->Header.Length
                           );   
        
        //XSDT  Update
-       table->Xsdt->Entry[0] = (u64)(ACPI_PHYSICAL_ADDRESS +
+       table->Xsdt->Entry[0] = (uint64_t)(ACPI_PHYSICAL_ADDRESS +
                                        table->FadtOffset);
-       table->Xsdt->Entry[1] = (u64)(ACPI_PHYSICAL_ADDRESS + 
+       table->Xsdt->Entry[1] = (uint64_t)(ACPI_PHYSICAL_ADDRESS + 
                                        table->MadtOffset);     
        table->Xsdt->Header.Length = sizeof (ACPI_TABLE_HEADER) + 
-                                       2*sizeof(u64);
+                                       2*sizeof(uint64_t);
        SetCheckSum(table->Xsdt,
                                        FIELD_OFFSET(ACPI_TABLE_HEADER, Checksum),
                                        table->Xsdt->Header.Length
                           );
 
        // FADT Update
-       table->Fadt->Dsdt = (u32)(ACPI_PHYSICAL_ADDRESS + 
+       table->Fadt->Dsdt = (uint32_t)(ACPI_PHYSICAL_ADDRESS + 
                                        table->DsdtOffset);     
-       table->Fadt->XDsdt = (u64)(ACPI_PHYSICAL_ADDRESS + 
+       table->Fadt->XDsdt = (uint64_t)(ACPI_PHYSICAL_ADDRESS + 
                                   table->DsdtOffset);
-       table->Fadt->FirmwareCtrl = (u32)(ACPI_PHYSICAL_ADDRESS +
+       table->Fadt->FirmwareCtrl = (uint32_t)(ACPI_PHYSICAL_ADDRESS +
                                        table->FacsOffset);
-       table->Fadt->XFirmwareCtrl = (u64)(ACPI_PHYSICAL_ADDRESS + 
+       table->Fadt->XFirmwareCtrl = (uint64_t)(ACPI_PHYSICAL_ADDRESS + 
                                        table->FacsOffset);     
        SetCheckSum(table->Fadt,
                                        FIELD_OFFSET(ACPI_TABLE_HEADER, Checksum),
@@ -167,7 +167,7 @@
 }
 
 void
-AcpiBuildTable(u8* buf)
+AcpiBuildTable(uint8_t* buf)
 /*
  * Copy all the ACPI table to buffer
  * Buffer Layout:
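
Aside, illustrative only: SetCheckSum above zeroes the checksum byte, sums every byte of the table, and stores 0x100 minus that sum, so a correctly built table sums to zero modulo 256. A small verifier written to that convention (a sketch, not code from the tree) follows.

/* Sketch: verify an ACPI-style byte checksum; returns 1 when the
 * byte-wise sum of the table, checksum field included, is 0 mod 256. */
#include <stddef.h>
#include <stdint.h>

static int checksum_ok(const void *table, size_t length)
{
    const uint8_t *p = table;
    uint8_t sum = 0;

    while (length--)
        sum = (uint8_t)(sum + *p++);

    return sum == 0;
}
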
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/firmware/acpi/acpi_gen.c
--- a/tools/firmware/acpi/acpi_gen.c    Fri Oct 21 19:58:39 2005
+++ b/tools/firmware/acpi/acpi_gen.c    Mon Oct 24 15:08:13 2005
@@ -16,44 +16,38 @@
  *
  */
 #include "acpi2_0.h"
-#include "stdio.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
 
-/*
- * Generate acpi table
- * write acpi table to binary: acpitable.bin
- *
- */
- 
-#define USAGE "Usage: acpi_gen filename \n" \
-                         "       generage acpitable and write to the binary \n" \
-                         "       filename - the binary name\n"
+#define USAGE  "Usage: acpi_gen filename \n"                           \
+               "       generage acpitable and write to the binary \n"  \
+               "       filename - the binary name\n"
 
+int main(int argc, char **argv)
+{
+       char *filename;
+       char  buf[ACPI_TABLE_SIZE] = { 0 };
+       FILE *f;
 
-int main(int argc, char** argv){
-               char* filename;
-               char  buf[ACPI_TABLE_SIZE];
-               FILE* f=NULL;
-               int i;
+       if (argc < 2) {
+               fprintf(stderr,"%s",USAGE);
+               exit(1);
+       }
 
-               for (i=0; i<ACPI_TABLE_SIZE; i++){
-                               buf[i]=0;
-               }
+       filename = argv[1];
+               
+       if ((f = fopen(filename, "w+")) == NULL) {
+               fprintf(stderr,"Can not open %s", filename);
+               exit(1);
+       }
 
-               if (argc<2){
-                               fprintf(stderr,"%s",USAGE);
-                               exit(1);
-               }
+       AcpiBuildTable((uint8_t *)buf);
 
-               filename = argv[1];
-               
-               if(!(f=fopen(filename, "w+"))){
-                               fprintf(stderr,"Can not open %s",filename);
-                               exit(1);
-               }               
-        AcpiBuildTable(buf);
-               if (fwrite(buf, ACPI_TABLE_SIZE, 1, f)<1){
-                               fprintf(stderr,"Can not write to %s\n",filename);
-                               exit(1);
-               }
-               return 0;               
+       if (fwrite(buf, ACPI_TABLE_SIZE, 1, f) < 1) {
+               fprintf(stderr,"Can not write to %s\n", filename);
+               exit(1);
+       }
+
+       return 0;
 }
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/firmware/vgabios/Makefile
--- a/tools/firmware/vgabios/Makefile   Fri Oct 21 19:58:39 2005
+++ b/tools/firmware/vgabios/Makefile   Mon Oct 24 15:08:13 2005
@@ -33,7 +33,7 @@
        cp VGABIOS-lgpl-latest.cirrus.debug.bin ../$(RELEASE).cirrus.debug.bin
        tar czvf ../$(RELEASE).tgz --exclude CVS -C .. $(RELEASE)/
 
-vgabios.bin: vgabios.c vgabios.h vgafonts.h vgatables.h vbe.h vbe.c vbetables.h
+vgabios.bin: biossums vgabios.c vgabios.h vgafonts.h vgatables.h vbe.h vbe.c vbetables.h
        $(GCC) -E -P vgabios.c $(VGABIOS_VERS) $(VGABIOS_DATE) > _vgabios_.c
        $(BCC) -o vgabios.s -C-c -D__i86__ -S -0 _vgabios_.c
        sed -e 's/^\.text//' -e 's/^\.data//' vgabios.s > _vgabios_.s
@@ -43,7 +43,7 @@
        ./biossums VGABIOS-lgpl-latest.bin
        ls -l VGABIOS-lgpl-latest.bin
 
-vgabios.debug.bin: vgabios.c vgabios.h vgafonts.h vgatables.h vbe.h vbe.c vbetables.h
+vgabios.debug.bin: biossums vgabios.c vgabios.h vgafonts.h vgatables.h vbe.h vbe.c vbetables.h
        $(GCC) -E -P vgabios.c $(VGABIOS_VERS) -DDEBUG $(VGABIOS_DATE) > _vgabios-debug_.c
        $(BCC) -o vgabios-debug.s -C-c -D__i86__ -S -0 _vgabios-debug_.c
        sed -e 's/^\.text//' -e 's/^\.data//' vgabios-debug.s > _vgabios-debug_.s
@@ -53,7 +53,7 @@
        ./biossums VGABIOS-lgpl-latest.debug.bin
        ls -l VGABIOS-lgpl-latest.debug.bin
 
-vgabios-cirrus.bin: vgabios.c vgabios.h vgafonts.h vgatables.h clext.c
+vgabios-cirrus.bin: biossums vgabios.c vgabios.h vgafonts.h vgatables.h clext.c
        $(GCC) -E -P vgabios.c $(VGABIOS_VERS) -DCIRRUS $(VGABIOS_DATE) > _vgabios-cirrus_.c
        $(BCC) -o vgabios-cirrus.s -C-c -D__i86__ -S -0 _vgabios-cirrus_.c
        sed -e 's/^\.text//' -e 's/^\.data//' vgabios-cirrus.s > _vgabios-cirrus_.s
@@ -63,7 +63,7 @@
        ./biossums VGABIOS-lgpl-latest.cirrus.bin
        ls -l VGABIOS-lgpl-latest.cirrus.bin
 
-vgabios-cirrus.debug.bin: vgabios.c vgabios.h vgafonts.h vgatables.h clext.c
+vgabios-cirrus.debug.bin: biossums vgabios.c vgabios.h vgafonts.h vgatables.h clext.c
        $(GCC) -E -P vgabios.c $(VGABIOS_VERS) -DCIRRUS -DCIRRUS_DEBUG $(VGABIOS_DATE) > _vgabios-cirrus-debug_.c
        $(BCC) -o vgabios-cirrus-debug.s -C-c -D__i86__ -S -0 _vgabios-cirrus-debug_.c
        sed -e 's/^\.text//' -e 's/^\.data//' vgabios-cirrus-debug.s > _vgabios-cirrus-debug_.s
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/firmware/vmxassist/Makefile
--- a/tools/firmware/vmxassist/Makefile Fri Oct 21 19:58:39 2005
+++ b/tools/firmware/vmxassist/Makefile Mon Oct 24 15:08:13 2005
@@ -24,14 +24,14 @@
 # The emulator code lives in ROM space
 TEXTADDR=0x000D0000
 
-DEFINES=-DDEBUG -DTEXTADDR=${TEXTADDR}
+DEFINES=-DDEBUG -D_ACPI_ -DTEXTADDR=$(TEXTADDR)
 XENINC=-I$(XEN_ROOT)/tools/libxc
 
 LD       = ld
 CC       = gcc
 CPP      = cpp -P
 OBJCOPY  = objcopy -p -O binary -R .note -R .comment -R .bss -S --gap-fill=0
-CFLAGS   = ${DEFINES} -I. $(XENINC) -Wall -fno-builtin -O2 -msoft-float
+CFLAGS   = $(DEFINES) -I. $(XENINC) -Wall -fno-builtin -O2 -msoft-float
 CFLAGS  += -m32 -march=i686
 LDFLAGS  = -m elf_i386
 
@@ -40,33 +40,33 @@
 all: vmxloader
 
 vmxloader: roms.h vmxloader.c acpi.h acpi_madt.c
-       ${CC} ${CFLAGS} ${DEFINES} -c vmxloader.c -c acpi_madt.c
+       $(CC) $(CFLAGS) $(DEFINES) -c vmxloader.c -c acpi_madt.c
        $(CC) -o vmxloader.tmp -m32 -nostdlib -Wl,-N -Wl,-Ttext -Wl,0x100000 vmxloader.o acpi_madt.o
        objcopy vmxloader.tmp vmxloader
        rm -f vmxloader.tmp
 
-vmxassist.bin: vmxassist.ld ${OBJECTS}
-       ${CPP} ${DEFINES} vmxassist.ld > vmxassist.tmp
-       ${LD} -o vmxassist ${LDFLAGS} -nostdlib --fatal-warnings -N -T vmxassist.tmp ${OBJECTS}
+vmxassist.bin: vmxassist.ld $(OBJECTS)
+       $(CPP) $(DEFINES) vmxassist.ld > vmxassist.tmp
+       $(LD) -o vmxassist $(LDFLAGS) -nostdlib --fatal-warnings -N -T vmxassist.tmp $(OBJECTS)
        nm -n vmxassist > vmxassist.sym
-       ${OBJCOPY} vmxassist vmxassist.tmp
+       $(OBJCOPY) vmxassist vmxassist.tmp
        dd if=vmxassist.tmp of=vmxassist.bin ibs=512 conv=sync
        rm -f vmxassist.tmp
 
 head.o: machine.h head.S
-       ${CC} ${CFLAGS} -D__ASSEMBLY__ ${DEFINES} -c head.S
+       $(CC) $(CFLAGS) -D__ASSEMBLY__ $(DEFINES) -c head.S
 
 trap.o: machine.h offsets.h trap.S
-       ${CC} ${CFLAGS} -D__ASSEMBLY__ ${DEFINES} -c trap.S
+       $(CC) $(CFLAGS) -D__ASSEMBLY__ $(DEFINES) -c trap.S
 
 vm86.o: machine.h vm86.c
-       ${CC} ${CFLAGS} -c vm86.c
+       $(CC) $(CFLAGS) -c vm86.c
 
 setup.o: machine.h setup.c
-       ${CC} ${CFLAGS} -c setup.c
+       $(CC) $(CFLAGS) -c setup.c
 
 util.o: machine.h util.c
-       ${CC} ${CFLAGS} -c util.c
+       $(CC) $(CFLAGS) -c util.c
 
 roms.h:        ../rombios/BIOS-bochs-latest ../vgabios/VGABIOS-lgpl-latest.bin ../vgabios/VGABIOS-lgpl-latest.cirrus.bin vmxassist.bin
        ./mkhex rombios ../rombios/BIOS-bochs-latest > roms.h
@@ -81,10 +81,10 @@
        ./gen > offsets.h
 
 gen:   gen.c
-       ${CC} ${CFLAGS} -o gen gen.c
+       $(HOSTCC) $(HOSTCFLAGS) -I. $(XENINC) -o gen gen.c
 
 clean:
        rm -f vmxassist vmxassist.tmp vmxassist.bin vmxassist.run vmxassist.sym head.s roms.h acpi.h
-       rm -f vmxloader vmxloader.tmp vmxloader.o ${OBJECTS}
+       rm -f vmxloader vmxloader.tmp vmxloader.o $(OBJECTS)
        rm -f gen gen.o offsets.h
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/firmware/vmxassist/acpi_madt.c
--- a/tools/firmware/vmxassist/acpi_madt.c      Fri Oct 21 19:58:39 2005
+++ b/tools/firmware/vmxassist/acpi_madt.c      Mon Oct 24 15:08:13 2005
@@ -24,17 +24,19 @@
 
 extern int puts(const char *s);
 
-#define VCPU_MAGIC 0x76637075 /* "vcpu" */
+#define VCPU_NR_PAGE        0x0009F000
+#define VCPU_NR_OFFSET      0x00000800
+#define VCPU_MAGIC          0x76637075  /* "vcpu" */
 
 /* xc_vmx_builder wrote vcpu block at 0x9F800. Return it. */
-static int 
-get_vcpus(void)
+static int
+get_vcpu_nr(void)
 {
-       unsigned long *vcpus;
+       unsigned int *vcpus;
 
-       vcpus = (unsigned long *)0x9F800;
+       vcpus = (unsigned int *)(VCPU_NR_PAGE + VCPU_NR_OFFSET);
        if (vcpus[0] != VCPU_MAGIC) {
-               puts("Bad vcpus magic, set vcpu number=1\n");
+               puts("Bad vcpus magic, set vcpu number to 1 by default.\n");
                return 1;
        }
 
@@ -123,7 +125,7 @@
        if (!madt)
                return -1;
 
-       rc = acpi_madt_set_local_apics(get_vcpus(), madt);
+       rc = acpi_madt_set_local_apics(get_vcpu_nr(), madt);
        if (rc != 0)
                return rc;
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/firmware/vmxassist/gen.c
--- a/tools/firmware/vmxassist/gen.c    Fri Oct 21 19:58:39 2005
+++ b/tools/firmware/vmxassist/gen.c    Mon Oct 24 15:08:13 2005
@@ -27,26 +27,26 @@
 {
        printf("/* MACHINE GENERATED; DO NOT EDIT */\n");
        printf("#define VMX_ASSIST_CTX_GS_SEL   0x%x\n",
-               offsetof(struct vmx_assist_context, gs_sel));
+               (unsigned int)offsetof(struct vmx_assist_context, gs_sel));
        printf("#define VMX_ASSIST_CTX_FS_SEL   0x%x\n",
-               offsetof(struct vmx_assist_context, fs_sel));
+               (unsigned int)offsetof(struct vmx_assist_context, fs_sel));
        printf("#define VMX_ASSIST_CTX_DS_SEL   0x%x\n",
-               offsetof(struct vmx_assist_context, ds_sel));
+               (unsigned int)offsetof(struct vmx_assist_context, ds_sel));
        printf("#define VMX_ASSIST_CTX_ES_SEL   0x%x\n",
-               offsetof(struct vmx_assist_context, es_sel));
+               (unsigned int)offsetof(struct vmx_assist_context, es_sel));
        printf("#define VMX_ASSIST_CTX_SS_SEL   0x%x\n",
-               offsetof(struct vmx_assist_context, ss_sel));
+               (unsigned int)offsetof(struct vmx_assist_context, ss_sel));
        printf("#define VMX_ASSIST_CTX_ESP      0x%x\n",
-               offsetof(struct vmx_assist_context, esp));
+               (unsigned int)offsetof(struct vmx_assist_context, esp));
        printf("#define VMX_ASSIST_CTX_EFLAGS   0x%x\n",
-               offsetof(struct vmx_assist_context, eflags));
+               (unsigned int)offsetof(struct vmx_assist_context, eflags));
        printf("#define VMX_ASSIST_CTX_CS_SEL   0x%x\n",
-               offsetof(struct vmx_assist_context, cs_sel));
+               (unsigned int)offsetof(struct vmx_assist_context, cs_sel));
        printf("#define VMX_ASSIST_CTX_EIP      0x%x\n",
-               offsetof(struct vmx_assist_context, eip));
+               (unsigned int)offsetof(struct vmx_assist_context, eip));
 
        printf("#define VMX_ASSIST_CTX_CR0      0x%x\n",
-               offsetof(struct vmx_assist_context, cr0));
+               (unsigned int)offsetof(struct vmx_assist_context, cr0));
 
        return 0;
 }
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/firmware/vmxassist/vm86.c
--- a/tools/firmware/vmxassist/vm86.c   Fri Oct 21 19:58:39 2005
+++ b/tools/firmware/vmxassist/vm86.c   Mon Oct 24 15:08:13 2005
@@ -465,7 +465,7 @@
  * Emulate a segment load in protected mode
  */
 int
-load_seg(unsigned long sel, u32 *base, u32 *limit, union vmcs_arbytes *arbytes)
+load_seg(unsigned long sel, uint32_t *base, uint32_t *limit, union vmcs_arbytes *arbytes)
 {
        unsigned long long entry;
 
@@ -784,7 +784,6 @@
                                }
                                break;
                        case 0x09: /* wbinvd */
-                               asm volatile ( "wbinvd" );
                                return OPC_EMULATED;
                        case 0x20: /* mov Rd, Cd (1h) */
                        case 0x22:
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/firmware/vmxassist/vm86.h
--- a/tools/firmware/vmxassist/vm86.h   Fri Oct 21 19:58:39 2005
+++ b/tools/firmware/vmxassist/vm86.h   Mon Oct 24 15:08:13 2005
@@ -22,15 +22,6 @@
 
 #ifndef __ASSEMBLY__
 #include <stdint.h>
-
-typedef uint8_t            u8;
-typedef uint16_t           u16;
-typedef uint32_t           u32;
-typedef uint64_t           u64;
-typedef int8_t             s8;
-typedef int16_t            s16;
-typedef int32_t            s32;
-typedef int64_t            s64;
 #endif
 
 #include <xen/vmx_assist.h>
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/ioemu/exec-all.h
--- a/tools/ioemu/exec-all.h    Fri Oct 21 19:58:39 2005
+++ b/tools/ioemu/exec-all.h    Mon Oct 24 15:08:13 2005
@@ -573,7 +573,7 @@
 }
 #endif
 
-#define DEBUG_UNUSED_IOPORT
-#define DEBUG_IOPORT
+//#define DEBUG_UNUSED_IOPORT
+//#define DEBUG_IOPORT
 #define TARGET_VMX
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/ioemu/hw/cirrus_vga_rop2.h
--- a/tools/ioemu/hw/cirrus_vga_rop2.h  Fri Oct 21 19:58:39 2005
+++ b/tools/ioemu/hw/cirrus_vga_rop2.h  Mon Oct 24 15:08:13 2005
@@ -47,6 +47,11 @@
     int x, y, pattern_y, pattern_pitch, pattern_x;
     unsigned int col;
     const uint8_t *src1;
+#if DEPTH == 24
+    int skipleft = s->gr[0x2f] & 0x1f;
+#else
+    int skipleft = (s->gr[0x2f] & 0x07) * (DEPTH / 8);
+#endif
 
 #if DEPTH == 8
     pattern_pitch = 8;
@@ -56,11 +61,11 @@
     pattern_pitch = 32;
 #endif
     pattern_y = s->cirrus_blt_srcaddr & 7;
-    pattern_x = 0;
-    for(y = 0; y < bltheight; y++) {
-        d = dst;
+    for(y = 0; y < bltheight; y++) {
+        pattern_x = skipleft;
+        d = dst + skipleft;
         src1 = src + pattern_y * pattern_pitch;
-        for (x = 0; x < bltwidth; x += (DEPTH / 8)) {
+        for (x = skipleft; x < bltwidth; x += (DEPTH / 8)) {
 #if DEPTH == 8
             col = src1[pattern_x];
             pattern_x = (pattern_x + 1) & 7;
@@ -99,7 +104,13 @@
     unsigned int col;
     unsigned bitmask;
     unsigned index;
-    int srcskipleft = 0;
+#if DEPTH == 24
+    int dstskipleft = s->gr[0x2f] & 0x1f;
+    int srcskipleft = dstskipleft / 3;
+#else
+    int srcskipleft = s->gr[0x2f] & 0x07;
+    int dstskipleft = srcskipleft * (DEPTH / 8);
+#endif
 
     if (s->cirrus_blt_modeext & CIRRUS_BLTMODEEXT_COLOREXPINV) {
         bits_xor = 0xff;
@@ -112,8 +123,8 @@
     for(y = 0; y < bltheight; y++) {
         bitmask = 0x80 >> srcskipleft;
         bits = *src++ ^ bits_xor;
-        d = dst;
-        for (x = 0; x < bltwidth; x += (DEPTH / 8)) {
+        d = dst + dstskipleft;
+        for (x = dstskipleft; x < bltwidth; x += (DEPTH / 8)) {
             if ((bitmask & 0xff) == 0) {
                 bitmask = 0x80;
                 bits = *src++ ^ bits_xor;
@@ -142,15 +153,16 @@
     unsigned bits;
     unsigned int col;
     unsigned bitmask;
-    int srcskipleft = 0;
+    int srcskipleft = s->gr[0x2f] & 0x07;
+    int dstskipleft = srcskipleft * (DEPTH / 8);
 
     colors[0] = s->cirrus_blt_bgcol;
     colors[1] = s->cirrus_blt_fgcol;
     for(y = 0; y < bltheight; y++) {
         bitmask = 0x80 >> srcskipleft;
         bits = *src++;
-        d = dst;
-        for (x = 0; x < bltwidth; x += (DEPTH / 8)) {
+        d = dst + dstskipleft;
+        for (x = dstskipleft; x < bltwidth; x += (DEPTH / 8)) {
             if ((bitmask & 0xff) == 0) {
                 bitmask = 0x80;
                 bits = *src++;
@@ -175,6 +187,13 @@
     int x, y, bitpos, pattern_y;
     unsigned int bits, bits_xor;
     unsigned int col;
+#if DEPTH == 24
+    int dstskipleft = s->gr[0x2f] & 0x1f;
+    int srcskipleft = dstskipleft / 3;
+#else
+    int srcskipleft = s->gr[0x2f] & 0x07;
+    int dstskipleft = srcskipleft * (DEPTH / 8);
+#endif
 
     if (s->cirrus_blt_modeext & CIRRUS_BLTMODEEXT_COLOREXPINV) {
         bits_xor = 0xff;
@@ -187,9 +206,9 @@
 
     for(y = 0; y < bltheight; y++) {
         bits = src[pattern_y] ^ bits_xor;
-        bitpos = 7;
-        d = dst;
-        for (x = 0; x < bltwidth; x += (DEPTH / 8)) {
+        bitpos = 7 - srcskipleft;
+        d = dst + dstskipleft;
+        for (x = dstskipleft; x < bltwidth; x += (DEPTH / 8)) {
             if ((bits >> bitpos) & 1) {
                 PUTPIXEL();
             }
@@ -213,6 +232,8 @@
     int x, y, bitpos, pattern_y;
     unsigned int bits;
     unsigned int col;
+    int srcskipleft = s->gr[0x2f] & 0x07;
+    int dstskipleft = srcskipleft * (DEPTH / 8);
 
     colors[0] = s->cirrus_blt_bgcol;
     colors[1] = s->cirrus_blt_fgcol;
@@ -220,9 +241,9 @@
 
     for(y = 0; y < bltheight; y++) {
         bits = src[pattern_y];
-        bitpos = 7;
-        d = dst;
-        for (x = 0; x < bltwidth; x += (DEPTH / 8)) {
+        bitpos = 7 - srcskipleft;
+        d = dst + dstskipleft;
+        for (x = dstskipleft; x < bltwidth; x += (DEPTH / 8)) {
             col = colors[(bits >> bitpos) & 1];
             PUTPIXEL();
             d += (DEPTH / 8);
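
Aside, illustrative only: every skipleft value introduced above is derived from s->gr[0x2f]; at 24 bpp the low five bits are used directly as a byte offset, while at other depths the low three bits count pixels and are scaled by the pixel size in bytes. A sketch of that derivation follows, with gr2f and depth as hypothetical parameters standing in for the device state.

/* Sketch: destination skip in bytes for a colour-expand blit,
 * mirroring the #if DEPTH == 24 logic in the hunks above. */
static int blt_dst_skip(unsigned int gr2f, int depth)
{
    if (depth == 24)
        return gr2f & 0x1f;                 /* already a byte offset */
    return (gr2f & 0x07) * (depth / 8);     /* pixels scaled to bytes */
}
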
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/ioemu/hw/i8254.c
--- a/tools/ioemu/hw/i8254.c    Fri Oct 21 19:58:39 2005
+++ b/tools/ioemu/hw/i8254.c    Mon Oct 24 15:08:13 2005
@@ -222,9 +222,7 @@
     int irq, i;
     PITChannelState *s;
 
-    /* Assumes PIT is wired to IRQ0 and -1 is uninitialized irq base */
-    if ((irq = pic_irq2vec(0)) == -1)
-        return;
+    irq = 0;
 
     for(i = 0; i < 3; i++) {
         if (pit_state.channels[i].vmx_channel)
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/ioemu/hw/i8259.c
--- a/tools/ioemu/hw/i8259.c    Fri Oct 21 19:58:39 2005
+++ b/tools/ioemu/hw/i8259.c    Mon Oct 24 15:08:13 2005
@@ -29,7 +29,7 @@
 //#define DEBUG_PIC
 
 //#define DEBUG_IRQ_LATENCY
-#define DEBUG_IRQ_COUNT
+//#define DEBUG_IRQ_COUNT
 
 extern void pit_reset_vmx_vectors();
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/ioemu/hw/ioapic.c
--- a/tools/ioemu/hw/ioapic.c   Fri Oct 21 19:58:39 2005
+++ b/tools/ioemu/hw/ioapic.c   Mon Oct 24 15:08:13 2005
@@ -493,7 +493,7 @@
     }
 }
 
-static inline int __fls(u32 word)
+static inline int __fls(uint32_t word)
 {
     int bit;
     __asm__("bsrl %1,%0"
@@ -581,7 +581,7 @@
     ioapic_set_irq(ioapic, irq, level);
 }
 
-static inline int find_highest_bit(u32 *data, int length){
+static inline int find_highest_bit(uint32_t *data, int length){
         while(length && !data[--length]);
             return __fls(data[length]) +  32 * length;
 }
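
Aside, illustrative only: __fls above returns the index of the most significant set bit via bsrl, and find_highest_bit scans an array of 32-bit words from the top. A portable sketch of the same pair using a GCC builtin is shown below; like bsrl, the result is undefined when no bit is set at all.

#include <stdint.h>

/* Sketch: portable stand-ins for the inline-asm helpers above. */
static inline int fls32(uint32_t word)
{
    return 31 - __builtin_clz(word);
}

static inline int find_highest_bit_sketch(const uint32_t *data, int length)
{
    while (length && !data[--length])
        ;
    return fls32(data[length]) + 32 * length;
}
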
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/ioemu/hw/pc.c
--- a/tools/ioemu/hw/pc.c       Fri Oct 21 19:58:39 2005
+++ b/tools/ioemu/hw/pc.c       Mon Oct 24 15:08:13 2005
@@ -541,10 +541,10 @@
 
     if (pci_enabled) {
         for(i = 0; i < nb_nics; i++) {
-            if (nic_pcnet)
-                pci_pcnet_init(pci_bus, &nd_table[i]);
+            if (nic_ne2000)
+                pci_ne2000_init(pci_bus, &nd_table[i]);
             else
-                pci_ne2000_init(pci_bus, &nd_table[i]); 
+                pci_pcnet_init(pci_bus, &nd_table[i]); 
         }
         pci_piix3_ide_init(pci_bus, bs_table);
 #ifdef APIC_SUPPORT
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/ioemu/hw/pckbd.c
--- a/tools/ioemu/hw/pckbd.c    Fri Oct 21 19:58:39 2005
+++ b/tools/ioemu/hw/pckbd.c    Mon Oct 24 15:08:13 2005
@@ -29,6 +29,9 @@
 /* debug PC keyboard : only mouse */
 //#define DEBUG_MOUSE
 
+/* enable synapatic touchpad device model */
+//#define SYNAPTIC
+
 /*     Keyboard Controller Commands */
 #define KBD_CCMD_READ_MODE     0x20    /* Read mode bits */
 #define KBD_CCMD_WRITE_MODE    0x60    /* Write mode bits */
@@ -117,10 +120,12 @@
     int rptr, wptr, count;
 } KBDQueue;
 
+#ifdef SYNAPTIC
 typedef struct {
     int absolute;
     int high;
 } TouchPad;
+#endif
 
 typedef struct KBDState {
     KBDQueue queue;
@@ -142,7 +147,9 @@
     int mouse_dy;
     int mouse_dz;
     uint8_t mouse_buttons;
+#ifdef SYNAPTIC
     TouchPad touchpad;
+#endif
 } KBDState;
 
 KBDState kbd_state;
@@ -399,6 +406,7 @@
     dx1 = s->mouse_dx;
     dy1 = s->mouse_dy;
     dz1 = s->mouse_dz;
+#ifdef SYNAPTIC
     if (s->touchpad.absolute)
     {
        int dz2, dleftnright, dg, df;
@@ -444,6 +452,7 @@
        kbd_queue(s, dy1 & 0xFF, 1);
        return;
     }
+#endif
     /* XXX: increase range to 8 bits ? */
     if (dx1 > 127)
         dx1 = 127;
@@ -516,9 +525,11 @@
 
 static void kbd_write_mouse(KBDState *s, int val)
 {
+#ifdef SYNAPTIC
 /* variables needed to store synaptics command info */
 static int rr = 0, ss = 0, tt = 0, uu = 0, res_count = 0, last_com = 0;
 int spare;
+#endif
 #ifdef DEBUG_MOUSE
     printf("kbd: write mouse 0x%02x\n", val);
 #endif
@@ -536,7 +547,9 @@
                 return;
             }
         }
+#ifdef SYNAPTIC
        last_com = val;
+#endif
         switch(val) {
         case AUX_SET_SCALE11:
             s->mouse_status &= ~MOUSE_STATUS_SCALE21;
@@ -568,6 +581,7 @@
             kbd_queue(s, AUX_ACK, 1);
             break;
         case AUX_GET_SCALE:
+#ifdef SYNAPTIC
            if (res_count == 4)
            {
                    /* time for the special stuff */
@@ -681,6 +695,7 @@
                    }
            }
            else
+#endif
            {
                    /* not a special command, just do the regular stuff */
             kbd_queue(s, AUX_ACK, 1);
@@ -705,14 +720,18 @@
             s->mouse_sample_rate = 100;
             s->mouse_resolution = 2;
             s->mouse_status = 0;
+#ifdef SYNAPTIC
                    s->touchpad.absolute = 0;
+#endif
             kbd_queue(s, AUX_ACK, 1);
             break;
         case AUX_RESET:
             s->mouse_sample_rate = 100;
             s->mouse_resolution = 2;
             s->mouse_status = 0;
+#ifdef SYNAPTIC
            s->touchpad.absolute = 0;
+#endif
             kbd_queue(s, AUX_ACK, 1);
             kbd_queue(s, 0xaa, 1);
             kbd_queue(s, s->mouse_type, 1);
@@ -722,6 +741,7 @@
         }
         break;
     case AUX_SET_SAMPLE:
+#ifdef SYNAPTIC
        if (res_count == 4 && val == 0x14)
        {
                /* time for the special stuff */
@@ -729,6 +749,7 @@
                val = (rr*64) + (ss*16) + (tt*4) + uu;
                /* TODO: set the mode byte */
        } else
+#endif
         s->mouse_sample_rate = val;
 #if 0
         /* detect IMPS/2 or IMEX */
@@ -762,6 +783,7 @@
         s->mouse_write_cmd = -1;
         break;
     case AUX_SET_RES:
+#ifdef SYNAPTIC
        if (last_com != AUX_SET_RES)
        {
                /* if its not 4 in a row, its not a command */
@@ -790,6 +812,7 @@
                        uu = val;
                        break;
        }
+#endif
         s->mouse_resolution = val;
         kbd_queue(s, AUX_ACK, 1);
         s->mouse_write_cmd = -1;
@@ -871,8 +894,10 @@
     qemu_put_be32s(f, &s->mouse_dy);
     qemu_put_be32s(f, &s->mouse_dz);
     qemu_put_8s(f, &s->mouse_buttons);
+#ifdef SYNAPTIC
     qemu_put_be32s(f, &s->touchpad.absolute);
     qemu_put_be32s(f, &s->touchpad.high);
+#endif
 }
 
 static int kbd_load(QEMUFile* f, void* opaque, int version_id)
@@ -897,8 +922,10 @@
     qemu_get_be32s(f, &s->mouse_dy);
     qemu_get_be32s(f, &s->mouse_dz);
     qemu_get_8s(f, &s->mouse_buttons);
+#ifdef SYNAPTIC
     qemu_get_be32s(f, &s->touchpad.absolute);
     qemu_get_be32s(f, &s->touchpad.high);
+#endif
     return 0;
 }
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/ioemu/monitor.c
--- a/tools/ioemu/monitor.c     Fri Oct 21 19:58:39 2005
+++ b/tools/ioemu/monitor.c     Mon Oct 24 15:08:13 2005
@@ -232,6 +232,161 @@
     exit(0);
 }
 
+typedef struct {
+    int keycode;
+    const char *name;
+} KeyDef;
+
+static const KeyDef key_defs[] = {
+    { 0x2a, "shift" },
+    { 0x36, "shift_r" },
+    
+    { 0x38, "alt" },
+    { 0xb8, "alt_r" },
+    { 0x1d, "ctrl" },
+    { 0x9d, "ctrl_r" },
+
+    { 0xdd, "menu" },
+
+    { 0x01, "esc" },
+
+    { 0x02, "1" },
+    { 0x03, "2" },
+    { 0x04, "3" },
+    { 0x05, "4" },
+    { 0x06, "5" },
+    { 0x07, "6" },
+    { 0x08, "7" },
+    { 0x09, "8" },
+    { 0x0a, "9" },
+    { 0x0b, "0" },
+    { 0x0e, "backspace" },
+
+    { 0x0f, "tab" },
+    { 0x10, "q" },
+    { 0x11, "w" },
+    { 0x12, "e" },
+    { 0x13, "r" },
+    { 0x14, "t" },
+    { 0x15, "y" },
+    { 0x16, "u" },
+    { 0x17, "i" },
+    { 0x18, "o" },
+    { 0x19, "p" },
+
+    { 0x1c, "ret" },
+
+    { 0x1e, "a" },
+    { 0x1f, "s" },
+    { 0x20, "d" },
+    { 0x21, "f" },
+    { 0x22, "g" },
+    { 0x23, "h" },
+    { 0x24, "j" },
+    { 0x25, "k" },
+    { 0x26, "l" },
+
+    { 0x2c, "z" },
+    { 0x2d, "x" },
+    { 0x2e, "c" },
+    { 0x2f, "v" },
+    { 0x30, "b" },
+    { 0x31, "n" },
+    { 0x32, "m" },
+    
+    { 0x39, "spc" },
+    { 0x3a, "caps_lock" },
+    { 0x3b, "f1" },
+    { 0x3c, "f2" },
+    { 0x3d, "f3" },
+    { 0x3e, "f4" },
+    { 0x3f, "f5" },
+    { 0x40, "f6" },
+    { 0x41, "f7" },
+    { 0x42, "f8" },
+    { 0x43, "f9" },
+    { 0x44, "f10" },
+    { 0x45, "num_lock" },
+    { 0x46, "scroll_lock" },
+
+    { 0x56, "<" },
+
+    { 0x57, "f11" },
+    { 0x58, "f12" },
+
+    { 0xb7, "print" },
+
+    { 0xc7, "home" },
+    { 0xc9, "pgup" },
+    { 0xd1, "pgdn" },
+    { 0xcf, "end" },
+
+    { 0xcb, "left" },
+    { 0xc8, "up" },
+    { 0xd0, "down" },
+    { 0xcd, "right" },
+
+    { 0xd2, "insert" },
+    { 0xd3, "delete" },
+    { 0, NULL },
+};
+
+static int get_keycode(const char *key)
+{
+    const KeyDef *p;
+
+    for(p = key_defs; p->name != NULL; p++) {
+        if (!strcmp(key, p->name))
+            return p->keycode;
+    }
+    return -1;
+}
+
+static void do_send_key(const char *string)
+{
+    char keybuf[16], *q;
+    uint8_t keycodes[16];
+    const char *p;
+    int nb_keycodes, keycode, i;
+    
+    nb_keycodes = 0;
+    p = string;
+    while (*p != '\0') {
+        q = keybuf;
+        while (*p != '\0' && *p != '-') {
+            if ((q - keybuf) < sizeof(keybuf) - 1) {
+                *q++ = *p;
+            }
+            p++;
+        }
+        *q = '\0';
+        keycode = get_keycode(keybuf);
+        if (keycode < 0) {
+            term_printf("unknown key: '%s'\n", keybuf);
+            return;
+        }
+        keycodes[nb_keycodes++] = keycode;
+        if (*p == '\0')
+            break;
+        p++;
+    }
+    /* key down events */
+    for(i = 0; i < nb_keycodes; i++) {
+        keycode = keycodes[i];
+        if (keycode & 0x80)
+            kbd_put_keycode(0xe0);
+        kbd_put_keycode(keycode & 0x7f);
+    }
+    /* key up events */
+    for(i = nb_keycodes - 1; i >= 0; i--) {
+        keycode = keycodes[i];
+        if (keycode & 0x80)
+            kbd_put_keycode(0xe0);
+        kbd_put_keycode(keycode | 0x80);
+    }
+}
+
+
 static int eject_device(BlockDriverState *bs, int force)
 {
     if (bdrv_is_inserted(bs)) {
@@ -331,6 +486,8 @@
       "item1[,...]", "activate logging of the specified items to 
'/tmp/qemu.log'" },
     { "q|quit", "", do_quit,
       "", "quit the emulator" },
+    { "sendkey", "s", do_send_key, 
+      "keys", "send keys to the VM (e.g. 'sendkey ctrl-alt-f1')" },
     { NULL, NULL, }, 
 };
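
Aside, illustrative only: do_send_key above splits its argument on '-', resolves each token through key_defs, then emits the press codes in order and the release codes in reverse, adding an 0xe0 prefix for extended (bit-7) scancodes, so 'sendkey ctrl-alt-f1' becomes 0x1d, 0x38, 0x3b. A stand-alone sketch of just the tokenising step, using a deliberately trimmed copy of the table:

/* Sketch: split "ctrl-alt-f1" the way do_send_key does and print the
 * scancode each token resolves to. */
#include <stdio.h>
#include <string.h>

static const struct { int keycode; const char *name; } keys[] = {
    { 0x1d, "ctrl" }, { 0x38, "alt" }, { 0x3b, "f1" }, { 0, NULL },
};

static int lookup(const char *name)
{
    int i;
    for (i = 0; keys[i].name != NULL; i++)
        if (!strcmp(name, keys[i].name))
            return keys[i].keycode;
    return -1;
}

int main(void)
{
    const char *p = "ctrl-alt-f1";
    char keybuf[16];

    while (*p != '\0') {
        char *q = keybuf;
        while (*p != '\0' && *p != '-') {
            if ((q - keybuf) < (int)sizeof(keybuf) - 1)
                *q++ = *p;
            p++;
        }
        *q = '\0';
        printf("%s -> 0x%02x\n", keybuf, lookup(keybuf));
        if (*p == '-')
            p++;
    }
    return 0;
}
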
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/ioemu/target-i386-dm/Makefile
--- a/tools/ioemu/target-i386-dm/Makefile       Fri Oct 21 19:58:39 2005
+++ b/tools/ioemu/target-i386-dm/Makefile       Mon Oct 24 15:08:13 2005
@@ -271,7 +271,7 @@
 
 # Hardware support
 VL_OBJS+= ide.o ne2000.o pckbd.o vga.o dma.o
-VL_OBJS+= fdc.o mc146818rtc.o serial.o i8259.o i8254.o pc.o port-e9.o
+VL_OBJS+= fdc.o mc146818rtc.o serial.o i8259_stub.o i8254.o pc.o port-e9.o
 VL_OBJS+= cirrus_vga.o pcnet.o
 
 ifeq ($(TARGET_ARCH), ppc)
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/ioemu/target-i386-dm/helper2.c
--- a/tools/ioemu/target-i386-dm/helper2.c      Fri Oct 21 19:58:39 2005
+++ b/tools/ioemu/target-i386-dm/helper2.c      Mon Oct 24 15:08:13 2005
@@ -122,7 +122,7 @@
 //the evtchn fd for polling
 int evtchn_fd = -1;
 //the evtchn port for polling the notification, should be inputed as bochs's parameter
-u16 ioreq_remote_port, ioreq_local_port;
+uint16_t ioreq_remote_port, ioreq_local_port;
 
 //some functions to handle the io req packet
 void
@@ -157,9 +157,9 @@
 ioreq_t* cpu_get_ioreq(void)
 {
        int rc;
-       u16 buf[2];
-       rc = read(evtchn_fd, buf, 2);
-       if (rc == 2 && buf[0] == ioreq_local_port){//got only one matched 16bit port index
+       uint16_t port;
+       rc = read(evtchn_fd, &port, sizeof(port));
+       if ((rc == sizeof(port)) && (port == ioreq_local_port)) {
                // unmask the wanted port again
                write(evtchn_fd, &ioreq_local_port, 2);
 
@@ -208,13 +208,13 @@
                                    int len, int is_write);
 
 static inline void
-read_physical(u64 addr, unsigned long size, void *val)
+read_physical(uint64_t addr, unsigned long size, void *val)
 {
         return cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 0);
 }
 
 static inline void
-write_physical(u64 addr, unsigned long size, void *val)
+write_physical(uint64_t addr, unsigned long size, void *val)
 {
         return cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 1);
 }
@@ -404,20 +404,6 @@
 }
 
 void
-do_interrupt(CPUState *env, int vector)
-{
-       unsigned long *intr;
-
-       // Send a message on the event channel. Add the vector to the shared mem
-       // page.
-       intr = (unsigned long *) &(shared_page->sp_global.pic_intr[0]);
-       atomic_set_bit(vector, intr);
-        if (loglevel & CPU_LOG_INT)
-                fprintf(logfile, "injecting vector: %x\n", vector);
-       env->send_event = 1;
-}
-
-void
 destroy_vmx_domain(void)
 {
     extern FILE* logfile;
@@ -429,7 +415,6 @@
 
 int main_loop(void)
 {
-       int vector;
        fd_set rfds;
        struct timeval tv;
        extern CPUState *global_env;
@@ -476,11 +461,6 @@
                ioapic_update_EOI();
 #endif
                cpu_timer_handler(env);
-               if (env->interrupt_request & CPU_INTERRUPT_HARD) {
-                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
-                       vector = cpu_get_pic_interrupt(env); 
-                       do_interrupt(env, vector);
-               }
 #ifdef APIC_SUPPORT
                if (ioapic_has_intr())
                     do_ioapic();
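
Aside, illustrative only: with the change above, cpu_get_ioreq reads pending ports from the event-channel device as native 16-bit values and writes the port number back to unmask it again. A minimal sketch of that read/re-arm pattern follows; evtchn_fd appears here as a parameter rather than the global used above, and my_port is a hypothetical stand-in for ioreq_local_port.

/* Sketch: block on the event-channel fd, accept one pending port,
 * and unmask it by writing the port number back (16-bit protocol
 * assumed, matching the code above). */
#include <stdint.h>
#include <unistd.h>

static int wait_for_port(int evtchn_fd, uint16_t my_port)
{
    uint16_t port;

    if (read(evtchn_fd, &port, sizeof(port)) != (ssize_t)sizeof(port))
        return -1;
    if (port != my_port)
        return -1;
    write(evtchn_fd, &port, sizeof(port));   /* re-arm this port */
    return 0;
}
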
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/ioemu/target-i386-dm/qemu-ifup
--- a/tools/ioemu/target-i386-dm/qemu-ifup      Fri Oct 21 19:58:39 2005
+++ b/tools/ioemu/target-i386-dm/qemu-ifup      Mon Oct 24 15:08:13 2005
@@ -7,4 +7,4 @@
 echo $*
 
 ifconfig $1 0.0.0.0 up
-brctl addif xen-br0 $1
+brctl addif xenbr0 $1
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/ioemu/vl.c
--- a/tools/ioemu/vl.c  Fri Oct 21 19:58:39 2005
+++ b/tools/ioemu/vl.c  Mon Oct 24 15:08:13 2005
@@ -125,7 +125,7 @@
 QEMUTimer *polling_timer;
 int vm_running;
 int audio_enabled = 0;
-int nic_pcnet = 1;
+int nic_ne2000 = 0;
 int vcpus = 1;
 int sb16_enabled = 1;
 int adlib_enabled = 1;
@@ -2130,7 +2130,7 @@
            "-prep           Simulate a PREP system (default is PowerMAC)\n"
            "-g WxH[xDEPTH]  Set the initial VGA graphic mode\n"
 #endif
-           "-nic-pcnet     simulate an AMD PC-Net PCI ethernet adaptor\n"
+           "-nic-ne2000     simulate an Realtek ne2k PCI ethernet adaptor\n"
            "\n"
            "Network options:\n"
            "-nics n         simulate 'n' network cards [default=1]\n"
@@ -2247,7 +2247,7 @@
     QEMU_OPTION_no_code_copy,
     QEMU_OPTION_vcpus,
     QEMU_OPTION_pci,
-    QEMU_OPTION_nic_pcnet,
+    QEMU_OPTION_nic_ne2000,
     QEMU_OPTION_isa,
     QEMU_OPTION_prep,
     QEMU_OPTION_k,
@@ -2334,7 +2334,7 @@
     
     /* temporary options */
     { "pci", 0, QEMU_OPTION_pci },
-    { "nic-pcnet", 0, QEMU_OPTION_nic_pcnet },
+    { "nic-ne2000", 0, QEMU_OPTION_nic_ne2000 },
     { "cirrusvga", 0, QEMU_OPTION_cirrusvga },
     { "vgaacc", HAS_ARG, QEMU_OPTION_vgaacc },
     { NULL },
@@ -2382,7 +2382,7 @@
 }
 
 int
-setup_mapping(int xc_handle, u32 dom, unsigned long toptab, unsigned long  *mem_page_array, unsigned long *page_table_array, unsigned long v_start, unsigned long v_end)
+setup_mapping(int xc_handle, uint32_t dom, unsigned long toptab, unsigned long  *mem_page_array, unsigned long *page_table_array, unsigned long v_start, unsigned long v_end)
 {
     l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
     l2_pgentry_t *vl2tab[4] = {NULL, NULL, NULL, NULL};
@@ -2449,7 +2449,7 @@
 }
 
 void
-unsetup_mapping(int xc_handle, u32 dom, unsigned long toptab, unsigned long v_start, unsigned long v_end)
+unsetup_mapping(int xc_handle, uint32_t dom, unsigned long toptab, unsigned long v_start, unsigned long v_end)
 {
     l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
     l2_pgentry_t *vl2tab[4], *vl2e=NULL, *vl2_table = NULL;
@@ -2806,7 +2806,7 @@
 
             case QEMU_OPTION_p:
                 {
-                  extern u16 ioreq_remote_port;
+                  extern uint16_t ioreq_remote_port;
                   ioreq_remote_port = atoi(optarg);
                   printf("port: %d\n", ioreq_remote_port);
                 }
@@ -2839,8 +2839,8 @@
             case QEMU_OPTION_pci:
                 pci_enabled = 1;
                 break;
-            case QEMU_OPTION_nic_pcnet:
-                nic_pcnet = 1;
+            case QEMU_OPTION_nic_ne2000:
+                nic_ne2000 = 1;
                 break;
             case QEMU_OPTION_isa:
                 pci_enabled = 0;
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/ioemu/vl.h
--- a/tools/ioemu/vl.h  Fri Oct 21 19:58:39 2005
+++ b/tools/ioemu/vl.h  Mon Oct 24 15:08:13 2005
@@ -602,7 +602,7 @@
 
 /* pcnet.c */
 
-extern int nic_pcnet;
+extern int nic_ne2000;
 
 void pci_pcnet_init(PCIBus *bus, NetDriverState *nd);
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_bvtsched.c
--- a/tools/libxc/xc_bvtsched.c Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_bvtsched.c Mon Oct 24 15:08:13 2005
@@ -39,10 +39,10 @@
 }
 
 int xc_bvtsched_domain_set(int xc_handle,
-                           u32 domid,
-                           u32 mcuadv,
+                           uint32_t domid,
+                           uint32_t mcuadv,
                            int warpback,
-                           s32 warpvalue,
+                           int32_t warpvalue,
                            long long warpl,
                            long long warpu)
 {
@@ -64,10 +64,10 @@
 
 
 int xc_bvtsched_domain_get(int xc_handle,
-                           u32 domid,
-                           u32 *mcuadv,
+                           uint32_t domid,
+                           uint32_t *mcuadv,
                            int *warpback,
-                           s32 *warpvalue,
+                           int32_t *warpvalue,
                            long long *warpl,
                            long long *warpu)
 {
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_core.c
--- a/tools/libxc/xc_core.c     Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_core.c     Mon Oct 24 15:08:13 2005
@@ -11,7 +11,7 @@
 
 static int
 copy_from_domain_page(int xc_handle,
-                      u32 domid,
+                      uint32_t domid,
                       unsigned long *page_array,
                       unsigned long src_pfn,
                       void *dst_page)
@@ -27,16 +27,16 @@
 
 int 
 xc_domain_dumpcore(int xc_handle,
-                   u32 domid,
+                   uint32_t domid,
                    const char *corename)
 {
     unsigned long nr_pages;
     unsigned long *page_array;
     xc_dominfo_t info;
-    int i, j, vcpu_map_size, dump_fd;
+    int i, nr_vcpus = 0, dump_fd;
     char *dump_mem, *dump_mem_start = NULL;
     struct xc_core_header header;
-    vcpu_guest_context_t     ctxt[MAX_VIRT_CPUS];
+    vcpu_guest_context_t  ctxt[MAX_VIRT_CPUS];
 
  
     if ((dump_fd = open(corename, O_CREAT|O_RDWR, S_IWUSR|S_IRUSR)) < 0) {
@@ -54,33 +54,25 @@
         goto error_out;
     }
  
-    vcpu_map_size =  sizeof(info.vcpu_to_cpu) / sizeof(info.vcpu_to_cpu[0]);
-
-    for (i = 0, j = 0; i < vcpu_map_size; i++) {
-        if (info.vcpu_to_cpu[i] == -1) {
-            continue;
-        }
-        if (xc_domain_get_vcpu_context(xc_handle, domid, i, &ctxt[j])) {
-            PERROR("Could not get all vcpu contexts for domain");
-            goto error_out;
-        }
-        j++;
-    }
+    for (i = 0; i < info.max_vcpu_id; i++)
+        if (xc_domain_get_vcpu_context(xc_handle, domid,
+                                       i, &ctxt[nr_vcpus]) == 0)
+            nr_vcpus++;
  
     nr_pages = info.nr_pages;
 
     header.xch_magic = 0xF00FEBED; 
-    header.xch_nr_vcpus = info.vcpus;
+    header.xch_nr_vcpus = nr_vcpus;
     header.xch_nr_pages = nr_pages;
     header.xch_ctxt_offset = sizeof(struct xc_core_header);
     header.xch_index_offset = sizeof(struct xc_core_header) +
-        sizeof(vcpu_guest_context_t)*info.vcpus;
+        sizeof(vcpu_guest_context_t)*nr_vcpus;
     header.xch_pages_offset = round_pgup(sizeof(struct xc_core_header) +
-                                         (sizeof(vcpu_guest_context_t) * info.vcpus) +
+                                         (sizeof(vcpu_guest_context_t) * nr_vcpus) +
                                          (nr_pages * sizeof(unsigned long)));
 
     write(dump_fd, &header, sizeof(struct xc_core_header));
-    write(dump_fd, &ctxt, sizeof(ctxt[0]) * info.vcpus);
+    write(dump_fd, &ctxt, sizeof(ctxt[0]) * nr_vcpus);
 
     if ((page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL) {
         printf("Could not allocate memory\n");
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_domain.c   Mon Oct 24 15:08:13 2005
@@ -10,8 +10,9 @@
 #include <xen/memory.h>
 
 int xc_domain_create(int xc_handle,
-                     u32 ssidref,
-                     u32 *pdomid)
+                     uint32_t ssidref,
+                     xen_domain_handle_t handle,
+                     uint32_t *pdomid)
 {
     int err;
     dom0_op_t op;
@@ -19,16 +20,17 @@
     op.cmd = DOM0_CREATEDOMAIN;
     op.u.createdomain.domain = (domid_t)*pdomid;
     op.u.createdomain.ssidref = ssidref;
+    memcpy(op.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
     if ( (err = do_dom0_op(xc_handle, &op)) != 0 )
         return err;
 
-    *pdomid = (u16)op.u.createdomain.domain;
+    *pdomid = (uint16_t)op.u.createdomain.domain;
     return 0;
 }    
 
 
 int xc_domain_pause(int xc_handle, 
-                    u32 domid)
+                    uint32_t domid)
 {
     dom0_op_t op;
     op.cmd = DOM0_PAUSEDOMAIN;
@@ -38,7 +40,7 @@
 
 
 int xc_domain_unpause(int xc_handle,
-                      u32 domid)
+                      uint32_t domid)
 {
     dom0_op_t op;
     op.cmd = DOM0_UNPAUSEDOMAIN;
@@ -48,7 +50,7 @@
 
 
 int xc_domain_destroy(int xc_handle,
-                      u32 domid)
+                      uint32_t domid)
 {
     dom0_op_t op;
     op.cmd = DOM0_DESTROYDOMAIN;
@@ -57,9 +59,9 @@
 }
 
 int xc_domain_pincpu(int xc_handle,
-                     u32 domid, 
+                     uint32_t domid, 
                      int vcpu,
-                     cpumap_t *cpumap)
+                     cpumap_t cpumap)
 {
     dom0_op_t op;
     op.cmd = DOM0_PINCPUDOMAIN;
@@ -71,12 +73,12 @@
 
 
 int xc_domain_getinfo(int xc_handle,
-                      u32 first_domid,
+                      uint32_t first_domid,
                       unsigned int max_doms,
                       xc_dominfo_t *info)
 {
     unsigned int nr_doms;
-    u32 next_domid = first_domid;
+    uint32_t next_domid = first_domid;
     dom0_op_t op;
     int rc = 0; 
 
@@ -88,7 +90,7 @@
         op.u.getdomaininfo.domain = (domid_t)next_domid;
         if ( (rc = do_dom0_op(xc_handle, &op)) < 0 )
             break;
-        info->domid      = (u16)op.u.getdomaininfo.domain;
+        info->domid      = (uint16_t)op.u.getdomaininfo.domain;
 
         info->dying    = !!(op.u.getdomaininfo.flags & DOMFLAGS_DYING);
         info->shutdown = !!(op.u.getdomaininfo.flags & DOMFLAGS_SHUTDOWN);
@@ -111,13 +113,13 @@
         info->max_memkb = op.u.getdomaininfo.max_pages << (PAGE_SHIFT - 10);
         info->shared_info_frame = op.u.getdomaininfo.shared_info_frame;
         info->cpu_time = op.u.getdomaininfo.cpu_time;
-        info->vcpus = op.u.getdomaininfo.n_vcpu;
-        memcpy(&info->vcpu_to_cpu, &op.u.getdomaininfo.vcpu_to_cpu, 
-               sizeof(info->vcpu_to_cpu));
-        memcpy(&info->cpumap, &op.u.getdomaininfo.cpumap, 
-               sizeof(info->cpumap));
-
-        next_domid = (u16)op.u.getdomaininfo.domain + 1;
+        info->nr_online_vcpus = op.u.getdomaininfo.nr_online_vcpus;
+        info->max_vcpu_id = op.u.getdomaininfo.max_vcpu_id;
+
+        memcpy(info->handle, op.u.getdomaininfo.handle,
+               sizeof(xen_domain_handle_t));
+
+        next_domid = (uint16_t)op.u.getdomaininfo.domain + 1;
         info++;
     }
 
@@ -127,7 +129,7 @@
 }
 
 int xc_domain_getinfolist(int xc_handle,
-                          u32 first_domain,
+                          uint32_t first_domain,
                           unsigned int max_domains,
                           xc_domaininfo_t *info)
 {
@@ -154,8 +156,8 @@
 }
 
 int xc_domain_get_vcpu_context(int xc_handle,
-                               u32 domid,
-                               u32 vcpu,
+                               uint32_t domid,
+                               uint32_t vcpu,
                                vcpu_guest_context_t *ctxt)
 {
     int rc;
@@ -163,27 +165,22 @@
 
     op.cmd = DOM0_GETVCPUCONTEXT;
     op.u.getvcpucontext.domain = (domid_t)domid;
-    op.u.getvcpucontext.vcpu   = (u16)vcpu;
+    op.u.getvcpucontext.vcpu   = (uint16_t)vcpu;
     op.u.getvcpucontext.ctxt   = ctxt;
 
-    if ( (ctxt != NULL) &&
-         ((rc = mlock(ctxt, sizeof(*ctxt))) != 0) )
+    if ( (rc = mlock(ctxt, sizeof(*ctxt))) != 0 )
         return rc;
 
     rc = do_dom0_op(xc_handle, &op);
 
-    if ( ctxt != NULL )
-        safe_munlock(ctxt, sizeof(*ctxt));
-
-    if ( rc > 0 )
-        return -ESRCH;
-    else
-        return rc;
+    safe_munlock(ctxt, sizeof(*ctxt));
+
+    return rc;
 }
 
 
 int xc_shadow_control(int xc_handle,
-                      u32 domid, 
+                      uint32_t domid, 
                       unsigned int sop,
                       unsigned long *dirty_bitmap,
                       unsigned long pages,
@@ -207,7 +204,7 @@
 }
 
 int xc_domain_setcpuweight(int xc_handle,
-                           u32 domid,
+                           uint32_t domid,
                            float weight)
 {
     int sched_id;
@@ -221,9 +218,9 @@
     {
         case SCHED_BVT:
         {
-            u32 mcuadv;
+            uint32_t mcuadv;
             int warpback;
-            s32 warpvalue;
+            int32_t warpvalue;
             long long warpl;
             long long warpu;
 
@@ -250,7 +247,7 @@
 }
 
 int xc_domain_setmaxmem(int xc_handle,
-                        u32 domid, 
+                        uint32_t domid, 
                         unsigned int max_memkb)
 {
     dom0_op_t op;
@@ -261,7 +258,7 @@
 }
 
 int xc_domain_memory_increase_reservation(int xc_handle,
-                                          u32 domid, 
+                                          uint32_t domid, 
                                           unsigned long nr_extents,
                                           unsigned int extent_order,
                                           unsigned int address_bits,
@@ -293,7 +290,7 @@
 }
 
 int xc_domain_memory_decrease_reservation(int xc_handle,
-                                          u32 domid, 
+                                          uint32_t domid, 
                                           unsigned long nr_extents,
                                           unsigned int extent_order,
                                           unsigned long *extent_start)
@@ -327,6 +324,44 @@
     }
 
     return err;
+}
+
+int xc_domain_max_vcpus(int xc_handle, uint32_t domid, unsigned int max)
+{
+    dom0_op_t op;
+    op.cmd = DOM0_MAX_VCPUS;
+    op.u.max_vcpus.domain = (domid_t)domid;
+    op.u.max_vcpus.max    = max;
+    return do_dom0_op(xc_handle, &op);
+}
+
+int xc_domain_sethandle(int xc_handle, uint32_t domid, 
+                        xen_domain_handle_t handle)
+{
+    dom0_op_t op;
+    op.cmd = DOM0_SETDOMAINHANDLE;
+    op.u.setdomainhandle.domain = (domid_t)domid;
+    memcpy(op.u.setdomainhandle.handle, handle, sizeof(xen_domain_handle_t));
+    return do_dom0_op(xc_handle, &op);
+}
+
+int xc_domain_get_vcpu_info(int xc_handle,
+                            uint32_t domid,
+                            uint32_t vcpu,
+                            xc_vcpuinfo_t *info)
+{
+    int rc;
+    dom0_op_t op;
+
+    op.cmd = DOM0_GETVCPUINFO;
+    op.u.getvcpuinfo.domain = (domid_t)domid;
+    op.u.getvcpuinfo.vcpu   = (uint16_t)vcpu;
+
+    rc = do_dom0_op(xc_handle, &op);
+
+    memcpy(info, &op.u.getvcpuinfo, sizeof(*info));
+
+    return rc;
 }
 
 /*
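
For reference, a minimal sketch of how the calls added to xc_domain.c above
might be exercised (illustrative only; the helper name, error handling and
output format are assumptions, not part of this patch):

    #include <stdio.h>
    #include "xenctrl.h"

    /* Tag an existing domain with a caller-chosen 16-byte handle, then report
     * how much cpu time its vcpu 0 has consumed. */
    static int tag_and_query(int xc_handle, uint32_t domid,
                             xen_domain_handle_t handle)
    {
        xc_vcpuinfo_t vinfo;

        if ( xc_domain_sethandle(xc_handle, domid, handle) != 0 )
            return -1;
        if ( xc_domain_get_vcpu_info(xc_handle, domid, 0, &vinfo) != 0 )
            return -1;
        printf("dom %u vcpu0 cpu_time=%llu\n",
               domid, (unsigned long long)vinfo.cpu_time);
        return 0;
    }
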
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_evtchn.c
--- a/tools/libxc/xc_evtchn.c   Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_evtchn.c   Mon Oct 24 15:08:13 2005
@@ -33,8 +33,8 @@
 
 
 int xc_evtchn_alloc_unbound(int xc_handle,
-                            u32 dom,
-                            u32 remote_dom)
+                            uint32_t dom,
+                            uint32_t remote_dom)
 {
     int         rc;
     evtchn_op_t op = {
@@ -50,7 +50,7 @@
 
 
 int xc_evtchn_status(int xc_handle,
-                     u32 dom,
+                     uint32_t dom,
                      int port,
                      xc_evtchn_status_t *status)
 {
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_gnttab.c
--- a/tools/libxc/xc_gnttab.c   Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_gnttab.c   Mon Oct 24 15:08:13 2005
@@ -40,12 +40,12 @@
 
 
 int xc_gnttab_map_grant_ref(int         xc_handle,
-                            u64    host_virt_addr,
-                            u32         dom,
-                            u16         ref,
-                            u16         flags,
-                            s16        *handle,
-                            u64   *dev_bus_addr)
+                            uint64_t    host_virt_addr,
+                            uint32_t    dom,
+                            uint16_t    ref,
+                            uint16_t    flags,
+                            int16_t    *handle,
+                            uint64_t   *dev_bus_addr)
 {
     struct gnttab_map_grant_ref op;
     int rc;
@@ -67,10 +67,10 @@
 
 
 int xc_gnttab_unmap_grant_ref(int       xc_handle,
-                              u64  host_virt_addr,
-                              u64  dev_bus_addr,
-                              u16       handle,
-                              s16      *status)
+                              uint64_t  host_virt_addr,
+                              uint64_t  dev_bus_addr,
+                              uint16_t  handle,
+                              int16_t  *status)
 {
     struct gnttab_unmap_grant_ref op;
     int rc;
@@ -89,9 +89,9 @@
 }
 
 int xc_gnttab_setup_table(int        xc_handle,
-                          u32        dom,
-                          u16        nr_frames,
-                          s16       *status,
+                          uint32_t   dom,
+                          uint16_t   nr_frames,
+                          int16_t   *status,
                           unsigned long **frame_list)
 {
     struct gnttab_setup_table op;
@@ -111,8 +111,8 @@
 }
 
 int xc_gnttab_dump_table(int        xc_handle,
-                         u32        dom,
-                         s16       *status)
+                         uint32_t   dom,
+                         int16_t   *status)
 {
     struct gnttab_dump_table op;
     int rc;
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_ia64_stubs.c
--- a/tools/libxc/xc_ia64_stubs.c       Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_ia64_stubs.c       Mon Oct 24 15:08:13 2005
@@ -1,14 +1,14 @@
 #include "xg_private.h"
 #include "xenguest.h"
 
-int xc_linux_save(int xc_handle, int io_fd, u32 dom, u32 max_iters, 
-                  u32 max_factor, u32 flags)
+int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters, 
+                  uint32_t max_factor, uint32_t flags)
 {
     PERROR("xc_linux_save not implemented\n");
     return -1;
 }
 
-int xc_linux_restore(int xc_handle, int io_fd, u32 dom, unsigned long nr_pfns,
+int xc_linux_restore(int xc_handle, int io_fd, uint32_t dom, unsigned long nr_pfns,
                      unsigned int store_evtchn, unsigned long *store_mfn,
                      unsigned int console_evtchn, unsigned long *console_mfn)
 {
@@ -32,7 +32,7 @@
 
 int
 xc_plan9_build(int xc_handle,
-               u32 domid,
+               uint32_t domid,
                const char *image_name,
                const char *cmdline,
                unsigned int control_evtchn, unsigned long flags)
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_linux_build.c
--- a/tools/libxc/xc_linux_build.c      Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_linux_build.c      Mon Oct 24 15:08:13 2005
@@ -64,7 +64,7 @@
 
 #define alloc_pt(ltab, vltab)                                           \
 do {                                                                    \
-    ltab = (u64)page_array[ppt_alloc++] << PAGE_SHIFT;                  \
+    ltab = (uint64_t)page_array[ppt_alloc++] << PAGE_SHIFT;                  \
     if ( vltab != NULL )                                                \
         munmap(vltab, PAGE_SIZE);                                       \
     if ( (vltab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,       \
@@ -76,7 +76,7 @@
 
 #if defined(__i386__)
 
-static int setup_pg_tables(int xc_handle, u32 dom,
+static int setup_pg_tables(int xc_handle, uint32_t dom,
                            vcpu_guest_context_t *ctxt,
                            unsigned long dsi_v_start,
                            unsigned long v_end,
@@ -123,7 +123,7 @@
     return -1;
 }
 
-static int setup_pg_tables_pae(int xc_handle, u32 dom,
+static int setup_pg_tables_pae(int xc_handle, uint32_t dom,
                                vcpu_guest_context_t *ctxt,
                                unsigned long dsi_v_start,
                                unsigned long v_end,
@@ -134,7 +134,7 @@
     l1_pgentry_64_t *vl1tab = NULL, *vl1e = NULL;
     l2_pgentry_64_t *vl2tab = NULL, *vl2e = NULL;
     l3_pgentry_64_t *vl3tab = NULL, *vl3e = NULL;
-    u64 l1tab, l2tab, l3tab;
+    uint64_t l1tab, l2tab, l3tab;
     unsigned long ppt_alloc, count, nmfn;
 
     /* First allocate page for page dir. */
@@ -173,7 +173,7 @@
             *vl2e++ = l1tab | L2_PROT;
         }
         
-        *vl1e = ((u64)page_array[count] << PAGE_SHIFT) | L1_PROT;
+        *vl1e = ((uint64_t)page_array[count] << PAGE_SHIFT) | L1_PROT;
         if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
              (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) ) 
             *vl1e &= ~_PAGE_RW;
@@ -199,7 +199,7 @@
 
 #if defined(__x86_64__)
 
-static int setup_pg_tables_64(int xc_handle, u32 dom,
+static int setup_pg_tables_64(int xc_handle, uint32_t dom,
                               vcpu_guest_context_t *ctxt,
                               unsigned long dsi_v_start,
                               unsigned long v_end,
@@ -280,7 +280,7 @@
 #ifdef __ia64__
 #include <asm/fpu.h> /* for FPSR_DEFAULT */
 static int setup_guest(int xc_handle,
-                       u32 dom,
+                       uint32_t dom,
                        char *image, unsigned long image_size,
                        gzFile initrd_gfd, unsigned long initrd_len,
                        unsigned long nr_pages,
@@ -289,7 +289,6 @@
                        const char *cmdline,
                        unsigned long shared_info_frame,
                        unsigned long flags,
-                       unsigned int vcpus,
                        unsigned int store_evtchn, unsigned long *store_mfn,
                        unsigned int console_evtchn, unsigned long *console_mfn)
 {
@@ -346,7 +345,7 @@
     *store_mfn = page_array[1];
     *console_mfn = page_array[2];
     printf("store_mfn: 0x%lx, console_mfn: 0x%lx\n",
-           (u64)store_mfn, (u64)console_mfn);
+           (uint64_t)store_mfn, (uint64_t)console_mfn);
 
     start_info = xc_map_foreign_range(
         xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, page_array[0]);
@@ -367,7 +366,7 @@
 }
 #else /* x86 */
 static int setup_guest(int xc_handle,
-                       u32 dom,
+                       uint32_t dom,
                        char *image, unsigned long image_size,
                        gzFile initrd_gfd, unsigned long initrd_len,
                        unsigned long nr_pages,
@@ -376,7 +375,6 @@
                        const char *cmdline,
                        unsigned long shared_info_frame,
                        unsigned long flags,
-                       unsigned int vcpus,
                        unsigned int store_evtchn, unsigned long *store_mfn,
                        unsigned int console_evtchn, unsigned long *console_mfn)
 {
@@ -500,11 +498,11 @@
            _p(dsi.v_start), _p(v_end));
     printf(" ENTRY ADDRESS: %p\n", _p(dsi.v_kernentry));
 
-    if ( (v_end - dsi.v_start) > (nr_pages * PAGE_SIZE) )
-    {
-        printf("Initial guest OS requires too much space\n"
+    if ( ((v_end - dsi.v_start)>>PAGE_SHIFT) > nr_pages )
+    {
+        PERROR("Initial guest OS requires too much space\n"
                "(%luMB is greater than %luMB limit)\n",
-               (v_end-dsi.v_start)>>20, (nr_pages<<PAGE_SHIFT)>>20);
+               (v_end-dsi.v_start)>>20, nr_pages>>(20-PAGE_SHIFT));
         goto error_out;
     }
 
@@ -573,7 +571,7 @@
     {
         if ( xc_add_mmu_update(
             xc_handle, mmu,
-            ((u64)page_array[count] << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
+            ((uint64_t)page_array[count] << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
             count) )
         {
             fprintf(stderr,"m2p update failure p=%lx m=%lx\n",
@@ -653,9 +651,6 @@
     for ( i = 0; i < MAX_VIRT_CPUS; i++ )
         shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
 
-    shared_info->n_vcpu = vcpus;
-    printf(" VCPUS:         %d\n", shared_info->n_vcpu);
-
     munmap(shared_info, PAGE_SIZE);
 
     /* Send the page update requests down to the hypervisor. */
@@ -679,12 +674,11 @@
 #endif
 
 int xc_linux_build(int xc_handle,
-                   u32 domid,
+                   uint32_t domid,
                    const char *image_name,
                    const char *ramdisk_name,
                    const char *cmdline,
                    unsigned long flags,
-                   unsigned int vcpus,
                    unsigned int store_evtchn,
                    unsigned long *store_mfn,
                    unsigned int console_evtchn,
@@ -735,7 +729,7 @@
     op.cmd = DOM0_GETDOMAININFO;
     op.u.getdomaininfo.domain = (domid_t)domid;
     if ( (xc_dom0_op(xc_handle, &op) < 0) || 
-         ((u16)op.u.getdomaininfo.domain != domid) )
+         ((uint16_t)op.u.getdomaininfo.domain != domid) )
     {
         PERROR("Could not get info on domain");
         goto error_out;
@@ -758,8 +752,7 @@
                      &vstartinfo_start, &vkern_entry,
                      &vstack_start, ctxt, cmdline,
                      op.u.getdomaininfo.shared_info_frame,
-                     flags, vcpus,
-                     store_evtchn, store_mfn,
+                     flags, store_evtchn, store_mfn,
                      console_evtchn, console_mfn) < 0 )
     {
         ERROR("Error constructing guest OS");
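
With the vcpus argument dropped from xc_linux_build(), the vcpu count is now a
property of the domain itself, presumably configured up front via the new
xc_domain_max_vcpus() call. A sketch of the resulting call order (illustrative
only; the wrapper name and the zero flags value are assumptions):

    #include "xenctrl.h"
    #include "xenguest.h"

    static int build_pv_guest(int xc_handle, uint32_t ssidref,
                              xen_domain_handle_t handle, unsigned int vcpus,
                              const char *image, const char *ramdisk,
                              const char *cmdline,
                              unsigned int store_evtchn, unsigned long *store_mfn,
                              unsigned int console_evtchn, unsigned long *console_mfn)
    {
        uint32_t domid = 0;

        if ( xc_domain_create(xc_handle, ssidref, handle, &domid) != 0 )
            return -1;
        /* Formerly an xc_linux_build() argument. */
        if ( xc_domain_max_vcpus(xc_handle, domid, vcpus) != 0 )
            return -1;
        return xc_linux_build(xc_handle, domid, image, ramdisk, cmdline,
                              0 /* flags */, store_evtchn, store_mfn,
                              console_evtchn, console_mfn);
    }
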
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_linux_restore.c
--- a/tools/libxc/xc_linux_restore.c    Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_linux_restore.c    Mon Oct 24 15:08:13 2005
@@ -43,6 +43,8 @@
 
     while (r < count) {
         s = read(fd, &b[r], count - r);
+        if ((s == -1) && (errno == EINTR))
+            continue;
         if (s <= 0)
             break;
         r += s;
@@ -51,7 +53,7 @@
     return r;
 }
 
-int xc_linux_restore(int xc_handle, int io_fd, u32 dom, unsigned long nr_pfns,
+int xc_linux_restore(int xc_handle, int io_fd, uint32_t dom, unsigned long nr_pfns,
                      unsigned int store_evtchn, unsigned long *store_mfn,
                      unsigned int console_evtchn, unsigned long *console_mfn)
 {
@@ -640,16 +642,6 @@
         goto out;
     }
 
-    DPRINTF("Domain ready to be unpaused\n");
-    op.cmd = DOM0_UNPAUSEDOMAIN;
-    op.u.unpausedomain.domain = (domid_t)dom;
-    rc = xc_dom0_op(xc_handle, &op);
-    if (rc == 0) {
-        /* Success: print the domain id. */
-        DPRINTF("DOM=%u\n", dom);
-        return 0;
-    }
-
  out:
     if ( (rc != 0) && (dom != 0) )
         xc_domain_destroy(xc_handle, dom);
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_linux_save.c
--- a/tools/libxc/xc_linux_save.c       Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_linux_save.c       Mon Oct 24 15:08:13 2005
@@ -245,7 +245,7 @@
     return write(io_fd, buf, n);
 }
 
-static int print_stats( int xc_handle, u32 domid, 
+static int print_stats( int xc_handle, uint32_t domid, 
                         int pages_sent, xc_shadow_control_stats_t *stats,
                         int print )
 {
@@ -299,7 +299,7 @@
     return 0;
 }
 
-static int analysis_phase( int xc_handle, u32 domid, 
+static int analysis_phase( int xc_handle, uint32_t domid, 
                            int nr_pfns, unsigned long *arr, int runs )
 {
     long long start, now;
@@ -399,8 +399,8 @@
     return -1;
 }
 
-int xc_linux_save(int xc_handle, int io_fd, u32 dom, u32 max_iters, 
-                  u32 max_factor, u32 flags)
+int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters, 
+                  uint32_t max_factor, uint32_t flags)
 {
     xc_dominfo_t info;
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_load_aout9.c
--- a/tools/libxc/xc_load_aout9.c       Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_load_aout9.c       Mon Oct 24 15:08:13 2005
@@ -18,8 +18,8 @@
 #define KOFFSET(_p)       ((_p)&~KZERO)
 
 static int parseaout9image(char *, unsigned long, struct domain_setup_info *);
-static int loadaout9image(char *, unsigned long, int, u32, unsigned long *, struct domain_setup_info *);
-static void copyout(int, u32, unsigned long *, unsigned long, void *, int);
+static int loadaout9image(char *, unsigned long, int, uint32_t, unsigned long *, struct domain_setup_info *);
+static void copyout(int, uint32_t, unsigned long *, unsigned long, void *, int);
 struct Exec *get_header(char *, unsigned long, struct Exec *);
 
 
@@ -79,7 +79,7 @@
 loadaout9image(
     char *image,
     unsigned long image_size,
-    int xch, u32 dom,
+    int xch, uint32_t dom,
     unsigned long *parray,
     struct domain_setup_info *dsi)
 {
@@ -108,7 +108,7 @@
  */
 static void
 copyout(
-    int xch, u32 dom,
+    int xch, uint32_t dom,
     unsigned long *parray,
     unsigned long addr,
     void *buf,
@@ -135,8 +135,8 @@
     }
 }
 
-#define swap16(_v) ((((u16)(_v)>>8)&0xff)|(((u16)(_v)&0xff)<<8))
-#define swap32(_v) (((u32)swap16((u16)(_v))<<16)|(u32)swap16((u32)((_v)>>16)))
+#define swap16(_v) ((((uint16_t)(_v)>>8)&0xff)|(((uint16_t)(_v)&0xff)<<8))
+#define swap32(_v) (((uint32_t)swap16((uint16_t)(_v))<<16)|(uint32_t)swap16((uint32_t)((_v)>>16)))
 
 /*
  * Decode the header from the start of image and return it.
@@ -147,7 +147,7 @@
     unsigned long image_size,
     struct Exec *ehdr)
 {
-    u32 *v, x;
+    uint32_t *v, x;
     int i;
 
     if (A9_MAGIC == 0)
@@ -157,9 +157,9 @@
         return 0;
 
     /* ... all big endian words */
-    v = (u32 *)ehdr;
+    v = (uint32_t *)ehdr;
     for (i = 0; i < sizeof(*ehdr); i += 4) {
-        x = *(u32 *)&image[i];
+        x = *(uint32_t *)&image[i];
         v[i/4] = swap32(x);
     }
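
The aout9 header words are stored big-endian, so the swap32() pass above puts
them into host order on a little-endian machine. An equivalent sketch using
ntohl() (an assumption that <arpa/inet.h> is acceptable here) expresses the
same decode without depending on host byte order:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    /* Fetch one big-endian 32-bit header word at byte offset `off'. */
    static uint32_t header_word(const char *image, unsigned int off)
    {
        uint32_t be;
        memcpy(&be, image + off, sizeof(be));  /* avoid unaligned reads */
        return ntohl(be);                      /* big-endian -> host order */
    }
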
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_load_bin.c
--- a/tools/libxc/xc_load_bin.c Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_load_bin.c Mon Oct 24 15:08:13 2005
@@ -11,14 +11,14 @@
  * The layout of the xen_bin_image table is:
  *
  * Offset Type Name          Note
- * 0      u32  magic         required
- * 4      u32  flags         required
- * 8      u32  checksum      required
- * 12     u32  header_addr   required
- * 16     u32  load_addr     required
- * 20     u32  load_end_addr required
- * 24     u32  bss_end_addr  required
- * 28     u32  entry_addr    required
+ * 0      uint32_t  magic         required
+ * 4      uint32_t  flags         required
+ * 8      uint32_t  checksum      required
+ * 12     uint32_t  header_addr   required
+ * 16     uint32_t  load_addr     required
+ * 20     uint32_t  load_end_addr required
+ * 24     uint32_t  bss_end_addr  required
+ * 28     uint32_t  entry_addr    required
  *
  * - magic
  *   Magic number identifying the table. For images to be loaded by Xen 3, the
@@ -105,7 +105,7 @@
     char *image, unsigned long image_size, struct domain_setup_info *dsi);
 static int
 loadbinimage(
-    char *image, unsigned long image_size, int xch, u32 dom,
+    char *image, unsigned long image_size, int xch, uint32_t dom,
     unsigned long *parray, struct domain_setup_info *dsi);
 
 int probe_bin(char *image,
@@ -237,7 +237,7 @@
 
 static int
 loadbinimage(
-    char *image, unsigned long image_size, int xch, u32 dom,
+    char *image, unsigned long image_size, int xch, uint32_t dom,
     unsigned long *parray, struct domain_setup_info *dsi)
 {
     unsigned long size;
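
The xen_bin_image table documented in the comment at the top of this file is
just eight consecutive 32-bit words; as a packed C struct it would look roughly
like this (a sketch with field names taken from that comment, not necessarily
the struct the loader really uses):

    #include <stdint.h>

    struct xen_bin_image_table_sketch {
        uint32_t magic;          /* offset  0, required */
        uint32_t flags;          /* offset  4, required */
        uint32_t checksum;       /* offset  8, required */
        uint32_t header_addr;    /* offset 12, required */
        uint32_t load_addr;      /* offset 16, required */
        uint32_t load_end_addr;  /* offset 20, required */
        uint32_t bss_end_addr;   /* offset 24, required */
        uint32_t entry_addr;     /* offset 28, required */
    } __attribute__((packed));
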
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_load_elf.c
--- a/tools/libxc/xc_load_elf.c Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_load_elf.c Mon Oct 24 15:08:13 2005
@@ -22,11 +22,11 @@
     char *image, unsigned long image_size, struct domain_setup_info *dsi);
 static int
 loadelfimage(
-    char *image, unsigned long image_size, int xch, u32 dom,
+    char *image, unsigned long image_size, int xch, uint32_t dom,
     unsigned long *parray, struct domain_setup_info *dsi);
 static int
 loadelfsymtab(
-    char *image, int xch, u32 dom, unsigned long *parray,
+    char *image, int xch, uint32_t dom, unsigned long *parray,
     struct domain_setup_info *dsi);
 
 int probe_elf(char *image,
@@ -168,7 +168,7 @@
 
 static int
 loadelfimage(
-    char *image, unsigned long elfsize, int xch, u32 dom,
+    char *image, unsigned long elfsize, int xch, uint32_t dom,
     unsigned long *parray, struct domain_setup_info *dsi)
 {
     Elf_Ehdr *ehdr = (Elf_Ehdr *)image;
@@ -219,7 +219,7 @@
 
 static int
 loadelfsymtab(
-    char *image, int xch, u32 dom, unsigned long *parray,
+    char *image, int xch, uint32_t dom, unsigned long *parray,
     struct domain_setup_info *dsi)
 {
     Elf_Ehdr *ehdr = (Elf_Ehdr *)image, *sym_ehdr;
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c     Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_misc.c     Mon Oct 24 15:08:13 2005
@@ -83,7 +83,7 @@
 }
 
 int xc_perfc_control(int xc_handle,
-                     u32 op,
+                     uint32_t op,
                      xc_perfc_desc_t *desc)
 {
     int rc;
@@ -131,11 +131,6 @@
     return rc;
 }
 
-long xc_init_store(int xc_handle, int remote_port)
-{
-    return ioctl(xc_handle, IOCTL_PRIVCMD_INITDOMAIN_STORE, remote_port);
-}
-
 /*
  * Local variables:
  * mode: C
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_physdev.c
--- a/tools/libxc/xc_physdev.c  Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_physdev.c  Mon Oct 24 15:08:13 2005
@@ -10,7 +10,7 @@
 #include "xc_private.h"
 
 int xc_physdev_pci_access_modify(int xc_handle,
-                                 u32 domid,
+                                 uint32_t domid,
                                  int bus,
                                  int dev,
                                  int func,
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c  Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_private.c  Mon Oct 24 15:08:13 2005
@@ -8,7 +8,7 @@
 #include "xc_private.h"
 #include <xen/memory.h>
 
-void *xc_map_foreign_batch(int xc_handle, u32 dom, int prot,
+void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
                            unsigned long *arr, int num )
 {
     privcmd_mmapbatch_t ioctlx; 
@@ -35,7 +35,7 @@
 
 /*******************/
 
-void *xc_map_foreign_range(int xc_handle, u32 dom,
+void *xc_map_foreign_range(int xc_handle, uint32_t dom,
                            int size, int prot,
                            unsigned long mfn )
 {
@@ -66,7 +66,7 @@
 
 /* NB: arr must be mlock'ed */
 int xc_get_pfn_type_batch(int xc_handle, 
-                          u32 dom, int num, unsigned long *arr)
+                          uint32_t dom, int num, unsigned long *arr)
 {
     dom0_op_t op;
     op.cmd = DOM0_GETPAGEFRAMEINFO2;
@@ -79,7 +79,7 @@
 #define GETPFN_ERR (~0U)
 unsigned int get_pfn_type(int xc_handle, 
                           unsigned long mfn, 
-                          u32 dom)
+                          uint32_t dom)
 {
     dom0_op_t op;
     op.cmd = DOM0_GETPAGEFRAMEINFO;
@@ -114,11 +114,7 @@
         goto out1;
     }
 
-    if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
-    {
-        fprintf(stderr, "Dom_mmuext operation failed (rc=%ld errno=%d)-- need to"
-                " rebuild the user-space tool set?\n",ret,errno);
-    }
+    ret = do_xen_hypercall(xc_handle, &hypercall);
 
     safe_munlock(op, nr_ops*sizeof(*op));
 
@@ -227,11 +223,7 @@
         break;
     }
 
-    if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
-    {
-        fprintf(stderr, "hypercall failed (rc=%ld errno=%d)-- need to"
-                " rebuild the user-space tool set?\n",ret,errno);
-    }
+    ret = do_xen_hypercall(xc_handle, &hypercall);
 
     switch ( cmd )
     {
@@ -256,16 +248,15 @@
 {
     dom0_op_t op;
 
-    op.cmd = DOM0_GETVCPUCONTEXT;
-    op.u.getvcpucontext.domain = (domid_t)domid;
-    op.u.getvcpucontext.vcpu   = (u16)vcpu;
-    op.u.getvcpucontext.ctxt   = NULL;
+    op.cmd = DOM0_GETVCPUINFO;
+    op.u.getvcpuinfo.domain = (domid_t)domid;
+    op.u.getvcpuinfo.vcpu   = (uint16_t)vcpu;
     if ( (do_dom0_op(xc_handle, &op) < 0) )
     {
         PERROR("Could not get info on domain");
         return -1;
     }
-    return op.u.getvcpucontext.cpu_time;
+    return op.u.getvcpuinfo.cpu_time;
 }
 
 
@@ -282,7 +273,7 @@
 }
 
 int xc_get_pfn_list(int xc_handle,
-                    u32 domid, 
+                    uint32_t domid, 
                     unsigned long *pfn_buf, 
                     unsigned long max_pfns)
 {
@@ -324,7 +315,7 @@
 
 #ifdef __ia64__
 int xc_ia64_get_pfn_list(int xc_handle,
-                         u32 domid, 
+                         uint32_t domid, 
                          unsigned long *pfn_buf, 
                          unsigned int start_page,
                          unsigned int nr_pages)
@@ -352,7 +343,7 @@
     return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
 }
 
-long xc_get_max_pages(int xc_handle, u32 domid)
+long xc_get_max_pages(int xc_handle, uint32_t domid)
 {
     dom0_op_t op;
     op.cmd = DOM0_GETDOMAININFO;
@@ -362,7 +353,7 @@
 }
 #endif
 
-long xc_get_tot_pages(int xc_handle, u32 domid)
+long xc_get_tot_pages(int xc_handle, uint32_t domid)
 {
     dom0_op_t op;
     op.cmd = DOM0_GETDOMAININFO;
@@ -372,7 +363,7 @@
 }
 
 int xc_copy_to_domain_page(int xc_handle,
-                           u32 domid,
+                           uint32_t domid,
                            unsigned long dst_pfn, 
                            void *src_page)
 {
@@ -387,8 +378,8 @@
 
 unsigned long xc_get_filesz(int fd)
 {
-    u16 sig;
-    u32 _sz = 0;
+    uint16_t sig;
+    uint32_t _sz = 0;
     unsigned long sz;
 
     lseek(fd, 0, SEEK_SET);
@@ -408,7 +399,7 @@
 }
 
 void xc_map_memcpy(unsigned long dst, char *src, unsigned long size,
-                   int xch, u32 dom, unsigned long *parray,
+                   int xch, uint32_t dom, unsigned long *parray,
                    unsigned long vstart)
 {
     char *va;
@@ -460,7 +451,7 @@
 }
 
 unsigned long xc_make_page_below_4G(
-    int xc_handle, u32 domid, unsigned long mfn)
+    int xc_handle, uint32_t domid, unsigned long mfn)
 {
     unsigned long new_mfn;
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_ptrace.c
--- a/tools/libxc/xc_ptrace.c   Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_ptrace.c   Mon Oct 24 15:08:13 2005
@@ -138,7 +138,7 @@
     int perm)
 {
     unsigned long l2p, l1p, p, va = (unsigned long)guest_va;
-    u64 *l3, *l2, *l1;
+    uint64_t *l3, *l2, *l1;
     static void *v;
 
     FETCH_REGS(cpu);
@@ -319,7 +319,7 @@
 xc_ptrace(
     int xc_handle,
     enum __ptrace_request request,
-    u32 domid,
+    uint32_t domid,
     long eaddr,
     long edata)
 {
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_ptrace_core.c
--- a/tools/libxc/xc_ptrace_core.c      Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_ptrace_core.c      Mon Oct 24 15:08:13 2005
@@ -222,7 +222,7 @@
 xc_ptrace_core(
     int xc_handle,
     enum __ptrace_request request,
-    u32 domfd,
+    uint32_t domfd,
     long eaddr,
     long edata)
 {
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_sedf.c
--- a/tools/libxc/xc_sedf.c     Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_sedf.c     Mon Oct 24 15:08:13 2005
@@ -11,7 +11,7 @@
 #include "xc_private.h"
 
 int xc_sedf_domain_set(int xc_handle,
-                          u32 domid, u64 period, u64 slice,u64 latency, u16 extratime,u16 weight)
+                          uint32_t domid, uint64_t period, uint64_t slice,uint64_t latency, uint16_t extratime,uint16_t weight)
 {
     dom0_op_t op;
     struct sedf_adjdom *p = &op.u.adjustdom.u.sedf;
@@ -29,7 +29,7 @@
     return do_dom0_op(xc_handle, &op);
 }
 
-int xc_sedf_domain_get(int xc_handle, u32 domid, u64 *period, u64 *slice, u64* latency, u16* extratime, u16* weight)
+int xc_sedf_domain_get(int xc_handle, uint32_t domid, uint64_t *period, uint64_t *slice, uint64_t* latency, uint16_t* extratime, uint16_t* weight)
 {
     dom0_op_t op;
     int ret;
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xc_vmx_build.c
--- a/tools/libxc/xc_vmx_build.c        Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xc_vmx_build.c        Mon Oct 24 15:08:13 2005
@@ -29,14 +29,17 @@
 #define E820_SHARED_PAGE 17
 #define E820_XENSTORE    18
 
-#define E820_MAP_PAGE        0x00090000
-#define E820_MAP_NR_OFFSET   0x000001E8
-#define E820_MAP_OFFSET      0x000002D0
+#define E820_MAP_PAGE       0x00090000
+#define E820_MAP_NR_OFFSET  0x000001E8
+#define E820_MAP_OFFSET     0x000002D0
+
+#define VCPU_NR_PAGE        0x0009F000
+#define VCPU_NR_OFFSET      0x00000800
 
 struct e820entry {
-    u64 addr;
-    u64 size;
-    u32 type;
+    uint64_t addr;
+    uint64_t size;
+    uint32_t type;
 } __attribute__((packed));
 
 #define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
@@ -47,7 +50,7 @@
     char *elfbase, unsigned long elfsize, struct domain_setup_info *dsi);
 static int
 loadelfimage(
-    char *elfbase, int xch, u32 dom, unsigned long *parray,
+    char *elfbase, int xch, uint32_t dom, unsigned long *parray,
     struct domain_setup_info *dsi);
 
 static unsigned char build_e820map(void *e820_page, unsigned long mem_size)
@@ -120,23 +123,22 @@
  * Use E820 reserved memory 0x9F800 to pass number of vcpus to vmxloader
  * vmxloader will use it to config ACPI MADT table
  */
-#define VCPU_MAGIC 0x76637075 /* "vcpu" */
-static int
-set_nr_vcpus(int xc_handle, u32 dom, unsigned long *pfn_list,
-             struct domain_setup_info *dsi, unsigned long vcpus)
-{
-    char          *va_map;
-    unsigned long *va_vcpus;
-
-    va_map = xc_map_foreign_range(
-        xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
-        pfn_list[(0x9F000 - dsi->v_start) >> PAGE_SHIFT]);
+#define VCPU_MAGIC      0x76637075  /* "vcpu" */
+static int set_vcpu_nr(int xc_handle, uint32_t dom,
+                        unsigned long *pfn_list, unsigned int vcpus)
+{
+    char         *va_map;
+    unsigned int *va_vcpus;
+
+    va_map = xc_map_foreign_range(xc_handle, dom,
+                                  PAGE_SIZE, PROT_READ|PROT_WRITE,
+                                  pfn_list[VCPU_NR_PAGE >> PAGE_SHIFT]);
     if ( va_map == NULL )
         return -1;
 
-    va_vcpus = (unsigned long *)(va_map + 0x800);
-    *va_vcpus++ = VCPU_MAGIC;
-    *va_vcpus++ = vcpus;
+    va_vcpus = (unsigned int *)(va_map + VCPU_NR_OFFSET);
+    va_vcpus[0] = VCPU_MAGIC;
+    va_vcpus[1] = vcpus;
 
     munmap(va_map, PAGE_SIZE);
 
@@ -144,7 +146,7 @@
 }
 
 #ifdef __i386__
-static int zap_mmio_range(int xc_handle, u32 dom,
+static int zap_mmio_range(int xc_handle, uint32_t dom,
                           l2_pgentry_32_t *vl2tab,
                           unsigned long mmio_range_start,
                           unsigned long mmio_range_size)
@@ -173,7 +175,7 @@
     return 0;
 }
 
-static int zap_mmio_ranges(int xc_handle, u32 dom, unsigned long l2tab,
+static int zap_mmio_ranges(int xc_handle, uint32_t dom, unsigned long l2tab,
                            unsigned char e820_map_nr, unsigned char *e820map)
 {
     unsigned int i;
@@ -197,7 +199,7 @@
     return 0;
 }
 #else
-static int zap_mmio_range(int xc_handle, u32 dom,
+static int zap_mmio_range(int xc_handle, uint32_t dom,
                           l3_pgentry_t *vl3tab,
                           unsigned long mmio_range_start,
                           unsigned long mmio_range_size)
@@ -247,7 +249,7 @@
     return 0;
 }
 
-static int zap_mmio_ranges(int xc_handle, u32 dom, unsigned long l3tab,
+static int zap_mmio_ranges(int xc_handle, uint32_t dom, unsigned long l3tab,
                            unsigned char e820_map_nr, unsigned char *e820map)
 {
     unsigned int i;
@@ -271,13 +273,12 @@
 #endif
 
 static int setup_guest(int xc_handle,
-                       u32 dom, int memsize,
+                       uint32_t dom, int memsize,
                        char *image, unsigned long image_size,
                        unsigned long nr_pages,
                        vcpu_guest_context_t *ctxt,
                        unsigned long shared_info_frame,
                        unsigned int control_evtchn,
-                       unsigned long flags,
                        unsigned int vcpus,
                        unsigned int store_evtchn,
                        unsigned long *store_mfn)
@@ -366,7 +367,7 @@
         goto error_out;
 
     /* First allocate page for page dir or pdpt */
-    ppt_alloc = (vpt_start - dsi.v_start) >> PAGE_SHIFT;
+    ppt_alloc = vpt_start >> PAGE_SHIFT;
     if ( page_array[ppt_alloc] > 0xfffff )
     {
         unsigned long nmfn;
@@ -388,8 +389,8 @@
                                         l2tab >> PAGE_SHIFT)) == NULL )
         goto error_out;
     memset(vl2tab, 0, PAGE_SIZE);
-    vl2e = &vl2tab[l2_table_offset(dsi.v_start)];
-    for ( count = 0; count < ((v_end-dsi.v_start)>>PAGE_SHIFT); count++ )
+    vl2e = &vl2tab[l2_table_offset(0)];
+    for ( count = 0; count < (v_end >> PAGE_SHIFT); count++ )
     {
         if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
         {
@@ -404,7 +405,7 @@
                 goto error_out;
             }
             memset(vl1tab, 0, PAGE_SIZE);
-            vl1e = &vl1tab[l1_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
+            vl1e = &vl1tab[l1_table_offset(count << PAGE_SHIFT)];
             *vl2e++ = l1tab | L2_PROT;
         }
 
@@ -436,9 +437,8 @@
         vl3tab[i] = l2tab | L3_PROT;
     }
 
-    vl3e = &vl3tab[l3_table_offset(dsi.v_start)];
-
-    for ( count = 0; count < ((v_end-dsi.v_start)>>PAGE_SHIFT); count++ )
+    vl3e = &vl3tab[l3_table_offset(0)];
+    for ( count = 0; count < (v_end >> PAGE_SHIFT); count++ )
     {
         if (!(count & (1 << (L3_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)))){
             l2tab = vl3tab[count >> (L3_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)]
@@ -452,7 +452,7 @@
                                                 l2tab >> PAGE_SHIFT)) == NULL )
                 goto error_out;
 
-            vl1e = &vl1tab[l1_table_offset(dsi.v_start + (count << PAGE_SHIFT))];
+            vl2e = &vl2tab[l2_table_offset(count << PAGE_SHIFT)];
         }
         if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
         {
@@ -467,7 +467,7 @@
                 goto error_out;
             }
             memset(vl1tab, 0, PAGE_SIZE);
-            vl1e = &vl1tab[l1_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
+            vl1e = &vl1tab[l1_table_offset(count << PAGE_SHIFT)];
             *vl2e++ = l1tab | L2_PROT;
         }
 
@@ -488,7 +488,10 @@
             goto error_out;
     }
 
-    set_nr_vcpus(xc_handle, dom, page_array, &dsi, vcpus);
+    if (set_vcpu_nr(xc_handle, dom, page_array, vcpus)) {
+        fprintf(stderr, "Couldn't set vcpu number for VMX guest.\n");
+        goto error_out;
+    }
 
     *store_mfn = page_array[(v_end-2) >> PAGE_SHIFT];
     shared_page_frame = (v_end - PAGE_SIZE) >> PAGE_SHIFT;
@@ -518,9 +521,6 @@
     /* Mask all upcalls... */
     for ( i = 0; i < MAX_VIRT_CPUS; i++ )
         shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
-
-    shared_info->n_vcpu = vcpus;
-    printf(" VCPUS:         %d\n", shared_info->n_vcpu);
 
     munmap(shared_info, PAGE_SIZE);
 
@@ -569,37 +569,34 @@
     return -1;
 }
 
-
 #define VMX_FEATURE_FLAG 0x20
 
 static int vmx_identify(void)
 {
     int eax, ecx;
 
-#ifdef __i386__
-    __asm__ __volatile__ ("pushl %%ebx; cpuid; popl %%ebx"
+    __asm__ __volatile__ (
+#if defined(__i386__)
+                          "push %%ebx; cpuid; pop %%ebx"
+#elif defined(__x86_64__)
+                          "push %%rbx; cpuid; pop %%rbx"
+#endif
                           : "=a" (eax), "=c" (ecx)
                           : "0" (1)
                           : "dx");
-#elif defined __x86_64__
-    __asm__ __volatile__ ("pushq %%rbx; cpuid; popq %%rbx"
-                          : "=a" (eax), "=c" (ecx)
-                          : "0" (1)
-                          : "dx");
-#endif
 
     if (!(ecx & VMX_FEATURE_FLAG)) {
         return -1;
     }
+
     return 0;
 }
 
 int xc_vmx_build(int xc_handle,
-                 u32 domid,
+                 uint32_t domid,
                  int memsize,
                  const char *image_name,
                  unsigned int control_evtchn,
-                 unsigned long flags,
                  unsigned int vcpus,
                  unsigned int store_evtchn,
                  unsigned long *store_mfn)
@@ -635,7 +632,7 @@
     op.cmd = DOM0_GETDOMAININFO;
     op.u.getdomaininfo.domain = (domid_t)domid;
     if ( (xc_dom0_op(xc_handle, &op) < 0) ||
-         ((u16)op.u.getdomaininfo.domain != domid) )
+         ((uint16_t)op.u.getdomaininfo.domain != domid) )
     {
         PERROR("Could not get info on domain");
         goto error_out;
@@ -654,9 +651,9 @@
         goto error_out;
     }
 
-    if ( setup_guest(xc_handle, domid, memsize, image, image_size, nr_pages,
-                     ctxt, op.u.getdomaininfo.shared_info_frame, control_evtchn,
-                     flags, vcpus, store_evtchn, store_mfn) < 0)
+    if ( setup_guest(xc_handle, domid, memsize, image, image_size,
+                     nr_pages, ctxt, op.u.getdomaininfo.shared_info_frame,
+                     control_evtchn, vcpus, store_evtchn, store_mfn) < 0)
     {
         ERROR("Error constructing guest OS");
         goto error_out;
@@ -790,7 +787,7 @@
 
 static int
 loadelfimage(
-    char *elfbase, int xch, u32 dom, unsigned long *parray,
+    char *elfbase, int xch, uint32_t dom, unsigned long *parray,
     struct domain_setup_info *dsi)
 {
     Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfbase;
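
The vcpu count handed to vmxloader lives at VCPU_NR_PAGE + VCPU_NR_OFFSET
(0x9F000 + 0x800 = 0x9F800, matching the comment above set_vcpu_nr()): the
builder writes VCPU_MAGIC followed by the count as two unsigned ints. A sketch
of the consuming side (illustrative only, not vmxloader's actual code; it
assumes the reader runs inside the guest, where that physical page is directly
addressable):

    #include <stdint.h>

    #define VCPU_MAGIC      0x76637075  /* "vcpu" */
    #define VCPU_NR_PAGE    0x0009F000
    #define VCPU_NR_OFFSET  0x00000800

    static unsigned int read_vcpu_count(void)
    {
        const unsigned int *p =
            (const unsigned int *)(uintptr_t)(VCPU_NR_PAGE + VCPU_NR_OFFSET);
        return (p[0] == VCPU_MAGIC) ? p[1] : 1;  /* default to one vcpu */
    }
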
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xenctrl.h     Mon Oct 24 15:08:13 2005
@@ -10,16 +10,6 @@
 #define XENCTRL_H
 
 #include <stdint.h>
-
-typedef uint8_t            u8;
-typedef uint16_t           u16;
-typedef uint32_t           u32;
-typedef uint64_t           u64;
-typedef int8_t             s8;
-typedef int16_t            s16;
-typedef int32_t            s32;
-typedef int64_t            s64;
-
 #include <sys/ptrace.h>
 #include <xen/xen.h>
 #include <xen/dom0_ops.h>
@@ -105,14 +95,14 @@
 long xc_ptrace(
     int xc_handle,
     enum __ptrace_request request, 
-    u32  domid,
+    uint32_t  domid,
     long addr, 
     long data);
 
 long xc_ptrace_core(
     int xc_handle,
     enum __ptrace_request request, 
-    u32 domid, 
+    uint32_t domid, 
     long addr, 
     long data);
 
@@ -133,30 +123,42 @@
  */
 
 typedef struct {
-    u32           domid;
-    u32           ssidref;
+    uint32_t      domid;
+    uint32_t      ssidref;
     unsigned int  dying:1, crashed:1, shutdown:1, 
                   paused:1, blocked:1, running:1;
     unsigned int  shutdown_reason; /* only meaningful if shutdown==1 */
     unsigned long nr_pages;
     unsigned long shared_info_frame;
-    u64           cpu_time;
+    uint64_t      cpu_time;
     unsigned long max_memkb;
-    unsigned int  vcpus;
-    s32           vcpu_to_cpu[MAX_VIRT_CPUS];
-    cpumap_t      cpumap[MAX_VIRT_CPUS];
+    unsigned int  nr_online_vcpus;
+    unsigned int  max_vcpu_id;
+    xen_domain_handle_t handle;
 } xc_dominfo_t;
 
 typedef dom0_getdomaininfo_t xc_domaininfo_t;
 int xc_domain_create(int xc_handle, 
-                     u32 ssidref,
-                     u32 *pdomid);
+                     uint32_t ssidref,
+                     xen_domain_handle_t handle,
+                     uint32_t *pdomid);
 
 
 int xc_domain_dumpcore(int xc_handle, 
-                       u32 domid,
+                       uint32_t domid,
                        const char *corename);
 
+/*
+ * This function sets the maximum number of vcpus that a domain may create.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface.
+ * @parm domid the domain id in which vcpus are to be created.
+ * @parm max the maximum number of vcpus that the domain may create.
+ * @return 0 on success, -1 on failure.
+ */
+int xc_domain_max_vcpus(int xc_handle,
+                        uint32_t domid, 
+                        unsigned int max);
 
 /**
  * This function pauses a domain. A paused domain still exists in memory
@@ -167,7 +169,7 @@
  * @return 0 on success, -1 on failure.
  */
 int xc_domain_pause(int xc_handle, 
-                    u32 domid);
+                    uint32_t domid);
 /**
  * This function unpauses a domain.  The domain should have been previously
  * paused.
@@ -177,7 +179,7 @@
  * return 0 on success, -1 on failure
  */
 int xc_domain_unpause(int xc_handle, 
-                      u32 domid);
+                      uint32_t domid);
 
 /**
  * This function will destroy a domain.  Destroying a domain removes the domain
@@ -189,11 +191,12 @@
  * @return 0 on success, -1 on failure
  */
 int xc_domain_destroy(int xc_handle, 
-                      u32 domid);
+                      uint32_t domid);
 int xc_domain_pincpu(int xc_handle,
-                     u32 domid,
+                     uint32_t domid,
                      int vcpu,
-                     cpumap_t *cpumap);
+                     cpumap_t cpumap);
+
 /**
  * This function will return information about one or more domains. It is
  * designed to iterate over the list of domains. If a single domain is
@@ -210,7 +213,7 @@
  * @return the number of domains enumerated or -1 on error
  */
 int xc_domain_getinfo(int xc_handle,
-                      u32 first_domid, 
+                      uint32_t first_domid, 
                       unsigned int max_doms,
                       xc_dominfo_t *info);
 
@@ -228,37 +231,46 @@
  * @return the number of domains enumerated or -1 on error
  */
 int xc_domain_getinfolist(int xc_handle,
-                          u32 first_domain,
+                          uint32_t first_domain,
                           unsigned int max_domains,
                           xc_domaininfo_t *info);
 
 /**
- * This function returns information about one domain.  This information is
- * more detailed than the information from xc_domain_getinfo().
+ * This function returns information about the execution context of a
+ * particular vcpu of a domain.
  *
  * @parm xc_handle a handle to an open hypervisor interface
  * @parm domid the domain to get information from
- * @parm info a pointer to an xc_domaininfo_t to store the domain information
+ * @parm vcpu the vcpu number
  * @parm ctxt a pointer to a structure to store the execution context of the
  *            domain
  * @return 0 on success, -1 on failure
  */
 int xc_domain_get_vcpu_context(int xc_handle,
-                               u32 domid,
-                               u32 vcpu,
+                               uint32_t domid,
+                               uint32_t vcpu,
                                vcpu_guest_context_t *ctxt);
 
+typedef dom0_getvcpuinfo_t xc_vcpuinfo_t;
+int xc_domain_get_vcpu_info(int xc_handle,
+                            uint32_t domid,
+                            uint32_t vcpu,
+                            xc_vcpuinfo_t *info);
+
+
 int xc_domain_setcpuweight(int xc_handle,
-                           u32 domid,
+                           uint32_t domid,
                            float weight);
 long long xc_domain_get_cpu_usage(int xc_handle,
                                   domid_t domid,
                                   int vcpu);
 
+int xc_domain_sethandle(int xc_handle, uint32_t domid, 
+                        xen_domain_handle_t handle);
 
 typedef dom0_shadow_control_stats_t xc_shadow_control_stats_t;
 int xc_shadow_control(int xc_handle,
-                      u32 domid, 
+                      uint32_t domid, 
                       unsigned int sop,
                       unsigned long *dirty_bitmap,
                       unsigned long pages,
@@ -268,10 +280,10 @@
                            unsigned long ctx_allow);
 
 int xc_bvtsched_domain_set(int xc_handle,
-                           u32 domid,
-                           u32 mcuadv,
+                           uint32_t domid,
+                           uint32_t mcuadv,
                            int warpback,
-                           s32 warpvalue,
+                           int32_t warpvalue,
                            long long warpl,
                            long long warpu);
 
@@ -279,20 +291,24 @@
                            unsigned long *ctx_allow);
 
 int xc_bvtsched_domain_get(int xc_handle,
-                           u32 domid,
-                           u32 *mcuadv,
+                           uint32_t domid,
+                           uint32_t *mcuadv,
                            int *warpback,
-                           s32 *warpvalue,
+                           int32_t *warpvalue,
                            long long *warpl,
                            long long *warpu);
 
 int xc_sedf_domain_set(int xc_handle,
-                          u32 domid,
-                          u64 period, u64 slice, u64 latency, u16 extratime, u16 weight);
+                       uint32_t domid,
+                       uint64_t period, uint64_t slice,
+                       uint64_t latency, uint16_t extratime,
+                       uint16_t weight);
 
 int xc_sedf_domain_get(int xc_handle,
-                          u32 domid,
-                          u64* period, u64 *slice, u64 *latency, u16 *extratime, u16* weight);
+                       uint32_t domid,
+                       uint64_t* period, uint64_t *slice,
+                       uint64_t *latency, uint16_t *extratime,
+                       uint16_t *weight);
 
 typedef evtchn_status_t xc_evtchn_status_t;
 
@@ -311,16 +327,16 @@
  * @return allocated port (in @dom) on success, -1 on failure
  */
 int xc_evtchn_alloc_unbound(int xc_handle,
-                            u32 dom,
-                            u32 remote_dom);
+                            uint32_t dom,
+                            uint32_t remote_dom);
 
 int xc_evtchn_status(int xc_handle,
-                     u32 dom, /* may be DOMID_SELF */
+                     uint32_t dom, /* may be DOMID_SELF */
                      int port,
                      xc_evtchn_status_t *status);
 
 int xc_physdev_pci_access_modify(int xc_handle,
-                                 u32 domid,
+                                 uint32_t domid,
                                  int bus,
                                  int dev,
                                  int func,
@@ -339,29 +355,29 @@
                 int *sched_id);
 
 int xc_domain_setmaxmem(int xc_handle,
-                        u32 domid, 
+                        uint32_t domid, 
                         unsigned int max_memkb);
 
 int xc_domain_memory_increase_reservation(int xc_handle,
-                                          u32 domid, 
+                                          uint32_t domid, 
                                           unsigned long nr_extents,
                                           unsigned int extent_order,
                                           unsigned int address_bits,
                                          unsigned long *extent_start);
 
 int xc_domain_memory_decrease_reservation(int xc_handle,
-                                          u32 domid, 
+                                          uint32_t domid, 
                                           unsigned long nr_extents,
                                           unsigned int extent_order,
                                          unsigned long *extent_start);
 
-unsigned long xc_make_page_below_4G(int xc_handle, u32 domid, 
+unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid, 
                                    unsigned long mfn);
 
 typedef dom0_perfc_desc_t xc_perfc_desc_t;
 /* IMPORTANT: The caller is responsible for mlock()'ing the @desc array. */
 int xc_perfc_control(int xc_handle,
-                     u32 op,
+                     uint32_t op,
                      xc_perfc_desc_t *desc);
 
 /* read/write msr */
@@ -384,27 +400,29 @@
  * @parm prot same flag as in mmap().
  * @parm mfn the frame address to map.
  */
-void *xc_map_foreign_range(int xc_handle, u32 dom,
+void *xc_map_foreign_range(int xc_handle, uint32_t dom,
                             int size, int prot,
                             unsigned long mfn );
 
-void *xc_map_foreign_batch(int xc_handle, u32 dom, int prot,
+void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
                            unsigned long *arr, int num );
 
-int xc_get_pfn_list(int xc_handle, u32 domid, unsigned long *pfn_buf, 
+int xc_get_pfn_list(int xc_handle, uint32_t domid, unsigned long *pfn_buf, 
                     unsigned long max_pfns);
 
-int xc_ia64_get_pfn_list(int xc_handle, u32 domid, unsigned long *pfn_buf, 
-                    unsigned int start_page, unsigned int nr_pages);
-
-long xc_get_max_pages(int xc_handle, u32 domid);
+int xc_ia64_get_pfn_list(int xc_handle, uint32_t domid,
+                         unsigned long *pfn_buf, 
+                         unsigned int start_page, unsigned int nr_pages);
+
+long xc_get_max_pages(int xc_handle, uint32_t domid);
 
 int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops,
                 domid_t dom);
 
 int xc_memory_op(int xc_handle, int cmd, void *arg);
 
-int xc_get_pfn_type_batch(int xc_handle, u32 dom, int num, unsigned long *arr);
+int xc_get_pfn_type_batch(int xc_handle, uint32_t dom,
+                          int num, unsigned long *arr);
 
 
 /*\
@@ -438,47 +456,38 @@
  */
 int xc_grant_interface_close(int xc_handle);
 
-int xc_gnttab_map_grant_ref(int  xc_handle,
-                            u64  host_virt_addr,
-                            u32  dom,
-                            u16  ref,
-                            u16  flags,
-                            s16 *handle,
-                            u64 *dev_bus_addr);
+int xc_gnttab_map_grant_ref(int      xc_handle,
+                            uint64_t host_virt_addr,
+                            uint32_t dom,
+                            uint16_t ref,
+                            uint16_t flags,
+                            int16_t *handle,
+                            uint64_t *dev_bus_addr);
 
 int xc_gnttab_unmap_grant_ref(int  xc_handle,
-                              u64  host_virt_addr,
-                              u64  dev_bus_addr,
-                              u16  handle,
-                              s16 *status);
+                              uint64_t  host_virt_addr,
+                              uint64_t  dev_bus_addr,
+                              uint16_t  handle,
+                              int16_t *status);
 
 int xc_gnttab_setup_table(int        xc_handle,
-                          u32        dom,
-                          u16        nr_frames,
-                          s16       *status,
+                          uint32_t   dom,
+                          uint16_t   nr_frames,
+                          int16_t   *status,
                           unsigned long **frame_list);
 
 /* Grant debug builds only: */
 int xc_gnttab_dump_table(int        xc_handle,
-                         u32        dom,
-                         s16       *status);
+                         uint32_t   dom,
+                         int16_t   *status);
 
 /* Get current total pages allocated to a domain. */
-long xc_get_tot_pages(int xc_handle, u32 domid);
+long xc_get_tot_pages(int xc_handle, uint32_t domid);
 
 /* Execute a privileged dom0 operation. */
 int xc_dom0_op(int xc_handle, dom0_op_t *op);
 
 int xc_version(int xc_handle, int cmd, void *arg);
-
-/* Initializes the store (for dom0)
-   remote_port should be the remote end of a bound interdomain channel between
-   the store and dom0.
-
-   This function returns a shared frame that should be passed to
-   xs_introduce_domain
- */
-long xc_init_store(int xc_handle, int remote_port);
 
 /*
  * MMU updates.
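
The reworked xc_dominfo_t above can be walked with xc_domain_getinfo() to show
the new per-domain fields; a short sketch (the batch size of 16 and the output
format are arbitrary choices for illustration):

    #include <stdio.h>
    #include "xenctrl.h"

    static void list_domains(int xc_handle)
    {
        xc_dominfo_t info[16];
        int i, n = xc_domain_getinfo(xc_handle, 0, 16, info);

        for ( i = 0; i < n; i++ )
            printf("dom %u: %u vcpu(s) online, max_vcpu_id %u, handle[0]=%02x\n",
                   info[i].domid, info[i].nr_online_vcpus,
                   info[i].max_vcpu_id, (unsigned)info[i].handle[0]);
    }
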
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xenguest.h
--- a/tools/libxc/xenguest.h    Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xenguest.h    Mon Oct 24 15:08:13 2005
@@ -46,19 +46,16 @@
                    const char *ramdisk_name,
                    const char *cmdline,
                    unsigned long flags,
-                   unsigned int vcpus,
                    unsigned int store_evtchn,
                    unsigned long *store_mfn,
                    unsigned int console_evtchn,
                    unsigned long *console_mfn);
 
-struct mem_map;
 int xc_vmx_build(int xc_handle,
                  uint32_t domid,
                  int memsize,
                  const char *image_name,
                  unsigned int control_evtchn,
-                 unsigned long flags,
                  unsigned int vcpus,
                  unsigned int store_evtchn,
                  unsigned long *store_mfn);
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/libxc/xg_private.h
--- a/tools/libxc/xg_private.h  Fri Oct 21 19:58:39 2005
+++ b/tools/libxc/xg_private.h  Mon Oct 24 15:08:13 2005
@@ -60,11 +60,11 @@
 #define PAGE_SIZE               (1UL << PAGE_SHIFT)
 #define PAGE_MASK               (~(PAGE_SIZE-1))
 
-typedef u32 l1_pgentry_32_t;
-typedef u32 l2_pgentry_32_t;
-typedef u64 l1_pgentry_64_t;
-typedef u64 l2_pgentry_64_t;
-typedef u64 l3_pgentry_64_t;
+typedef uint32_t l1_pgentry_32_t;
+typedef uint32_t l2_pgentry_32_t;
+typedef uint64_t l1_pgentry_64_t;
+typedef uint64_t l2_pgentry_64_t;
+typedef uint64_t l3_pgentry_64_t;
 typedef unsigned long l1_pgentry_t;
 typedef unsigned long l2_pgentry_t;
 #if defined(__x86_64__)
@@ -129,7 +129,7 @@
 typedef int (*parseimagefunc)(char *image, unsigned long image_size,
                              struct domain_setup_info *dsi);
 typedef int (*loadimagefunc)(char *image, unsigned long image_size, int xch,
-                            u32 dom, unsigned long *parray,
+                            uint32_t dom, unsigned long *parray,
                             struct domain_setup_info *dsi);
 
 struct load_funcs
@@ -153,13 +153,13 @@
 
 unsigned long xc_get_m2p_start_mfn (int xc_handle);
 
-int xc_copy_to_domain_page(int xc_handle, u32 domid,
+int xc_copy_to_domain_page(int xc_handle, uint32_t domid,
                             unsigned long dst_pfn, void *src_page);
 
 unsigned long xc_get_filesz(int fd);
 
 void xc_map_memcpy(unsigned long dst, char *src, unsigned long size,
-                   int xch, u32 dom, unsigned long *parray,
+                   int xch, uint32_t dom, unsigned long *parray,
                    unsigned long vstart);
 
 int pin_table(int xc_handle, unsigned int type, unsigned long mfn,
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/misc/netfix
--- a/tools/misc/netfix Fri Oct 21 19:58:39 2005
+++ b/tools/misc/netfix Mon Oct 24 15:08:13 2005
@@ -3,7 +3,7 @@
 #============================================================================
 # Copyright (C) 2004 Mike Wray <mike.wray@xxxxxx>
 #============================================================================
-# Move the IP address from eth0 onto the Xen bridge (xen-br0).
+# Move the IP address from eth0 onto the Xen bridge (xenbr0).
 # Only works if the bridge control utils (brctl) have been installed.
 #============================================================================
 
@@ -19,7 +19,7 @@
                  'interface=', 'bridge=', 'create']
 
 defaults['interface'] = 'eth0'
-defaults['bridge'] = 'xen-br0'
+defaults['bridge'] = 'xenbr0'
 
 def usage():
     print """Usage:
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/pygrub/setup.py
--- a/tools/pygrub/setup.py     Fri Oct 21 19:58:39 2005
+++ b/tools/pygrub/setup.py     Mon Oct 24 15:08:13 2005
@@ -12,7 +12,7 @@
     ext2defines = []
     cc = new_compiler()
     cc.add_library("ext2fs")
-    if cc.has_function("ext2fs_open2"):
+    if hasattr(cc, "has_function") and cc.has_function("ext2fs_open2"):
         ext2defines.append( ("HAVE_EXT2FS_OPEN2", None) )
     else:
         sys.stderr.write("WARNING: older version of e2fsprogs installed, not building full\n")
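
For older distutils releases whose CCompiler class has no has_function() method, the hasattr() guard above simply skips the probe. The same pattern in isolation looks roughly like this (a sketch only; it assumes distutils is available and that the ext2fs development library may or may not be installed):

    from distutils.ccompiler import new_compiler

    cc = new_compiler()
    cc.add_library("ext2fs")
    # Treat "cannot probe" the same as "symbol not found": build without it.
    if hasattr(cc, "has_function") and cc.has_function("ext2fs_open2"):
        defines = [("HAVE_EXT2FS_OPEN2", None)]
    else:
        defines = []
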
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/pygrub/src/fsys/reiser/reisermodule.c
--- a/tools/pygrub/src/fsys/reiser/reisermodule.c       Fri Oct 21 19:58:39 2005
+++ b/tools/pygrub/src/fsys/reiser/reisermodule.c       Mon Oct 24 15:08:13 2005
@@ -46,7 +46,7 @@
 
        if (!dal) return;
 
-       close((size_t)dal->dev);
+       close((int)(unsigned long)dal->dev);
        dal_free(dal);
 }
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/lowlevel/xc/xc.c Mon Oct 24 15:08:13 2005
@@ -42,7 +42,7 @@
 {
     XcObject *xc = (XcObject *)self;
 
-    u32 dom;
+    uint32_t dom;
     char *corefile;
 
     static char *kwd_list[] = { "dom", "corefile", NULL };
@@ -77,20 +77,64 @@
 {
     XcObject *xc = (XcObject *)self;
 
-    u32          dom = 0;
-    int          ret;
-    u32          ssidref = 0x0;
-
-    static char *kwd_list[] = { "dom", "ssidref", NULL };
-
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list,
-                                      &dom, &ssidref))
-        return NULL;
-
-    if ( (ret = xc_domain_create(xc->xc_handle, ssidref, &dom)) < 0 )
+    uint32_t dom = 0;
+    int      ret, i;
+    uint32_t ssidref = 0;
+    PyObject *pyhandle = NULL;
+    xen_domain_handle_t handle = { 
+        0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+        0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef };
+
+    static char *kwd_list[] = { "dom", "ssidref", "handle", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiO", kwd_list,
+                                      &dom, &ssidref, &pyhandle))
+        return NULL;
+
+    if ( pyhandle != NULL )
+    {
+        if ( !PyList_Check(pyhandle) || 
+             (PyList_Size(pyhandle) != sizeof(xen_domain_handle_t)) )
+        {
+        out_exception:
+            errno = EINVAL;
+            PyErr_SetFromErrno(xc_error);
+            return NULL;
+        }
+
+        for ( i = 0; i < sizeof(xen_domain_handle_t); i++ )
+        {
+            PyObject *p = PyList_GetItem(pyhandle, i);
+            if ( !PyInt_Check(p) )
+                goto out_exception;
+            handle[i] = (uint8_t)PyInt_AsLong(p);
+        }
+    }
+
+    if ( (ret = xc_domain_create(xc->xc_handle, ssidref, handle, &dom)) < 0 )
         return PyErr_SetFromErrno(xc_error);
 
     return PyInt_FromLong(dom);
+}
+
+static PyObject *pyxc_domain_max_vcpus(PyObject *self,
+                                            PyObject *args,
+                                            PyObject *kwds)
+{
+    XcObject *xc = (XcObject *)self;
+
+    uint32_t dom, max;
+
+    static char *kwd_list[] = { "dom", "max", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii", kwd_list, &dom, &max) )
+        return NULL;
+
+    if ( xc_domain_max_vcpus(xc->xc_handle, dom, max) != 0 )
+        return PyErr_SetFromErrno(xc_error);
+    
+    Py_INCREF(zero);
+    return zero;
 }
 
 static PyObject *pyxc_domain_pause(PyObject *self,
@@ -99,7 +143,7 @@
 {
     XcObject *xc = (XcObject *)self;
 
-    u32 dom;
+    uint32_t dom;
 
     static char *kwd_list[] = { "dom", NULL };
 
@@ -119,7 +163,7 @@
 {
     XcObject *xc = (XcObject *)self;
 
-    u32 dom;
+    uint32_t dom;
 
     static char *kwd_list[] = { "dom", NULL };
 
@@ -139,7 +183,7 @@
 {
     XcObject *xc = (XcObject *)self;
 
-    u32 dom;
+    uint32_t dom;
 
     static char *kwd_list[] = { "dom", NULL };
 
@@ -159,17 +203,25 @@
 {
     XcObject *xc = (XcObject *)self;
 
-    u32 dom;
-    int vcpu = 0;
-    cpumap_t cpumap = 0xFFFFFFFF;
+    uint32_t dom;
+    int vcpu = 0, i;
+    cpumap_t cpumap = ~0ULL;
+    PyObject *cpulist = NULL;
 
     static char *kwd_list[] = { "dom", "vcpu", "cpumap", NULL };
 
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|ii", kwd_list, 
-                                      &dom, &vcpu, &cpumap) )
-        return NULL;
-
-    if ( xc_domain_pincpu(xc->xc_handle, dom, vcpu, &cpumap) != 0 )
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|iO", kwd_list, 
+                                      &dom, &vcpu, &cpulist) )
+        return NULL;
+
+    if ( (cpulist != NULL) && PyList_Check(cpulist) )
+    {
+        cpumap = 0ULL;
+        for ( i = 0; i < PyList_Size(cpulist); i++ ) 
+            cpumap |= (cpumap_t)1 << PyInt_AsLong(PyList_GetItem(cpulist, i));
+    }
+  
+    if ( xc_domain_pincpu(xc->xc_handle, dom, vcpu, cpumap) != 0 )
         return PyErr_SetFromErrno(xc_error);
     
     Py_INCREF(zero);
@@ -182,7 +234,7 @@
 {
     XcObject *xc = (XcObject *)self;
 
-    u32 dom;
+    uint32_t dom;
     float cpuweight = 1;
 
     static char *kwd_list[] = { "dom", "cpuweight", NULL };
@@ -192,6 +244,47 @@
         return NULL;
 
     if ( xc_domain_setcpuweight(xc->xc_handle, dom, cpuweight) != 0 )
+        return PyErr_SetFromErrno(xc_error);
+    
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_domain_sethandle(PyObject *self,
+                                       PyObject *args,
+                                       PyObject *kwds)
+{
+    XcObject *xc = (XcObject *)self;
+
+    int i;
+    uint32_t dom;
+    PyObject *pyhandle;
+    xen_domain_handle_t handle;
+
+    static char *kwd_list[] = { "dom", "handle", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iO", kwd_list, 
+                                      &dom, &pyhandle) )
+        return NULL;
+
+    if ( !PyList_Check(pyhandle) || 
+         (PyList_Size(pyhandle) != sizeof(xen_domain_handle_t)) )
+    {
+    out_exception:
+        errno = EINVAL;
+        PyErr_SetFromErrno(xc_error);
+        return NULL;
+    }
+
+    for ( i = 0; i < sizeof(xen_domain_handle_t); i++ )
+    {
+        PyObject *p = PyList_GetItem(pyhandle, i);
+        if ( !PyInt_Check(p) )
+            goto out_exception;
+        handle[i] = (uint8_t)PyInt_AsLong(p);
+    }
+
+    if ( xc_domain_sethandle(xc->xc_handle, dom, handle) < 0 )
         return PyErr_SetFromErrno(xc_error);
     
     Py_INCREF(zero);
@@ -203,9 +296,9 @@
                                      PyObject *kwds)
 {
     XcObject *xc = (XcObject *)self;
-    PyObject *list, *vcpu_list, *cpumap_list, *info_dict;
-
-    u32 first_dom = 0;
+    PyObject *list, *info_dict;
+
+    uint32_t first_dom = 0;
     int max_doms = 1024, nr_doms, i, j;
     xc_dominfo_t *info;
 
@@ -229,19 +322,14 @@
     list = PyList_New(nr_doms);
     for ( i = 0 ; i < nr_doms; i++ )
     {
-        vcpu_list = PyList_New(MAX_VIRT_CPUS);
-        cpumap_list = PyList_New(MAX_VIRT_CPUS);
-        for ( j = 0; j < MAX_VIRT_CPUS; j++ ) {
-            PyList_SetItem( vcpu_list, j, 
-                            Py_BuildValue("i", info[i].vcpu_to_cpu[j]));
-            PyList_SetItem( cpumap_list, j, 
-                            Py_BuildValue("i", info[i].cpumap[j]));
-        }
-                 
-        info_dict = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
+        PyObject *pyhandle = PyList_New(sizeof(xen_domain_handle_t));
+        for ( j = 0; j < sizeof(xen_domain_handle_t); j++ )
+            PyList_SetItem(pyhandle, j, PyInt_FromLong(info[i].handle[j]));
+        info_dict = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
                                   ",s:l,s:L,s:l,s:i,s:i}",
                                   "dom",       info[i].domid,
-                                  "vcpus",     info[i].vcpus,
+                                  "online_vcpus", info[i].nr_online_vcpus,
+                                  "max_vcpu_id", info[i].max_vcpu_id,
                                   "dying",     info[i].dying,
                                   "crashed",   info[i].crashed,
                                   "shutdown",  info[i].shutdown,
@@ -253,15 +341,55 @@
                                   "maxmem_kb", info[i].max_memkb,
                                   "ssidref",   info[i].ssidref,
                                   "shutdown_reason", info[i].shutdown_reason);
-        PyDict_SetItemString( info_dict, "vcpu_to_cpu", vcpu_list );
-        PyDict_SetItemString( info_dict, "cpumap", cpumap_list );
-        PyList_SetItem( list, i, info_dict);
- 
+        PyDict_SetItemString(info_dict, "handle", pyhandle);
+        PyList_SetItem(list, i, info_dict);
     }
 
     free(info);
 
     return list;
+}
+
+static PyObject *pyxc_vcpu_getinfo(PyObject *self,
+                                   PyObject *args,
+                                   PyObject *kwds)
+{
+    XcObject *xc = (XcObject *)self;
+    PyObject *info_dict, *cpulist;
+
+    uint32_t dom, vcpu = 0;
+    xc_vcpuinfo_t info;
+    int rc, i;
+    cpumap_t cpumap;
+
+    static char *kwd_list[] = { "dom", "vcpu", NULL };
+    
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
+                                      &dom, &vcpu) )
+        return NULL;
+
+    rc = xc_domain_get_vcpu_info(xc->xc_handle, dom, vcpu, &info);
+    if ( rc < 0 )
+        return PyErr_SetFromErrno(xc_error);
+
+    info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}",
+                              "online",   info.online,
+                              "blocked",  info.blocked,
+                              "running",  info.running,
+                              "cpu_time", info.cpu_time,
+                              "cpu",      info.cpu);
+
+    cpumap = info.cpumap;
+    cpulist = PyList_New(0);
+    for ( i = 0; cpumap != 0; i++ )
+    {
+        if ( cpumap & 1 )
+            PyList_Append(cpulist, PyInt_FromLong(i));
+        cpumap >>= 1;
+    }
+    PyDict_SetItemString(info_dict, "cpumap", cpulist);
+
+    return info_dict;
 }
 
 static PyObject *pyxc_linux_build(PyObject *self,
@@ -270,9 +398,9 @@
 {
     XcObject *xc = (XcObject *)self;
 
-    u32 dom;
+    uint32_t dom;
     char *image, *ramdisk = NULL, *cmdline = "";
-    int flags = 0, vcpus = 1;
+    int flags = 0;
     int store_evtchn, console_evtchn;
     unsigned long store_mfn = 0;
     unsigned long console_mfn = 0;
@@ -280,19 +408,17 @@
     static char *kwd_list[] = { "dom", "store_evtchn", 
                                 "console_evtchn", "image", 
                                /* optional */
-                               "ramdisk", "cmdline", "flags",
-                               "vcpus", NULL };
-
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiis|ssii", kwd_list,
+                               "ramdisk", "cmdline", "flags", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiis|ssi", kwd_list,
                                       &dom, &store_evtchn,
                                      &console_evtchn, &image, 
                                      /* optional */
-                                     &ramdisk, &cmdline, &flags,
-                                      &vcpus) )
+                                     &ramdisk, &cmdline, &flags) )
         return NULL;
 
     if ( xc_linux_build(xc->xc_handle, dom, image,
-                        ramdisk, cmdline, flags, vcpus,
+                        ramdisk, cmdline, flags,
                         store_evtchn, &store_mfn, 
                        console_evtchn, &console_mfn) != 0 )
         return PyErr_SetFromErrno(xc_error);
@@ -303,28 +429,28 @@
 }
 
 static PyObject *pyxc_vmx_build(PyObject *self,
-                                  PyObject *args,
-                                  PyObject *kwds)
-{
-    XcObject *xc = (XcObject *)self;
-
-    u32   dom;
+                                PyObject *args,
+                                PyObject *kwds)
+{
+    XcObject *xc = (XcObject *)self;
+
+    uint32_t dom;
     char *image;
-    int   control_evtchn, store_evtchn;
-    int flags = 0, vcpus = 1;
+    int control_evtchn, store_evtchn;
+    int vcpus = 1;
     int memsize;
     unsigned long store_mfn = 0;
 
     static char *kwd_list[] = { "dom", "control_evtchn", "store_evtchn",
-                                "memsize", "image", "flags", "vcpus", NULL };
-
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiiisii", kwd_list,
+                                "memsize", "image", "vcpus", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiiisi", kwd_list,
                                       &dom, &control_evtchn, &store_evtchn,
-                                      &memsize, &image, &flags, &vcpus) )
+                                      &memsize, &image, &vcpus) )
         return NULL;
 
     if ( xc_vmx_build(xc->xc_handle, dom, memsize, image, control_evtchn,
-                      flags, vcpus, store_evtchn, &store_mfn) != 0 )
+                      vcpus, store_evtchn, &store_mfn) != 0 )
         return PyErr_SetFromErrno(xc_error);
 
     return Py_BuildValue("{s:i}", "store_mfn", store_mfn);
@@ -373,10 +499,10 @@
 {
     XcObject *xc = (XcObject *)self;
 
-    u32 dom;
-    u32 mcuadv;
+    uint32_t dom;
+    uint32_t mcuadv;
     int warpback; 
-    s32 warpvalue;
+    int32_t warpvalue;
     long long warpl;
     long long warpu;
 
@@ -401,10 +527,10 @@
                                           PyObject *kwds)
 {
     XcObject *xc = (XcObject *)self;
-    u32 dom;
-    u32 mcuadv;
+    uint32_t dom;
+    uint32_t mcuadv;
     int warpback; 
-    s32 warpvalue;
+    int32_t warpvalue;
     long long warpl;
     long long warpu;
     
@@ -432,7 +558,7 @@
 {
     XcObject *xc = (XcObject *)self;
 
-    u32 dom, remote_dom;
+    uint32_t dom, remote_dom;
     int port;
 
     static char *kwd_list[] = { "dom", "remote_dom", NULL };
@@ -454,7 +580,7 @@
     XcObject *xc = (XcObject *)self;
     PyObject *dict;
 
-    u32 dom = DOMID_SELF;
+    uint32_t dom = DOMID_SELF;
     int port, ret;
     xc_evtchn_status_t status;
 
@@ -507,7 +633,7 @@
                                                 PyObject *kwds)
 {
     XcObject *xc = (XcObject *)self;
-    u32 dom;
+    uint32_t dom;
     int bus, dev, func, enable, ret;
 
     static char *kwd_list[] = { "dom", "bus", "dev", "func", "enable", NULL };
@@ -631,13 +757,13 @@
 
 
 static PyObject *pyxc_sedf_domain_set(PyObject *self,
-                                         PyObject *args,
-                                         PyObject *kwds)
-{
-    XcObject *xc = (XcObject *)self;
-    u32 domid;
-    u64 period, slice, latency;
-    u16 extratime, weight;
+                                      PyObject *args,
+                                      PyObject *kwds)
+{
+    XcObject *xc = (XcObject *)self;
+    uint32_t domid;
+    uint64_t period, slice, latency;
+    uint16_t extratime, weight;
     static char *kwd_list[] = { "dom", "period", "slice",
                                 "latency", "extratime", "weight",NULL };
     
@@ -654,13 +780,13 @@
 }
 
 static PyObject *pyxc_sedf_domain_get(PyObject *self,
-                                         PyObject *args,
-                                         PyObject *kwds)
-{
-    XcObject *xc = (XcObject *)self;
-    u32 domid;
-    u64 period, slice,latency;
-    u16 weight, extratime;
+                                      PyObject *args,
+                                      PyObject *kwds)
+{
+    XcObject *xc = (XcObject *)self;
+    uint32_t domid;
+    uint64_t period, slice,latency;
+    uint16_t weight, extratime;
     
     static char *kwd_list[] = { "dom", NULL };
 
@@ -671,34 +797,13 @@
                                 &slice,&latency,&extratime,&weight) )
         return PyErr_SetFromErrno(xc_error);
 
-    return Py_BuildValue("{s:i,s:L,s:L,s:L,s:i}",
+    return Py_BuildValue("{s:i,s:L,s:L,s:L,s:i,s:i}",
                          "domain",    domid,
                          "period",    period,
                          "slice",     slice,
                         "latency",   latency,
-                        "extratime", extratime);
-}
-
-static PyObject *pyxc_shadow_control(PyObject *self,
-                                     PyObject *args,
-                                     PyObject *kwds)
-{
-    XcObject *xc = (XcObject *)self;
-
-    u32 dom;
-    int op=0;
-
-    static char *kwd_list[] = { "dom", "op", NULL };
-
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list, 
-                                      &dom, &op) )
-        return NULL;
-
-    if ( xc_shadow_control(xc->xc_handle, dom, op, NULL, 0, NULL) < 0 )
-        return PyErr_SetFromErrno(xc_error);
-    
-    Py_INCREF(zero);
-    return zero;
+                        "extratime", extratime,
+                         "weight",    weight);
 }
 
 static PyObject *pyxc_domain_setmaxmem(PyObject *self,
@@ -707,7 +812,7 @@
 {
     XcObject *xc = (XcObject *)self;
 
-    u32 dom;
+    uint32_t dom;
     unsigned int maxmem_kb;
 
     static char *kwd_list[] = { "dom", "maxmem_kb", NULL };
@@ -729,7 +834,7 @@
 {
     XcObject *xc = (XcObject *)self;
 
-    u32 dom;
+    uint32_t dom;
     unsigned long mem_kb;
     unsigned int extent_order = 0 , address_bits = 0;
     unsigned long nr_extents;
@@ -752,23 +857,6 @@
     return zero;
 }
 
-static PyObject *pyxc_init_store(PyObject *self, PyObject *args,
-                                PyObject *kwds)
-{
-    XcObject *xc = (XcObject *)self;
-
-    int remote_port;
-
-    static char *kwd_list[] = { "remote_port", NULL };
-
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i", kwd_list, 
-                                      &remote_port) )
-        return NULL;
-
-    return PyInt_FromLong(xc_init_store(xc->xc_handle, remote_port));
-}
-
-
 static PyMethodDef pyxc_methods[] = {
     { "handle",
       (PyCFunction)pyxc_handle,
@@ -783,6 +871,14 @@
      " dom    [int, 0]:        Domain identifier to use (allocated if zero).\n"
       "Returns: [int] new domain identifier; -1 on error.\n" },
 
+    { "domain_max_vcpus", 
+      (PyCFunction)pyxc_domain_max_vcpus,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Set the maximum number of VCPUs a domain may create.\n"
+      " dom [int]: Domain identifier to use.\n"
+      " max [int]: New maximum number of VCPUs in domain.\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
     { "domain_dumpcore", 
       (PyCFunction)pyxc_domain_dumpcore, 
       METH_VARARGS | METH_KEYWORDS, "\n"
@@ -818,7 +914,7 @@
       "Pin a VCPU to a specified set CPUs.\n"
       " dom [int]:     Identifier of domain to which VCPU belongs.\n"
       " vcpu [int, 0]: VCPU being pinned.\n"
-      " cpumap [int, -1]: Bitmap of usable CPUs.\n\n"
+      " cpumap [list, []]: list of usable CPUs.\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
     { "domain_setcpuweight", 
@@ -827,6 +923,14 @@
       "Set cpuweight scheduler parameter for domain.\n"
       " dom [int]:            Identifier of domain to be changed.\n"
       " cpuweight [float, 1]: VCPU being pinned.\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "domain_sethandle", 
+      (PyCFunction)pyxc_domain_sethandle,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Set domain's opaque handle.\n"
+      " dom [int]:            Identifier of domain.\n"
+      " handle [list of 16 ints]: New opaque handle.\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
     { "domain_getinfo", 
@@ -852,8 +956,21 @@
       " maxmem_kb [int]: Maximum memory limit, in kilobytes\n"
       " cpu_time [long]: CPU time consumed, in nanoseconds\n"
       " shutdown_reason [int]: Numeric code from guest OS, explaining "
-      "reason why it shut itself down.\n" 
-      " vcpu_to_cpu [[int]]: List that maps VCPUS to CPUS\n" },
+      "reason why it shut itself down.\n" },
+
+    { "vcpu_getinfo", 
+      (PyCFunction)pyxc_vcpu_getinfo, 
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Get information regarding a VCPU.\n"
+      " dom  [int]:    Domain to retrieve info about.\n"
+      " vcpu [int, 0]: VCPU to retrieve info about.\n\n"
+      "Returns: [dict]\n"
+      " online   [int]:  Bool - Is this VCPU currently online?\n"
+      " blocked  [int]:  Bool - Is this VCPU blocked waiting for an event?\n"
+      " running  [int]:  Bool - Is this VCPU currently running on a CPU?\n"
+      " cpu_time [long]: CPU time consumed, in nanoseconds\n"
+      " cpumap   [int]:  Bitmap of CPUs this VCPU can run on\n"
+      " cpu      [int]:  CPU that this VCPU is currently bound to\n" },
 
     { "linux_build", 
       (PyCFunction)pyxc_linux_build, 
@@ -869,12 +986,10 @@
     { "vmx_build", 
       (PyCFunction)pyxc_vmx_build, 
      METH_VARARGS | METH_KEYWORDS, "\n"
-      "Build a new Linux guest OS.\n"
+      "Build a new VMX guest OS.\n"
      " dom     [int]:      Identifier of domain to build into.\n"
-      " image   [str]:      Name of kernel image file. May be gzipped.\n"
-      " memmap  [str]:             Memory map.\n\n"
-      " ramdisk [str, n/a]: Name of ramdisk file, if any.\n"
-      " cmdline [str, n/a]: Kernel parameters, if any.\n\n"
+      " image   [str]:      Name of VMX loader image file.\n"
+      " vcpus   [int, 1]:   Number of virtual CPUs in domain.\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
     { "bvtsched_global_set",
@@ -995,14 +1110,6 @@
       "Returns [dict]: information about Xen"
       "        [None]: on failure.\n" },
 
-    { "shadow_control", 
-      (PyCFunction)pyxc_shadow_control, 
-      METH_VARARGS | METH_KEYWORDS, "\n"
-      "Set parameter for shadow pagetable interface\n"
-      " dom [int]:   Identifier of domain.\n"
-      " op [int, 0]: operation\n\n"
-      "Returns: [int] 0 on success; -1 on error.\n" },
-
     { "domain_setmaxmem", 
       (PyCFunction)pyxc_domain_setmaxmem, 
       METH_VARARGS | METH_KEYWORDS, "\n"
@@ -1018,13 +1125,6 @@
       " dom [int]: Identifier of domain.\n"
       " mem_kb [long]: .\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
-
-    { "init_store", 
-      (PyCFunction)pyxc_init_store, 
-      METH_VARARGS | METH_KEYWORDS, "\n"
-      "Initialize the store event channel and return the store page mfn.\n"
-      " remote_port [int]: store event channel port number.\n"
-      "Returns: [int] mfn on success; <0 on error.\n" },
 
     { NULL, NULL, 0, NULL }
 };
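
Taken together, the xc.c changes above reshape the Python binding: domain handles are plain lists of 16 ints, the VCPU limit is set explicitly via domain_max_vcpus, CPU affinity is passed as a list rather than a bitmask, and per-VCPU state comes from the new vcpu_getinfo. A rough usage sketch (the handle bytes and the ordering of calls are illustrative only; memory setup and the builder call are omitted):

    import xen.lowlevel.xc

    xc = xen.lowlevel.xc.new()

    handle = [0xde, 0xad, 0xbe, 0xef] * 4        # 16-byte opaque handle
    dom = xc.domain_create(ssidref=0, handle=handle)

    xc.domain_max_vcpus(dom, 2)                  # 'vcpus' is no longer a build argument
    xc.domain_sethandle(dom, handle)             # the handle can also be changed later
    xc.domain_pincpu(dom, 0, [0, 1])             # cpumap is a list of CPUs now

    for d in xc.domain_getinfo():
        print d['dom'], d['online_vcpus'], d['max_vcpu_id'], d['handle']

    print xc.vcpu_getinfo(dom, 0)                # online/blocked/running/cpu_time/cpu/cpumap
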
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/lowlevel/xs/xs.c
--- a/tools/python/xen/lowlevel/xs/xs.c Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/lowlevel/xs/xs.c Mon Oct 24 15:08:13 2005
@@ -28,6 +28,7 @@
 #include <sys/stat.h>
 #include <fcntl.h>
 
+#include <xenctrl.h>
 #include "xs.h"
 
 /** @file
@@ -512,6 +513,7 @@
         goto exit;
     if (!PyArg_ParseTupleAndKeywords(args, kwds, arg_spec, kwd_spec))
         goto exit;
+again:
     Py_BEGIN_ALLOW_THREADS
     xsval = xs_read_watch(xh, &num);
     Py_END_ALLOW_THREADS
@@ -528,8 +530,13 @@
             break;
     }
     if (i == PyList_Size(xsh->watches)) {
-        PyErr_SetString(PyExc_RuntimeError, "invalid token");
-        goto exit;
+      /* We do not have a registered watch for the one that has just fired.
+         Ignore this -- a watch that has recently been deregistered can still
+         have watch events in transit.  This is a blocking method, so go back
+         and read again.
+      */
+      free(xsval);
+      goto again;
     }
     /* Create tuple (path, token). */
     val = Py_BuildValue("(sO)", xsval[XS_WATCH_PATH], token);
@@ -679,7 +686,6 @@
        " dom  [int]   : domain id\n"                                   \
        " page [long]  : address of domain's xenstore page\n"           \
        " port [int]   : port the domain is using for xenstore\n"       \
-       " path [string]: path to the domain's data in xenstore\n"       \
        "\n"                                                            \
        "Returns None on success.\n"                                    \
        "Raises RuntimeError on error.\n"                               \
@@ -688,12 +694,11 @@
 static PyObject *xspy_introduce_domain(PyObject *self, PyObject *args,
                                        PyObject *kwds)
 {
-    static char *kwd_spec[] = { "dom", "page", "port", "path", NULL };
-    static char *arg_spec = "iiis|";
+    static char *kwd_spec[] = { "dom", "page", "port", NULL };
+    static char *arg_spec = "iii";
     domid_t dom = 0;
     unsigned long page = 0;
     unsigned int port = 0;
-    char *path = NULL;
 
     struct xs_handle *xh = xshandle(self);
     PyObject *val = NULL;
@@ -702,10 +707,10 @@
     if (!xh)
         goto exit;
     if (!PyArg_ParseTupleAndKeywords(args, kwds, arg_spec, kwd_spec,
-                                     &dom, &page, &port, &path))
-        goto exit;
-    Py_BEGIN_ALLOW_THREADS
-    xsval = xs_introduce_domain(xh, dom, page, port, path);
+                                     &dom, &page, &port))
+        goto exit;
+    Py_BEGIN_ALLOW_THREADS
+    xsval = xs_introduce_domain(xh, dom, page, port);
     Py_END_ALLOW_THREADS
     if (!xsval) {
         PyErr_SetFromErrno(PyExc_RuntimeError);
@@ -790,11 +795,10 @@
 }
 
 #define xspy_get_domain_path_doc "\n"                  \
-       "Return store path of domain.\n"                \
+       "Return store path of domain, whether or not the domain exists.\n" \
        " domid [int]: domain id\n"                     \
        "\n"                                            \
        "Returns: [string] domain store path.\n"        \
-       "         None if domid doesn't exist.\n"       \
        "Raises RuntimeError on error.\n"               \
        "\n"
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/util/auxbin.py
--- a/tools/python/xen/util/auxbin.py   Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/util/auxbin.py   Mon Oct 24 15:08:13 2005
@@ -19,6 +19,10 @@
 LIB_BIN_32 = "/usr/lib/xen/bin"
 LIB_BIN_64 = "/usr/lib64/xen/bin"
 
+## The architectures on which the LIB_BIN_64 directory is used.  This
+# deliberately excludes ia64.
+LIB_64_ARCHS = [ 'x86_64', 'ppc64', 's390x', 'sparc64']
+
 
 import os
 import os.path
@@ -38,7 +42,7 @@
 
 def path():
     machine = os.uname()[4]
-    if machine.find('64') != -1 and os.path.exists(LIB_BIN_64):
+    if machine in LIB_64_ARCHS and os.path.exists(LIB_BIN_64):
         return LIB_BIN_64
     else:
         return LIB_BIN_32
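
The effect of the new whitelist is easiest to see from the caller's side; a small sketch (the output depends on the host architecture and on whether /usr/lib64/xen/bin exists):

    import os
    from xen.util import auxbin

    print os.uname()[4]      # e.g. 'x86_64', 'ia64', ...
    print auxbin.path()      # LIB_BIN_64 only for whitelisted 64-bit arches;
                             # ia64 now falls back to LIB_BIN_32
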
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/web/SrvBase.py
--- a/tools/python/xen/web/SrvBase.py   Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/web/SrvBase.py   Mon Oct 24 15:08:13 2005
@@ -86,7 +86,7 @@
             except Exception, exn:
                 log.exception("Request %s failed.", op)
                 if req.useSxp():
-                    return ['xend.err', "Exception: " + str(exn)]
+                    return ['xend.err', str(exn)]
                 else:
                     return "<p>%s</p>" % str(exn)
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/Vifctl.py
--- a/tools/python/xen/xend/Vifctl.py   Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/Vifctl.py   Mon Oct 24 15:08:13 2005
@@ -19,87 +19,17 @@
 """Xend interface to networking control scripts.
 """
 import os
-import os.path
-import xen.util.process
 
-from xen.xend import XendRoot
-xroot = XendRoot.instance()
+import XendRoot
 
-"""Where network control scripts live."""
-SCRIPT_DIR = xroot.network_script_dir
 
-def network(op, script=None, bridge=None, antispoof=None):
+def network(op):
     """Call a network control script.
-    Xend calls this with op 'start' when it starts.
 
-    @param op:        operation (start, stop, status)
-    @param script:    network script name
-    @param bridge:    xen bridge
-    @param antispoof: whether to enable IP antispoofing rules
+    @param op: operation (start, stop)
     """
-    if op not in ['start', 'stop', 'status']:
-        raise ValueError('Invalid operation:' + op)
-    if script is None:
-        script = xroot.get_network_script()
-    if bridge is None:
-        bridge = xroot.get_vif_bridge()
-    if antispoof is None:
-        antispoof = xroot.get_vif_antispoof()
-    script = os.path.join(SCRIPT_DIR, script)
-    args = [op]
-    args.append("bridge='%s'" % bridge)
-    if antispoof:
-        args.append("antispoof=yes")
-    else:
-        args.append("antispoof=no")
-    args = ' '.join(args)
-    ret = xen.util.process.runscript(script + ' ' + args)
-    if len(ret):
-        return ret.splitlines()[0]
-
-def set_vif_name(vif_old, vif_new):
-    if vif_old == vif_new:
-        vif = vif_new
-        return vif
-    if os.system("ip link show %s" % vif_old) == 0:
-        os.system("ip link set %s down" % vif_old)
-        os.system("ip link set %s name %s" % (vif_old, vif_new))
-        os.system("ip link set %s up" % vif_new)
-    if os.system("ip link show %s" % vif_new) == 0:
-        vif = vif_new
-    else:
-        vif = vif_old
-    return vif
-
-def vifctl(op, vif=None, script=None, domain=None, mac=None, bridge=None, ipaddr=None):
-    """Call a vif control script.
-    Xend calls this when bringing vifs up or down.
-
-    @param op:     vif operation (up, down)
-    @param vif:    vif name
-    @param script: name of control script
-    @param domain: name of domain the vif is on
-    @param mac:    vif MAC address
-    @param bridge: bridge to add the vif to
-    @param ipaddr: list of ipaddrs the vif may use
-    """
-    if op not in ['up', 'down']:
-        raise ValueError('Invalid operation:' + op)
-    if script is None:
-        script = xroot.get_vif_script()
-    if bridge is None:
-        bridge = xroot.get_vif_bridge()
-    script = os.path.join(SCRIPT_DIR, script)
-    args = [op]
-    args.append("vif='%s'" % vif)
-    args.append("domain='%s'" % domain)
-    args.append("mac='%s'" % mac)
-    if bridge:
-        args.append("bridge='%s'" % bridge)
-    if ipaddr:
-        ips = ' '.join(ipaddr)
-        args.append("ip='%s'" % ips)
-    args = ' '.join(args)
-    ret = xen.util.process.runscript(script + ' ' + args)
-    if len(ret):
-        return ret.splitlines()[0]
+    if op not in ['start', 'stop']:
+        raise ValueError('Invalid operation: ' + op)
+    script = XendRoot.instance().get_network_script()
+    if script:
+        os.spawnl(os.P_WAIT, script, script, op)
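
After this simplification, xend's network-script handling reduces to the call below; a minimal sketch (it assumes a network script is configured for xend):

    from xen.xend import Vifctl

    Vifctl.network('start')    # run the configured script with 'start'
    Vifctl.network('stop')     # ...and with 'stop'; anything else raises ValueError
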
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/XendCheckpoint.py
--- a/tools/python/xen/xend/XendCheckpoint.py   Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/XendCheckpoint.py   Mon Oct 24 15:08:13 2005
@@ -118,9 +118,11 @@
 
     dominfo = xd.restore_(vmconfig)
 
-    assert dominfo.store_channel
-    assert dominfo.console_channel
-    assert dominfo.getDomainPath()
+    store_port   = dominfo.getStorePort()
+    console_port = dominfo.getConsolePort()
+
+    assert store_port
+    assert console_port
 
     try:
         l = read_exact(fd, sizeof_unsigned_long,
@@ -130,34 +132,21 @@
             raise XendError(
                 "not a valid guest state file: pfn count out of range")
 
-        store_evtchn = dominfo.store_channel
-        console_evtchn = dominfo.console_channel
-
-        cmd = [xen.util.auxbin.pathTo(XC_RESTORE), str(xc.handle()), str(fd),
-               str(dominfo.getDomid()), str(nr_pfns),
-               str(store_evtchn), str(console_evtchn)]
+        cmd = map(str, [xen.util.auxbin.pathTo(XC_RESTORE),
+                        xc.handle(), fd, dominfo.getDomid(), nr_pfns,
+                        store_port, console_port])
         log.debug("[xc_restore]: %s", string.join(cmd))
 
-        def restoreInputHandler(line, _):
-            m = re.match(r"^(store-mfn) (\d+)$", line)
-            if m:
-                store_mfn = int(m.group(2))
-                dominfo.setStoreRef(store_mfn)
-                log.debug("IntroduceDomain %d %d %d %s",
-                          dominfo.getDomid(),
-                          store_mfn,
-                          dominfo.store_channel,
-                          dominfo.getDomainPath())
-                IntroduceDomain(dominfo.getDomid(),
-                                store_mfn,
-                                dominfo.store_channel,
-                                dominfo.getDomainPath())
-            else:
-                m = re.match(r"^(console-mfn) (\d+)$", line)
-                if m:
-                    dominfo.setConsoleRef(int(m.group(2)))
-
-        forkHelper(cmd, fd, restoreInputHandler, True)
+        handler = RestoreInputHandler()
+
+        forkHelper(cmd, fd, handler.handler, True)
+
+        if handler.store_mfn is None or handler.console_mfn is None:
+            raise XendError('Could not read store/console MFN')
+
+        dominfo.unpause()
+
+        dominfo.completeRestore(handler.store_mfn, handler.console_mfn)
 
         return dominfo
     except:
@@ -165,12 +154,29 @@
         raise
 
 
+class RestoreInputHandler:
+    def __init__(self):
+        self.store_mfn = None
+        self.console_mfn = None
+
+
+    def handler(self, line, _):
+        m = re.match(r"^(store-mfn) (\d+)$", line)
+        if m:
+            self.store_mfn = int(m.group(2))
+        else:
+            m = re.match(r"^(console-mfn) (\d+)$", line)
+            if m:
+                self.console_mfn = int(m.group(2))
+
+
 def forkHelper(cmd, fd, inputHandler, closeToChild):
     child = xPopen3(cmd, True, -1, [fd, xc.handle()])
 
     if closeToChild:
         child.tochild.close()
 
+    lasterr = "error unknown"
     try:
         fds = [child.fromchild.fileno(),
                child.childerr.fileno()]
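
The restore path now collects the store and console MFNs with a small stateful handler instead of an inner closure; in isolation it behaves as below (importing the module needs a xend environment, and the MFN values here are invented; the lines shown are the form printed by the xc_restore helper):

    from xen.xend import XendCheckpoint

    h = XendCheckpoint.RestoreInputHandler()
    h.handler("store-mfn 262145", None)
    h.handler("console-mfn 262146", None)
    print h.store_mfn, h.console_mfn       # 262145 262146
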
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/XendClient.py
--- a/tools/python/xen/xend/XendClient.py       Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/XendClient.py       Mon Oct 24 15:08:13 2005
@@ -199,6 +199,9 @@
     def xend_list_domains(self):
         return self.xendGet(self.domainurl(), {'detail': '1'})
 
+    def xend_domain_vcpuinfo(self, dom):
+        return self.xendGet(self.domainurl(dom), {'op': 'vcpuinfo'})
+
     def xend_domain_create(self, conf):
         return self.xendPost(self.domainurl(),
                              {'op'      : 'create',
@@ -255,7 +258,7 @@
         return self.xendPost(self.domainurl(id),
                              {'op'      : 'pincpu',
                               'vcpu'    : vcpu,
-                              'cpumap'  : cpumap })
+                              'cpumap'  : str(cpumap) })
 
     def xend_domain_cpu_bvt_set(self, id, mcuadv, warpback, warpvalue, warpl, warpu):
         return self.xendPost(self.domainurl(id),
@@ -266,6 +269,10 @@
                               'warpl'    : warpl,
                               'warpu'    : warpu })
 
+    def xend_domain_cpu_sedf_get(self, id):
+        return self.xendPost(self.domainurl(id),
+                             {'op' : 'cpu_sedf_get'})
+
     def xend_domain_cpu_sedf_set(self, id, period, slice, latency, extratime, weight):
         return self.xendPost(self.domainurl(id),
                              {'op'        : 'cpu_sedf_set',
@@ -286,11 +293,10 @@
                              'target'    : mem_target })
         return val
 
-    def xend_domain_vcpu_hotplug(self, id, vcpu, state):
-        return self.xendPost(self.domainurl(id),
-                            {'op'         : 'vcpu_hotplug',
-                             'vcpu'       : vcpu,
-                             'state'      : state })
+    def xend_domain_set_vcpus(self, dom, vcpus):
+        return self.xendPost(self.domainurl(dom),
+                            {'op'    : 'set_vcpus',
+                             'vcpus' : vcpus })
 
     def xend_domain_vif_limit(self, id, vif, credit, period):
         return self.xendPost(self.domainurl(id),
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py       Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/XendDomain.py       Mon Oct 24 15:08:13 2005
@@ -21,8 +21,10 @@
  Nothing here is persistent (across reboots).
  Needs to be persistent for one uptime.
 """
+
+import logging
 import os
-import logging
+import sys
 import threading
 
 import xen.lowlevel.xc
@@ -57,13 +59,16 @@
         # So we stuff the XendDomain instance (self) into xroot's components.
         xroot.add_component("xen.xend.XendDomain", self)
         self.domains = {}
-        self.domains_lock = threading.Condition()
+        self.domains_lock = threading.RLock()
         self.watchReleaseDomain()
 
         self.domains_lock.acquire()
         try:
+            self._add_domain(
+                XendDomainInfo.recreate(self.xen_domains()[PRIV_DOMAIN],
+                                        True))
+            self.dom0_setup()
             self.refresh(True)
-            self.dom0_setup()
         finally:
             self.domains_lock.release()
 
@@ -129,7 +134,14 @@
     def dom0_setup(self):
         """Expects to be protected by the domains_lock."""
         dom0 = self.domains[PRIV_DOMAIN]
-        dom0.dom0_enforce_vcpus()
+
+        # get max number of vcpus to use for dom0 from config
+        target = int(xroot.get_dom0_vcpus())
+        log.debug("number of vcpus to use is %d", target)
+   
+        # target == 0 means use all processors
+        if target > 0:
+            dom0.setVCpuCount(target)
 
 
     def _add_domain(self, info):
@@ -171,25 +183,23 @@
                             'Cannot recreate information for dying domain %d.'
                             '  Xend will ignore this domain from now on.',
                             doms[d]['dom'])
+                elif d == PRIV_DOMAIN:
+                    log.fatal(
+                        "No record of privileged domain %d!  Terminating.", d)
+                    sys.exit(1)
                 else:
                     try:
-                        dominfo = XendDomainInfo.recreate(doms[d])
-                        self._add_domain(dominfo)
+                        self._add_domain(
+                            XendDomainInfo.recreate(doms[d], False))
                     except:
-                        if d == PRIV_DOMAIN:
-                            log.exception(
-                                "Failed to recreate information for domain "
-                                "%d.  Doing nothing except crossing my "
-                                "fingers.", d)
-                        else:
-                            log.exception(
-                                "Failed to recreate information for domain "
-                                "%d.  Destroying it in the hope of "
-                                "recovery.", d)
-                            try:
-                                xc.domain_destroy(dom = d)
-                            except:
-                                log.exception('Destruction of %d failed.', d)
+                        log.exception(
+                            "Failed to recreate information for domain "
+                            "%d.  Destroying it in the hope of "
+                            "recovery.", d)
+                        try:
+                            xc.domain_destroy(dom = d)
+                        except:
+                            log.exception('Destruction of %d failed.', d)
 
 
     ## public:
@@ -318,12 +328,6 @@
             n = len(matching)
             if n == 1:
                 return matching[0]
-            elif n > 1:
-                log.error('Name uniqueness has been violated for name %s!  '
-                          'Recovering by renaming:', name)
-                for d in matching:
-                    d.renameUniquely()
-
             return None
         finally:
             self.domains_lock.release()
@@ -343,7 +347,7 @@
             dominfo = self.domain_lookup(domid)
             log.info("Domain %s (%d) unpaused.", dominfo.getName(),
                      dominfo.getDomid())
-            return xc.domain_unpause(dom=dominfo.getDomid())
+            return dominfo.unpause()
         except Exception, ex:
             raise XendError(str(ex))
 
@@ -354,7 +358,7 @@
             dominfo = self.domain_lookup(domid)
             log.info("Domain %s (%d) paused.", dominfo.getName(),
                      dominfo.getDomid())
-            return xc.domain_pause(dom=dominfo.getDomid())
+            return dominfo.pause()
         except Exception, ex:
             raise XendError(str(ex))
 
@@ -408,9 +412,12 @@
     def domain_pincpu(self, domid, vcpu, cpumap):
         """Set which cpus vcpu can use
 
-        @param cpumap:  bitmap of usable cpus
-        """
-        dominfo = self.domain_lookup(domid)
+        @param cpumap:  string repr of list of usable cpus
+        """
+        dominfo = self.domain_lookup(domid)
+        # convert cpumap string into a list of ints
+        cpumap = map(lambda x: int(x),
+                     cpumap.replace("[", "").replace("]", "").split(","))
         try:
             return xc.domain_pincpu(dominfo.getDomid(), vcpu, cpumap)
         except Exception, ex:
@@ -469,14 +476,7 @@
         if not dev:
             raise XendError("invalid vif")
         return dev.setCreditLimit(credit, period)
-        
-    def domain_shadow_control(self, domid, op):
-        """Shadow page control."""
-        dominfo = self.domain_lookup(domid)
-        try:
-            return xc.shadow_control(dominfo.getDomid(), op)
-        except Exception, ex:
-            raise XendError(str(ex))
+
 
     def domain_maxmem_set(self, domid, mem):
         """Set the memory limit for a domain.
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/XendDomainInfo.py   Mon Oct 24 15:08:13 2005
@@ -24,10 +24,10 @@
 
 """
 
+import logging
 import string
 import time
 import threading
-import errno
 
 import xen.lowlevel.xc
 from xen.util import asserts
@@ -38,11 +38,10 @@
 from xen.xend import sxp
 from xen.xend import XendRoot
 from xen.xend.XendBootloader import bootloader
-from xen.xend.XendLogging import log
 from xen.xend.XendError import XendError, VmError
 from xen.xend.XendRoot import get_component
 
-from uuid import getUuid
+import uuid
 
 from xen.xend.xenstore.xstransact import xstransact
 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain
@@ -78,29 +77,25 @@
     "rename-restart"
     ]
 
-STATE_VM_OK         = "ok"
-STATE_VM_TERMINATED = "terminated"
-
-"""Flag for a block device backend domain."""
-SIF_BLK_BE_DOMAIN = (1<<4)
-
-"""Flag for a net device backend domain."""
-SIF_NET_BE_DOMAIN = (1<<5)
-
-"""Flag for a TPM device backend domain."""
-SIF_TPM_BE_DOMAIN = (1<<7)
-
+STATE_DOM_OK       = 1
+STATE_DOM_SHUTDOWN = 2
 
 SHUTDOWN_TIMEOUT = 30
-
 
 DOMROOT = '/local/domain/'
 VMROOT  = '/vm/'
 
 ZOMBIE_PREFIX = 'Zombie-'
 
+"""Minimum time between domain restarts in seconds."""
+MINIMUM_RESTART_TIME = 20
+
+
 xc = xen.lowlevel.xc.new()
 xroot = XendRoot.instance()
+
+log = logging.getLogger("xend.XendDomainInfo")
+#log.setLevel(logging.TRACE)
 
 
 ## Configuration entries that we expect to round-trip -- be read from the
@@ -111,6 +106,7 @@
 # file, so those are handled separately.
 ROUNDTRIPPING_CONFIG_ENTRIES = [
         ('name',         str),
+        ('uuid',         str),
         ('ssidref',      int),
         ('vcpus',        int),
         ('vcpu_avail',   int),
@@ -130,8 +126,6 @@
 #                its VCPUs.  This is translated to
 #                <dompath>/cpu/<id>/availability = {online,offline} for use
 #                by the guest domain.
-#   vcpu_to_cpu: the current mapping between virtual CPUs and the physical
-#                CPU it is using.
 #   cpumap:      a list of bitmaps, one for each VCPU, giving the physical
 #                CPUs that that VCPU may use.
 #   cpu:         a configuration setting requesting that VCPU 0 is pinned to
@@ -152,12 +146,10 @@
 
     log.debug("XendDomainInfo.create(%s)", config)
 
-    vm = XendDomainInfo(getUuid(), parseConfig(config))
+    vm = XendDomainInfo(parseConfig(config))
     try:
         vm.construct()
         vm.initDomain()
-        vm.construct_image()
-        vm.configure()
         vm.storeVmDetails()
         vm.storeDomDetails()
         vm.refreshShutdown()
@@ -168,7 +160,7 @@
         raise
 
 
-def recreate(xeninfo):
+def recreate(xeninfo, priv):
     """Create the VM object for an existing domain.  The domain must not
     be dying, as the paths in the store should already have been removed,
     and asking us to recreate them causes problems."""
@@ -178,40 +170,43 @@
     assert not xeninfo['dying']
 
     domid = xeninfo['dom']
+    uuid1 = xeninfo['handle']
+    xeninfo['uuid'] = uuid.toString(uuid1)
+    dompath = GetDomainPath(domid)
+    if not dompath:
+        raise XendError(
+            'No domain path in store for existing domain %d' % domid)
+
+    log.info("Recreating domain %d, UUID %s.", domid, xeninfo['uuid'])
     try:
-        dompath = GetDomainPath(domid)
-        if not dompath:
-            raise XendError(
-                'No domain path in store for existing domain %d' % domid)
         vmpath = xstransact.Read(dompath, "vm")
         if not vmpath:
             raise XendError(
                 'No vm path in store for existing domain %d' % domid)
-        uuid = xstransact.Read(vmpath, "uuid")
-        if not uuid:
+        uuid2_str = xstransact.Read(vmpath, "uuid")
+        if not uuid2_str:
             raise XendError(
                 'No vm/uuid path in store for existing domain %d' % domid)
 
-        log.info("Recreating domain %d, UUID %s.", domid, uuid)
-
-        vm = XendDomainInfo(uuid, xeninfo, domid, True)
+        uuid2 = uuid.fromString(uuid2_str)
+
+        if uuid1 != uuid2:
+            raise XendError(
+                'Uuid in store does not match uuid for existing domain %d: '
+                '%s != %s' % (domid, uuid2_str, xeninfo['uuid']))
+
+        vm = XendDomainInfo(xeninfo, domid, dompath, True)
 
     except Exception, exn:
-        log.warn(str(exn))
-
-        uuid = getUuid()
-
-        log.info("Recreating domain %d with new UUID %s.", domid, uuid)
-
-        vm = XendDomainInfo(uuid, xeninfo, domid, True)
+        if priv:
+            log.warn(str(exn))
+
+        vm = XendDomainInfo(xeninfo, domid, dompath, True)
         vm.removeDom()
+        vm.removeVm()
         vm.storeVmDetails()
         vm.storeDomDetails()
 
-    vm.create_channel()
-    if domid == 0:
-        vm.initStoreConnection()
-
     vm.refreshShutdown(xeninfo)
     return vm
 
@@ -224,15 +219,13 @@
 
     log.debug("XendDomainInfo.restore(%s)", config)
 
-    uuid = sxp.child_value(config, 'uuid')
-    vm = XendDomainInfo(uuid, parseConfig(config))
+    vm = XendDomainInfo(parseConfig(config))
     try:
         vm.construct()
-        vm.configure()
-        vm.create_channel()
         vm.storeVmDetails()
+        vm.createDevices()
+        vm.createChannels()
         vm.storeDomDetails()
-        vm.refreshShutdown()
         return vm
     except:
         vm.destroy()
@@ -293,7 +286,7 @@
     restart = get_cfg('restart')
     if restart:
         def handle_restart(event, val):
-            if not event in result:
+            if result[event] is None:
                 result[event] = val
 
         if restart == "onreboot":
@@ -346,18 +339,15 @@
         log.debug("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
     return None
 
+
 class XendDomainInfo:
-    """Virtual machine object."""
-
-    """Minimum time between domain restarts in seconds.
-    """
-    MINIMUM_RESTART_TIME = 20
-
-
-    def __init__(self, uuid, info, domid = None, augment = False):
-
-        self.uuid = uuid
+
+    def __init__(self, info, domid = None, dompath = None, augment = False):
+
         self.info = info
+
+        if not self.infoIsSet('uuid'):
+            self.info['uuid'] = uuid.toString(uuid.create())
 
         if domid is not None:
             self.domid = domid
@@ -366,11 +356,8 @@
         else:
             self.domid = None
 
-        self.vmpath  = VMROOT + uuid
-        if self.domid is None:
-            self.dompath = None
-        else:
-            self.dompath = DOMROOT + str(self.domid)
+        self.vmpath  = VMROOT + self.info['uuid']
+        self.dompath = dompath
 
         if augment:
             self.augmentInfo()
@@ -379,15 +366,17 @@
 
         self.image = None
 
-        self.store_channel = None
+        self.store_port = None
         self.store_mfn = None
-        self.console_channel = None
+        self.console_port = None
         self.console_mfn = None
 
-        self.state = STATE_VM_OK
+        self.state = STATE_DOM_OK
         self.state_updated = threading.Condition()
         self.refresh_shutdown_lock = threading.Condition()
 
+
+    ## private:
 
     def augmentInfo(self):
         """Augment self.info, as given to us through {@link #recreate}, with
@@ -436,7 +425,10 @@
             defaultInfo('on_crash',     lambda: "restart")
             defaultInfo('cpu',          lambda: None)
             defaultInfo('cpu_weight',   lambda: 1.0)
-            defaultInfo('vcpus',        lambda: 1)
+            defaultInfo('vcpus',        lambda: int(1))
+
+            self.info['vcpus'] = int(self.info['vcpus'])
+
             defaultInfo('vcpu_avail',   lambda: (1 << self.info['vcpus']) - 1)
             defaultInfo('bootloader',   lambda: None)
             defaultInfo('backend',      lambda: [])
@@ -509,11 +501,6 @@
             if self.info['maxmem_KiB'] > self.info['memory_KiB']:
                 self.info['maxmem_KiB'] = self.info['memory_KiB']
 
-            # Validate the given backend names.
-            for s in self.info['backend']:
-                if s not in backendFlags:
-                    raise VmError('Invalid backend type: %s' % s)
-
             for (n, c) in self.info['device']:
                 if not n or not c or n not in controllerClasses:
                     raise VmError('invalid device (%s, %s)' %
@@ -541,28 +528,51 @@
     def gatherVm(self, *args):
         return xstransact.Gather(self.vmpath, *args)
 
+
+    ## public:
+
     def storeVm(self, *args):
         return xstransact.Store(self.vmpath, *args)
 
+
+    ## private:
+
     def readDom(self, *args):
         return xstransact.Read(self.dompath, *args)
 
     def writeDom(self, *args):
         return xstransact.Write(self.dompath, *args)
 
+
+    ## public:
+
     def removeDom(self, *args):
         return xstransact.Remove(self.dompath, *args)
 
-    def gatherDom(self, *args):
-        return xstransact.Gather(self.dompath, *args)
+
+    ## private:
 
     def storeDom(self, *args):
         return xstransact.Store(self.dompath, *args)
 
 
+    ## public:
+
+    def completeRestore(self, store_mfn, console_mfn):
+
+        log.debug("XendDomainInfo.completeRestore")
+
+        self.store_mfn = store_mfn
+        self.console_mfn = console_mfn
+
+        self.introduceDomain()
+        self.storeDomDetails()
+        self.refreshShutdown()
+
+
     def storeVmDetails(self):
         to_store = {
-            'uuid':               self.uuid,
+            'uuid':               self.info['uuid'],
 
             # XXX
             'memory/target':      str(self.info['memory_KiB'])
@@ -585,26 +595,39 @@
         to_store = {
             'domid':              str(self.domid),
             'vm':                 self.vmpath,
-
+            'console/limit':      str(xroot.get_console_limit() * 1024),
             'memory/target':      str(self.info['memory_KiB'])
             }
 
-        for (k, v) in self.info.items():
-            if v:
-                to_store[k] = str(v)
-
+        def f(n, v):
+            if v is not None:
+                to_store[n] = str(v)
+
+        f('console/port',     self.console_port)
+        f('console/ring-ref', self.console_mfn)
+        f('store/port',       self.store_port)
+        f('store/ring-ref',   self.store_mfn)
+
+        to_store.update(self.vcpuDomDetails())
+
+        log.debug("Storing domain details: %s", to_store)
+
+        self.writeDom(to_store)
+
+
+    ## private:
+
+    def vcpuDomDetails(self):
         def availability(n):
             if self.info['vcpu_avail'] & (1 << n):
                 return 'online'
             else:
                 return 'offline'
 
+        result = {}
         for v in range(0, self.info['vcpus']):
-            to_store["cpu/%d/availability" % v] = availability(v)
-
-        log.debug("Storing domain details: %s", to_store)
-
-        self.writeDom(to_store)
+            result["cpu/%d/availability" % v] = availability(v)
+        return result
 
 
     def setDomid(self, domid):
@@ -629,11 +652,26 @@
     def getDomainPath(self):
         return self.dompath
 
-    def getUuid(self):
-        return self.uuid
+
+    def getStorePort(self):
+        """For use only by image.py and XendCheckpoint.py."""
+        return self.store_port
+
+
+    def getConsolePort(self):
+        """For use only by image.py and XendCheckpoint.py"""
+        return self.console_port
+
 
     def getVCpuCount(self):
         return self.info['vcpus']
+
+
+    def setVCpuCount(self, vcpus):
+        self.info['vcpu_avail'] = (1 << vcpus) - 1
+        self.storeVm('vcpu_avail', self.info['vcpu_avail'])
+        self.writeDom(self.vcpuDomDetails())
+
 
     def getSsidref(self):
         return self.info['ssidref']
@@ -641,15 +679,6 @@
     def getMemoryTarget(self):
         """Get this domain's target memory size, in KiB."""
         return self.info['memory_KiB']
-
-    def setStoreRef(self, ref):
-        self.store_mfn = ref
-        self.storeDom("store/ring-ref", ref)
-
-
-    def getBackendFlags(self):
-        return reduce(lambda x, y: x | backendFlags[y],
-                      self.info['backend'], 0)
 
 
     def refreshShutdown(self, xeninfo = None):
@@ -685,6 +714,11 @@
                 return
 
             elif xeninfo['crashed']:
+                if self.readDom('xend/shutdown_completed'):
+                    # We've seen this shutdown already, but we are preserving
+                    # the domain for debugging.  Leave it alone.
+                    return
+
                 log.warn('Domain has crashed: name=%s id=%d.',
                          self.info['name'], self.domid)
 
@@ -708,7 +742,7 @@
                     self.clearRestart()
 
                     if reason == 'suspend':
-                        self.state_set(STATE_VM_TERMINATED)
+                        self.state_set(STATE_DOM_SHUTDOWN)
                         # Don't destroy the domain.  XendCheckpoint will do
                         # this once it has finished.
                     elif reason in ['poweroff', 'reboot']:
@@ -716,6 +750,11 @@
                     else:
                         self.destroy()
 
+            elif self.dompath is None:
+                # We have yet to manage to call introduceDomain on this
+                # domain.  This can happen if a restore is in progress, or has
+                # failed.  Ignore this domain.
+                pass
             else:
                 # Domain is alive.  If we are shutting it down, then check
                 # the timeout on that, and destroy it if necessary.
@@ -783,11 +822,6 @@
 
     ## public:
 
-    def setConsoleRef(self, ref):
-        self.console_mfn = ref
-        self.storeDom("console/ring-ref", ref)
-
-
     def setMemoryTarget(self, target):
         """Set the memory target of this domain.
         @param target In MiB.
@@ -802,7 +836,7 @@
         """Update with info from xc.domain_getinfo().
         """
 
-        log.debug("XendDomainInfo.update(%s) on domain %d", info, self.domid)
+        log.trace("XendDomainInfo.update(%s) on domain %d", info, self.domid)
 
         if not info:
             info = dom_get(self.domid)
@@ -813,7 +847,7 @@
         self.validateInfo()
         self.refreshShutdown(info)
 
-        log.debug("XendDomainInfo.update done on domain %d: %s", self.domid,
+        log.trace("XendDomainInfo.update done on domain %d: %s", self.domid,
                   self.info)
 
 
@@ -821,19 +855,23 @@
 
     def state_set(self, state):
         self.state_updated.acquire()
-        if self.state != state:
-            self.state = state
-            self.state_updated.notifyAll()
-        self.state_updated.release()
+        try:
+            if self.state != state:
+                self.state = state
+                self.state_updated.notifyAll()
+        finally:
+            self.state_updated.release()
 
 
     ## public:
 
     def waitForShutdown(self):
         self.state_updated.acquire()
-        while self.state == STATE_VM_OK:
-            self.state_updated.wait()
-        self.state_updated.release()
+        try:
+            while self.state == STATE_DOM_OK:
+                self.state_updated.wait()
+        finally:
+            self.state_updated.release()
 
 
     def __str__(self):
@@ -865,11 +903,11 @@
         return self.getDeviceController(deviceClass).destroyDevice(devid)
 
 
-    ## private:
-
     def getDeviceSxprs(self, deviceClass):
         return self.getDeviceController(deviceClass).sxprs()
 
+
+    ## private:
 
     def getDeviceConfigurations(self, deviceClass):
         return self.getDeviceController(deviceClass).configurations()
@@ -887,7 +925,6 @@
     def sxpr(self):
         sxpr = ['domain',
                 ['domid',   self.domid],
-                ['uuid',    self.uuid],
                 ['memory',  self.info['memory_KiB'] / 1024]]
 
         for e in ROUNDTRIPPING_CONFIG_ENTRIES:
@@ -925,11 +962,6 @@
         if self.infoIsSet('cpu_time'):
             sxpr.append(['cpu_time', self.info['cpu_time']/1e9])
         sxpr.append(['vcpus', self.info['vcpus']])
-        if self.infoIsSet('cpumap'):
-            sxpr.append(['cpumap', self.info['cpumap']])
-        if self.infoIsSet('vcpu_to_cpu'):
-            sxpr.append(['cpu', self.info['vcpu_to_cpu'][0]])
-            sxpr.append(['vcpu_to_cpu', self.prettyVCpuMap()])
             
         if self.infoIsSet('start_time'):
             up_time =  time.time() - self.info['start_time']
@@ -944,12 +976,37 @@
         return sxpr
 
 
+    def getVCPUInfo(self):
+        try:
+            def filter_cpumap(map, max):
+                return filter(lambda x: x >= 0, map[0:max])
+
+            # We include the domain name and ID, to help xm.
+            sxpr = ['domain',
+                    ['domid',      self.domid],
+                    ['name',       self.info['name']],
+                    ['vcpu_count', self.info['vcpus']]]
+
+            for i in range(0, self.info['vcpus']):
+                info = xc.vcpu_getinfo(self.domid, i)
+
+                sxpr.append(['vcpu',
+                             ['number',   i],
+                             ['online',   info['online']],
+                             ['blocked',  info['blocked']],
+                             ['running',  info['running']],
+                             ['cpu_time', info['cpu_time'] / 1e9],
+                             ['cpu',      info['cpu']],
+                             ['cpumap',   filter_cpumap(info['cpumap'],
+                                                        self.info['vcpus'])]])
+
+            return sxpr
+
+        except RuntimeError, exn:
+            raise XendError(str(exn))
+                      
+
     ## private:
-
-    def prettyVCpuMap(self):
-        return '|'.join(map(str,
-                            self.info['vcpu_to_cpu'][0:self.info['vcpus']]))
-
 
     def check_name(self, name):
         """Check if a vm name is valid. Valid names contain alphabetic 
characters,
@@ -988,17 +1045,28 @@
                   self.domid,
                   self.info['ssidref'])
 
-        self.domid = xc.domain_create(dom = 0, ssidref = self.info['ssidref'])
+        self.domid = xc.domain_create(
+            dom = 0, ssidref = self.info['ssidref'],
+            handle = uuid.fromString(self.info['uuid']))
 
         if self.domid < 0:
             raise VmError('Creating domain failed: name=%s' %
                           self.info['name'])
 
-        self.dompath = DOMROOT + str(self.domid)
-
-        # Ensure that the domain entry is clean.  This prevents a stale
-        # shutdown_start_time from killing the domain, for example.
+        self.dompath = GetDomainPath(self.domid)
+
         self.removeDom()
+
+        # Set maximum number of vcpus in domain
+        xc.domain_max_vcpus(self.domid, int(self.info['vcpus']))
+
+
+    def introduceDomain(self):
+        assert self.domid is not None
+        assert self.store_mfn is not None
+        assert self.store_port is not None
+        
+        IntroduceDomain(self.domid, self.store_mfn, self.store_port)
 
 
     def initDomain(self):
@@ -1018,28 +1086,28 @@
             self.image.handleBootloading()
 
         xc.domain_setcpuweight(self.domid, self.info['cpu_weight'])
-        # XXX Merge with configure_maxmem?
+
         m = self.image.getDomainMemory(self.info['memory_KiB'])
-        xc.domain_setmaxmem(self.domid, m)
+        xc.domain_setmaxmem(self.domid, maxmem_kb = m)
         xc.domain_memory_increase_reservation(self.domid, m, 0, 0)
 
         cpu = self.info['cpu']
         if cpu is not None and cpu != -1:
             xc.domain_pincpu(self.domid, 0, 1 << cpu)
 
+        self.createChannels()
+
+        channel_details = self.image.createImage()
+
+        self.store_mfn = channel_details['store_mfn']
+        if 'console_mfn' in channel_details:
+            self.console_mfn = channel_details['console_mfn']
+
+        self.introduceDomain()
+
+        self.createDevices()
+
         self.info['start_time'] = time.time()
-
-        log.debug('init_domain> Created domain=%d name=%s memory=%d',
-                  self.domid, self.info['name'], self.info['memory_KiB'])
-
-
-    def construct_image(self):
-        """Construct the boot image for the domain.
-        """
-        self.create_channel()
-        self.image.createImage()
-        IntroduceDomain(self.domid, self.store_mfn,
-                        self.store_channel, self.dompath)
 
 
     ## public:
@@ -1065,11 +1133,11 @@
 
         try:
             if not self.info['name'].startswith(ZOMBIE_PREFIX):
-                self.info['name'] = self.generateZombieName()
+                self.info['name'] = ZOMBIE_PREFIX + self.info['name']
         except:
             log.exception("Renaming Zombie failed.")
 
-        self.state_set(STATE_VM_TERMINATED)
+        self.state_set(STATE_DOM_SHUTDOWN)
 
 
     def cleanupVm(self):
@@ -1124,50 +1192,37 @@
                 break
 
 
-    def eventChannel(self, path=None):
+    def createChannels(self):
+        """Create the channels to the domain.
+        """
+        self.store_port = self.createChannel()
+        self.console_port = self.createChannel()
+
+
+    def createChannel(self):
         """Create an event channel to the domain.
-        
-        @param path under which port is stored in db
         """
-        if path:
-            try:
-                return int(self.readDom(path))
-            except:
-                # The port is not yet set, i.e. the channel has not yet been
-                # created.
-                pass
-
         try:
-            port = xc.evtchn_alloc_unbound(dom=self.domid, remote_dom=0)
+            return xc.evtchn_alloc_unbound(dom=self.domid, remote_dom=0)
         except:
             log.exception("Exception in alloc_unbound(%d)", self.domid)
             raise
 
-        self.storeDom(path, port)
-        return port
-
-    def create_channel(self):
-        """Create the channels to the domain.
+
+    ## public:
+
+    def createDevices(self):
+        """Create the devices for a vm.
+
+        @raise: VmError for invalid devices
         """
-        self.store_channel = self.eventChannel("store/port")
-        self.console_channel = self.eventChannel("console/port")
-
-    def create_configured_devices(self):
+
         for (n, c) in self.info['device']:
             self.createDevice(n, c)
 
-
-    def create_devices(self):
-        """Create the devices for a vm.
-
-        @raise: VmError for invalid devices
-        """
-        self.create_configured_devices()
         if self.image:
             self.image.createDeviceModel()
 
-
-    ## public:
 
     def device_create(self, dev_config):
         """Create a new device.
@@ -1189,24 +1244,15 @@
         self.reconfigureDevice(deviceClass, devid, dev_config)
 
 
+    def pause(self):
+        xc.domain_pause(self.domid)
+
+
+    def unpause(self):
+        xc.domain_unpause(self.domid)
+
+
     ## private:
-
-    def restart_check(self):
-        """Check if domain restart is OK.
-        To prevent restart loops, raise an error if it is
-        less than MINIMUM_RESTART_TIME seconds since the last restart.
-        """
-        tnow = time.time()
-        if self.restart_time is not None:
-            tdelta = tnow - self.restart_time
-            if tdelta < self.MINIMUM_RESTART_TIME:
-                self.restart_cancel()
-                msg = 'VM %s restarting too fast' % self.info['name']
-                log.error(msg)
-                raise VmError(msg)
-        self.restart_time = tnow
-        self.restart_count += 1
-
 
     def restart(self, rename = False):
         """Restart the domain after it has exited.
@@ -1214,8 +1260,6 @@
         @param rename True if the old domain is to be renamed and preserved,
         False if it is to be destroyed.
         """
-
-        #            self.restart_check()
 
         config = self.sxpr()
 
@@ -1228,17 +1272,33 @@
 
         self.writeVm('xend/restart_in_progress', 'True')
 
+        now = time.time()
+        rst = self.readVm('xend/previous_restart_time')
+        log.debug("Previous restart time: %s", rst)
+        if rst:
+            rst = float(rst)
+            timeout = now - rst
+            if timeout < MINIMUM_RESTART_TIME:
+                log.error(
+                    'VM %s restarting too fast (%f seconds since the last '
+                    'restart).  Refusing to restart to avoid loops.',
+                    self.info['name'], timeout)
+                self.destroy()
+                return
+
+        self.writeVm('xend/previous_restart_time', str(now))
+
         try:
             if rename:
                 self.preserveForRestart()
             else:
-                self.destroy()
+                self.destroyDomain()
                 
             try:
                 xd = get_component('xen.xend.XendDomain')
                 new_dom = xd.domain_create(config)
                 try:
-                    xc.domain_unpause(new_dom.getDomid())
+                    new_dom.unpause()
                 except:
                     new_dom.destroy()
                     raise
@@ -1259,12 +1319,13 @@
         """
         
         new_name = self.generateUniqueName()
-        new_uuid = getUuid()
+        new_uuid = uuid.toString(uuid.create())
         log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
-                 self.info['name'], self.domid, self.uuid, new_name, new_uuid)
+                 self.info['name'], self.domid, self.info['uuid'],
+                 new_name, new_uuid)
         self.release_devices()
         self.info['name'] = new_name
-        self.uuid = new_uuid
+        self.info['uuid'] = new_uuid
         self.vmpath = VMROOT + new_uuid
         self.storeVmDetails()
         self.preserve()
@@ -1274,21 +1335,7 @@
         log.info("Preserving dead domain %s (%d).", self.info['name'],
                  self.domid)
         self.storeDom('xend/shutdown_completed', 'True')
-        self.state_set(STATE_VM_TERMINATED)
-
-
-    ## public:
-
-    def renameUniquely(self):
-        """Rename this domain so that it has a unique name.  This is used by
-        XendDomain to recover from non-uniqueness errors; we should never have
-        allowed the system to reach this state in the first place."""
-        new_name = self.generateUniqueName()
-        
-        log.error('Renaming %s (%d, %s) to %s', self.info['name'], self.domid,
-                  self.uuid, new_name)
-
-        self.setName(new_name)
+        self.state_set(STATE_DOM_SHUTDOWN)
 
 
     # private:
@@ -1302,18 +1349,6 @@
                 return name
             except VmError:
                 n += 1
-
-
-    def generateZombieName(self):
-        n = 0
-        name = ZOMBIE_PREFIX + self.info['name']
-        while True:
-            try:
-                self.check_name(name)
-                return name
-            except VmError:
-                n += 1
-                name = "%s%d-%s" % (ZOMBIE_PREFIX, n, self.info['name'])
 
 
     def configure_bootloader(self):
@@ -1337,73 +1372,10 @@
         self.config = sxp.merge(['vm', ['image', blcfg]], self.config)
 
 
-    def configure(self):
-        """Configure a vm.
-
-        """
-        self.configure_maxmem()
-        self.create_devices()
-
-
-    def configure_maxmem(self):
-        if self.image:
-            m = self.image.getDomainMemory(self.info['memory_KiB'])
-            xc.domain_setmaxmem(self.domid, maxmem_kb = m)
-
-
-    def vcpu_hotplug(self, vcpu, state):
-        """Disable or enable VCPU in domain.
-        """
-        if vcpu > self.info['vcpus']:
-            log.error("Invalid VCPU %d" % vcpu)
-            return
-        if int(state) == 0:
-            self.info['vcpu_avail'] &= ~(1 << vcpu)
-            availability = "offline"
-        else:
-            self.info['vcpu_avail'] &= (1 << vcpu)
-            availability = "online"
-        self.storeVm('vcpu_avail', self.info['vcpu_avail'])
-        self.storeDom("cpu/%d/availability" % vcpu, availability)
-
-
     def send_sysrq(self, key):
         asserts.isCharConvertible(key)
 
         self.storeDom("control/sysrq", '%c' % key)
-
-
-    def initStoreConnection(self):
-        ref = xc.init_store(self.store_channel)
-        if ref and ref >= 0:
-            self.setStoreRef(ref)
-            try:
-                IntroduceDomain(self.domid, ref, self.store_channel,
-                                self.dompath)
-            except RuntimeError, ex:
-                if ex.args[0] == errno.EISCONN:
-                    pass
-                else:
-                    raise
-
-
-    def dom0_enforce_vcpus(self):
-        dom = 0
-        # get max number of vcpus to use for dom0 from config
-        target = int(xroot.get_dom0_vcpus())
-        log.debug("number of vcpus to use is %d", target)
-   
-        # target = 0 means use all processors
-        if target > 0:
-            # count the number of online vcpus (cpu values in v2c map >= 0)
-            vcpu_to_cpu = dom_get(dom)['vcpu_to_cpu']
-            vcpus_online = len(filter(lambda x: x >= 0, vcpu_to_cpu))
-            log.debug("found %d vcpus online", vcpus_online)
-
-            # disable any extra vcpus that are online over the requested target
-            for vcpu in range(target, vcpus_online):
-                log.info("enforcement is disabling DOM%d VCPU%d", dom, vcpu)
-                self.vcpu_hotplug(vcpu, 0)
 
 
     def infoIsSet(self, name):
@@ -1417,25 +1389,16 @@
 implements the device control specific to that device-class."""
 controllerClasses = {}
 
-
-"""A map of backend names and the corresponding flag."""
-backendFlags = {}
-
-
-def addControllerClass(device_class, backend_name, backend_flag, cls):
+def addControllerClass(device_class, cls):
     """Register a subclass of DevController to handle the named device-class.
-
-    @param backend_flag One of the SIF_XYZ_BE_DOMAIN constants, or None if
-    no flag is to be set.
     """
     cls.deviceClass = device_class
-    backendFlags[backend_name] = backend_flag
     controllerClasses[device_class] = cls
 
 
 from xen.xend.server import blkif, netif, tpmif, pciif, usbif
-addControllerClass('vbd',  'blkif', SIF_BLK_BE_DOMAIN, blkif.BlkifController)
-addControllerClass('vif',  'netif', SIF_NET_BE_DOMAIN, netif.NetifController)
-addControllerClass('vtpm', 'tpmif', SIF_TPM_BE_DOMAIN, tpmif.TPMifController)
-addControllerClass('pci',  'pciif', None,              pciif.PciController)
-addControllerClass('usb',  'usbif', None,              usbif.UsbifController)
+addControllerClass('vbd',  blkif.BlkifController)
+addControllerClass('vif',  netif.NetifController)
+addControllerClass('vtpm', tpmif.TPMifController)
+addControllerClass('pci',  pciif.PciController)
+addControllerClass('usb',  usbif.UsbifController)
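
The setVCpuCount() hunk above encodes VCPU availability as a bitmask: bit N
set means VCPU N is available to the guest, so (1 << vcpus) - 1 enables VCPUs
0..vcpus-1.  A minimal standalone sketch of that arithmetic and of the
per-VCPU "cpu/N/availability" values written back to the store (the helper
names here are illustrative, not part of the patch):

    def vcpu_avail_mask(vcpus):
        # e.g. vcpus=4 -> 0b1111 == 15: VCPUs 0-3 available
        return (1 << vcpus) - 1

    def availability(avail_mask, vcpu):
        if avail_mask & (1 << vcpu):
            return "online"
        else:
            return "offline"

    assert vcpu_avail_mask(4) == 15
    assert availability(vcpu_avail_mask(2), 3) == "offline"
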
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/XendLogging.py
--- a/tools/python/xen/xend/XendLogging.py      Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/XendLogging.py      Mon Oct 24 15:08:13 2005
@@ -25,6 +25,14 @@
 
 __all__ = [ 'log', 'init', 'getLogFilename', 'addLogStderr',
             'removeLogStderr' ]
+
+
+if not 'TRACE' in logging.__dict__:
+    logging.TRACE = logging.DEBUG - 1
+    logging.addLevelName(logging.TRACE,'TRACE')
+    def trace(self, *args, **kwargs):
+        self.log(logging.TRACE, *args, **kwargs)
+    logging.Logger.trace = trace
 
 
 log = logging.getLogger("xend")
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/XendRoot.py
--- a/tools/python/xen/xend/XendRoot.py Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/XendRoot.py Mon Oct 24 15:08:13 2005
@@ -64,7 +64,7 @@
     xend_address_default      = ''
 
     """Default for the flag indicating whether xend should run a relocation 
server."""
-    xend_relocation_server_default = 'yes'
+    xend_relocation_server_default = 'no'
 
     """Default interface address the xend relocation server listens at. """
     xend_relocation_address_default = ''
@@ -243,29 +243,35 @@
         """
         return self.get_config_value("xend-unix-path", 
self.xend_unix_path_default)
 
-    def get_block_script(self, type):
-        return self.get_config_value('block-%s' % type, '')
-
     def get_network_script(self):
-        return self.get_config_value('network-script', '')
+        """@return the script used to alter the network configuration when
+        Xend starts and stops, or None if no such script is specified."""
+        
+        s = self.get_config_value('network-script')
+
+        if s:
+            return os.path.join(self.network_script_dir, s)
+        else:
+            return None
+
 
     def get_enable_dump(self):
         return self.get_config_bool('enable-dump', 'no')
 
     def get_vif_bridge(self):
-        return self.get_config_value('vif-bridge', 'xen-br0')
+        return self.get_config_value('vif-bridge', 'xenbr0')
 
     def get_vif_script(self):
         return self.get_config_value('vif-script', 'vif-bridge')
 
-    def get_vif_antispoof(self):
-        return self.get_config_bool('vif-antispoof', 'yes')
-
     def get_dom0_min_mem(self):
         return self.get_config_int('dom0-min-mem', self.dom0_min_mem_default)
 
     def get_dom0_vcpus(self):
         return self.get_config_int('dom0-cpus', self.dom0_vcpus_default)
+
+    def get_console_limit(self):
+        return self.get_config_int('console-limit', 1024)
 
 def instance():
     """Get an instance of XendRoot.
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/image.py    Mon Oct 24 15:08:13 2005
@@ -66,7 +66,6 @@
         self.kernel = None
         self.ramdisk = None
         self.cmdline = None
-        self.flags = 0
 
         self.configure(imageConfig, deviceConfig)
 
@@ -112,13 +111,13 @@
         """Entry point to create domain memory image.
         Override in subclass  if needed.
         """
-        self.createDomain()
+        return self.createDomain()
+
 
     def createDomain(self):
         """Build the domain boot image.
         """
         # Set params and call buildDomain().
-        self.flags = self.vm.getBackendFlags()
 
         if not os.path.isfile(self.kernel):
             raise VmError('Kernel image does not exist: %s' % self.kernel)
@@ -130,10 +129,15 @@
         
         log.info("buildDomain os=%s dom=%d vcpus=%d", self.ostype,
                  self.vm.getDomid(), self.vm.getVCpuCount())
-        err = self.buildDomain()
-        if err != 0:
-            raise VmError('Building domain failed: ostype=%s dom=%d err=%d'
-                          % (self.ostype, self.vm.getDomid(), err))
+
+        result = self.buildDomain()
+
+        if isinstance(result, dict):
+            return result
+        else:
+            raise VmError('Building domain failed: ostype=%s dom=%d err=%s'
+                          % (self.ostype, self.vm.getDomid(), str(result)))
+
 
     def getDomainMemory(self, mem):
         """@return The memory required, in KiB, by the domain to store the
@@ -153,26 +157,14 @@
         """Extra cleanup on domain destroy (define in subclass if needed)."""
         pass
 
-    def set_vminfo(self, d):
-        if d.has_key('store_mfn'):
-            self.vm.setStoreRef(d.get('store_mfn'))
-        if d.has_key('console_mfn'):
-            self.vm.setConsoleRef(d.get('console_mfn'))
-
 
 class LinuxImageHandler(ImageHandler):
 
     ostype = "linux"
 
     def buildDomain(self):
-        if self.vm.store_channel:
-            store_evtchn = self.vm.store_channel
-        else:
-            store_evtchn = 0
-        if self.vm.console_channel:
-            console_evtchn = self.vm.console_channel
-        else:
-            console_evtchn = 0
+        store_evtchn = self.vm.getStorePort()
+        console_evtchn = self.vm.getConsolePort()
 
         log.debug("dom            = %d", self.vm.getDomid())
         log.debug("image          = %s", self.kernel)
@@ -180,21 +172,14 @@
         log.debug("console_evtchn = %d", console_evtchn)
         log.debug("cmdline        = %s", self.cmdline)
         log.debug("ramdisk        = %s", self.ramdisk)
-        log.debug("flags          = %d", self.flags)
         log.debug("vcpus          = %d", self.vm.getVCpuCount())
 
-        ret = xc.linux_build(dom            = self.vm.getDomid(),
-                             image          = self.kernel,
-                             store_evtchn   = store_evtchn,
-                             console_evtchn = console_evtchn,
-                             cmdline        = self.cmdline,
-                             ramdisk        = self.ramdisk,
-                             flags          = self.flags,
-                             vcpus          = self.vm.getVCpuCount())
-        if isinstance(ret, dict):
-            self.set_vminfo(ret)
-            return 0
-        return ret
+        return xc.linux_build(dom            = self.vm.getDomid(),
+                              image          = self.kernel,
+                              store_evtchn   = store_evtchn,
+                              console_evtchn = console_evtchn,
+                              cmdline        = self.cmdline,
+                              ramdisk        = self.ramdisk)
 
 class VmxImageHandler(ImageHandler):
 
@@ -219,45 +204,33 @@
         self.dmargs += self.configVNC(imageConfig)
 
 
-    def createImage(self):
-        """Create a VM for the VMX environment.
-        """
-        self.createDomain()
-
     def buildDomain(self):
         # Create an event channel
         self.device_channel = xc.evtchn_alloc_unbound(dom=self.vm.getDomid(),
                                                       remote_dom=0)
         log.info("VMX device model port: %d", self.device_channel)
-        if self.vm.store_channel:
-            store_evtchn = self.vm.store_channel
-        else:
-            store_evtchn = 0
+
+        store_evtchn = self.vm.getStorePort()
 
         log.debug("dom            = %d", self.vm.getDomid())
         log.debug("image          = %s", self.kernel)
         log.debug("control_evtchn = %d", self.device_channel)
         log.debug("store_evtchn   = %d", store_evtchn)
         log.debug("memsize        = %d", self.vm.getMemoryTarget() / 1024)
-        log.debug("flags          = %d", self.flags)
         log.debug("vcpus          = %d", self.vm.getVCpuCount())
 
-        ret = xc.vmx_build(dom            = self.vm.getDomid(),
-                           image          = self.kernel,
-                           control_evtchn = self.device_channel,
-                           store_evtchn   = store_evtchn,
-                           memsize        = self.vm.getMemoryTarget() / 1024,
-                           flags          = self.flags,
-                           vcpus          = self.vm.getVCpuCount())
-        if isinstance(ret, dict):
-            self.set_vminfo(ret)
-            return 0
-        return ret
+        return xc.vmx_build(dom            = self.vm.getDomid(),
+                            image          = self.kernel,
+                            control_evtchn = self.device_channel,
+                            store_evtchn   = store_evtchn,
+                            memsize        = self.vm.getMemoryTarget() / 1024,
+                            vcpus          = self.vm.getVCpuCount())
+
 
     # Return a list of cmd line args to the device models based on the
     # xm config file
     def parseDeviceModelArgs(self, imageConfig, deviceConfig):
-        dmargs = [ 'cdrom', 'boot', 'fda', 'fdb',
+        dmargs = [ 'cdrom', 'boot', 'fda', 'fdb', 'ne2000', 
                    'localtime', 'serial', 'stdvga', 'isa', 'vcpus' ]
         ret = []
         for a in dmargs:
@@ -265,15 +238,17 @@
 
             # python doesn't allow '-' in variable names
             if a == 'stdvga': a = 'std-vga'
+            if a == 'ne2000': a = 'nic-ne2000'
 
             # Handle booleans gracefully
-            if a in ['localtime', 'std-vga', 'isa']:
+            if a in ['localtime', 'std-vga', 'isa', 'nic-ne2000']:
                 if v != None: v = int(v)
-
+                if v: ret.append("-%s" % a)
+            else:
+                if v:
+                    ret.append("-%s" % a)
+                    ret.append("%s" % v)
             log.debug("args: %s, val: %s" % (a,v))
-            if v: 
-                ret.append("-%s" % a)
-                ret.append("%s" % v)
 
         # Handle disk/network related options
         for (name, info) in deviceConfig:
@@ -337,7 +312,8 @@
                   "-m", "%s" % (self.vm.getMemoryTarget() / 1024)])
         args = args + self.dmargs
         env = dict(os.environ)
-        env['DISPLAY'] = self.display
+        if self.display:
+            env['DISPLAY'] = self.display
         log.info("spawning device models: %s %s", self.device_model, args)
         self.pid = os.spawnve(os.P_NOWAIT, self.device_model, args, env)
         log.info("device model pid: %d", self.pid)
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/server/DevController.py
--- a/tools/python/xen/xend/server/DevController.py     Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/server/DevController.py     Mon Oct 24 15:08:13 2005
@@ -82,7 +82,7 @@
         if backpath:
             xstransact.Remove(backpath)
         else:
-            raise VmError("Device not connected")
+            raise VmError("Device %s not connected" % devid)
            
 
     def configurations(self):
@@ -160,7 +160,7 @@
                 if result:
                     result = int(result)
                 else:
-                    result = 1
+                    result = 0
                 t.write("nextDeviceID", str(result + 1))
                 if t.commit():
                     return result
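
The nextDeviceID change above makes freshly-created domains start numbering
devices at 0 rather than 1, and the surrounding loop retries until the
xenstore transaction commits.  An in-memory sketch of that allocation pattern
(the begin/read/write/commit interface here is a hypothetical stand-in for
xstransact):

    def allocate_device_id(store, key='nextDeviceID'):
        while True:
            t = store.begin()
            current = t.read(key)
            if current:
                result = int(current)
            else:
                result = 0          # first device gets ID 0 after this patch
            t.write(key, str(result + 1))
            if t.commit():
                return result
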
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/server/SrvDaemon.py
--- a/tools/python/xen/xend/server/SrvDaemon.py Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/server/SrvDaemon.py Mon Oct 24 15:08:13 2005
@@ -13,6 +13,8 @@
 import pwd
 import re
 import traceback
+
+import xen.lowlevel.xc
 
 from xen.xend.server import SrvServer
 from xen.xend.XendLogging import log
@@ -32,34 +34,6 @@
         self.traceindent = 0
         self.child = 0 
         
-    def daemon_pids(self):
-        pids = []
-        pidex = '(?P<pid>\d+)'
-        pythonex = '(?P<python>\S*python\S*)'
-        cmdex = '(?P<cmd>.*)'
-        procre = re.compile('^\s*' + pidex + '\s*' + pythonex + '\s*' + cmdex + '$')
-        xendre = re.compile('^\S+/xend\s*(start|restart)\s*.*$')
-        procs = os.popen('ps -e -o pid,args 2>/dev/null')
-        for proc in procs:
-            pm = procre.match(proc)
-            if not pm: continue
-            xm = xendre.match(pm.group('cmd'))
-            if not xm: continue
-            pids.append(int(pm.group('pid')))
-        return pids
-
-    def new_cleanup(self, kill=0):
-        err = 0
-        pids = self.daemon_pids()
-        if kill:
-            for pid in pids:
-                print "Killing daemon pid=%d" % pid
-                os.kill(pid, signal.SIGHUP)
-        elif pids:
-            err = 1
-            print "Daemon already running: ", pids
-        return err
-
     def read_pid(self, pidfile):
         """Read process id from a file.
 
@@ -115,11 +89,8 @@
             os.remove(pidfile)
         return running
 
-    def cleanup_xend(self, kill=False):
+    def cleanup_xend(self, kill):
         return self.cleanup_process(XEND_PID_FILE, "xend", kill)
-
-    def cleanup(self, kill=False):
-        self.cleanup_xend(kill=kill)
 
     def status(self):
         """Returns the status of the xend daemon.
@@ -156,16 +127,13 @@
         # Detach from TTY.
         os.setsid()
 
-        # Detach from standard file descriptors.
-        # I do this at the file-descriptor level: the overlying Python file
-        # objects also use fd's 0, 1 and 2.
+        # Detach from standard file descriptors, and redirect them to
+        # /dev/null or the log as appropriate.
         os.close(0)
         os.close(1)
         os.close(2)
         if XEND_DEBUG:
             os.open('/dev/null', os.O_RDONLY)
-            # XXX KAF: Why doesn't this capture output from C extensions that
-            # fprintf(stdout) or fprintf(stderr) ??
             os.open(XEND_DEBUG_LOG, os.O_WRONLY|os.O_CREAT)
             os.dup(1)
         else:
@@ -180,7 +148,7 @@
         0  Success
         4  Insufficient privileges
         """
-        xend_pid = self.cleanup_xend()
+        xend_pid = self.cleanup_xend(False)
 
         if self.set_user():
             return 4
@@ -294,11 +262,20 @@
             return 1
 
     def stop(self):
-        return self.cleanup(kill=True)
+        result = self.cleanup_xend(True)
+        from xen.xend import Vifctl
+        Vifctl.network("stop")
+        return result
 
     def run(self, status):
         try:
             log.info("Xend Daemon started")
+
+            xc = xen.lowlevel.xc.new()
+            xinfo = xc.xeninfo()
+            log.info("Xend changeset: %s.", xinfo['xen_changeset'])
+            del xc
+
             event.listenEvent(self)
             relocate.listenRelocation()
             servers = SrvServer.create()
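
The run() hunk above logs which hypervisor changeset xend is talking to by
querying the low-level bindings at startup.  The same calls, shown in
isolation (this assumes the xen.lowlevel.xc extension is importable, as it is
wherever xend runs):

    import xen.lowlevel.xc

    xc = xen.lowlevel.xc.new()
    xinfo = xc.xeninfo()            # dict of hypervisor version/build info
    print "Xend changeset: %s." % xinfo['xen_changeset']
    del xc
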
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/server/SrvDomain.py
--- a/tools/python/xen/xend/server/SrvDomain.py Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/server/SrvDomain.py Mon Oct 24 15:08:13 2005
@@ -89,7 +89,7 @@
         fn = FormFn(self.xd.domain_pincpu,
                     [['dom', 'int'],
                      ['vcpu', 'int'],
-                     ['cpumap', 'int']])
+                     ['cpumap', 'str']])
         val = fn(req.args, {'dom': self.dom.domid})
         return val
 
@@ -105,6 +105,13 @@
         return val
     
     
+    def op_cpu_sedf_get(self, op, req):
+        fn = FormFn(self.xd.domain_cpu_sedf_get,
+                    [['dom', 'int']])
+        val = fn(req.args, {'dom': self.dom.domid})
+        return val
+
+
     def op_cpu_sedf_set(self, op, req):
         fn = FormFn(self.xd.domain_cpu_sedf_set,
                     [['dom', 'int'],
@@ -146,13 +153,13 @@
     def op_device_destroy(self, op, req):
         return self.call(self.dom.destroyDevice,
                          [['type', 'str'],
-                          ['dev',  'int']],
+                          ['dev',  'str']],
                          req)
                 
     def op_device_configure(self, op, req):
         return self.call(self.dom.device_configure,
                          [['config', 'sxpr'],
-                          ['dev',    'int']],
+                          ['dev',    'str']],
                          req)
 
 
@@ -165,17 +172,25 @@
         val = fn(req.args, {'dom': self.dom.domid})
         return val
 
-    def op_vcpu_hotplug(self, op, req):
-        return self.call(self.dom.vcpu_hotplug,
-                         [['vcpu', 'int'],
-                          ['state', 'int']],
-                         req)
+    def op_set_vcpus(self, op, req):
+        return self.call(self.dom.setVCpuCount,
+                         [['vcpus', 'int']],
+                         req)
+
+
+    def op_vcpuinfo(self, _1, req):
+        return self.call(self.dom.getVCPUInfo, [], req)
+
 
     def render_POST(self, req):
         return self.perform(req)
         
     def render_GET(self, req):
         op = req.args.get('op')
+
+        if op and op[0] in ['vcpuinfo']:
+            return self.perform(req)
+
         #
         # XXX SMH: below may be useful once again if we ever try to get
         # the raw 'web' interface to xend working once more. But for now
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/server/SrvServer.py
--- a/tools/python/xen/xend/server/SrvServer.py Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/server/SrvServer.py Mon Oct 24 15:08:13 2005
@@ -39,6 +39,7 @@
 # todo Support security settings etc. in the config file.
 # todo Support command-line args.
 
+import fcntl
 from threading import Thread
 
 from xen.web.httpserver import HttpServer, UnixHttpServer
@@ -64,6 +65,11 @@
         self.servers.append(server)
 
     def start(self, status):
+        # Running the network script will spawn another process, which takes
+        # the status fd with it unless we set FD_CLOEXEC.  Failing to do this
+        # causes the read in SrvDaemon to hang even when we have written here.
+        fcntl.fcntl(status, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
+        
         Vifctl.network('start')
         threads = []
         for server in self.servers:
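
The fcntl call above marks the status pipe close-on-exec so that the process
spawned by Vifctl.network('start') does not inherit it; otherwise the leaked
write end stays open and the read in SrvDaemon never completes.  A small
standalone illustration of the flag (slightly more defensive than the
one-liner in the patch, since it preserves any existing fd flags):

    import fcntl, os

    def set_cloexec(fd):
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)

    r, w = os.pipe()
    set_cloexec(w)     # w is closed automatically across exec*()
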
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/server/blkif.py
--- a/tools/python/xen/xend/server/blkif.py     Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/server/blkif.py     Mon Oct 24 15:08:13 2005
@@ -22,8 +22,9 @@
 
 from xen.util import blkif
 from xen.xend import sxp
+from xen.xend.XendError import VmError
 
-from DevController import DevController
+from xen.xend.server.DevController import DevController
 
 
 class BlkifController(DevController):
@@ -96,6 +97,4 @@
                 if self.readBackend(i, 'dev') == devid:
                     DevController.destroyDevice(self, i)
                     return
-            # Try this, but it's almost certainly going to throw VmError,
-            # since we can't find the device.
-            DevController.destroyDevice(self, int(devid))
+            raise VmError("Device %s not connected" % devid)
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/uuid.py
--- a/tools/python/xen/xend/uuid.py     Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/uuid.py     Mon Oct 24 15:08:13 2005
@@ -36,23 +36,27 @@
         cmd += " -r"
     else:
         cmd += " -t"
-    return commands.getoutput(cmd)
+    return fromString(commands.getoutput(cmd))
 
 
 def getUuidRandom():
     """Generate a random UUID."""
     
-    bytes = [ random.randint(0, 255) for i in range(0, 16) ]
-    # Encode the variant.
-    bytes[6] = (bytes[6] & 0x0f) | 0x40
-    bytes[8] = (bytes[8] & 0x3f) | 0x80
-    f = "%02x"
-    return ( "-".join([f*4, f*2, f*2, f*2, f*6]) % tuple(bytes) )
+    return [ random.randint(0, 255) for i in range(0, 16) ]
 
 
 #uuidFactory = getUuidUuidgen
 uuidFactory = getUuidRandom
 
 
-def getUuid():
+def create():
     return uuidFactory()
+
+
+def toString(u):
+    f = "%02x"
+    return ( "-".join([f*4, f*2, f*2, f*2, f*6]) % tuple(u) )
+
+def fromString(s):
+    s = s.replace('-', '')
+    return [ int(s[i : i + 2], 16) for i in range(0, 32, 2) ]
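
uuid.py now represents a UUID as a list of 16 byte values, with toString()
and fromString() converting to and from the dashed hex form; this is what
lets constructDomain() pass uuid.fromString(self.info['uuid']) down as the
domain handle.  A quick round-trip check, re-stating the same helpers
standalone:

    import random

    def create():
        return [ random.randint(0, 255) for i in range(0, 16) ]

    def toString(u):
        f = "%02x"
        return "-".join([f*4, f*2, f*2, f*2, f*6]) % tuple(u)

    def fromString(s):
        s = s.replace('-', '')
        return [ int(s[i : i + 2], 16) for i in range(0, 32, 2) ]

    u = create()
    assert fromString(toString(u)) == u
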
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/xenstore/xstransact.py
--- a/tools/python/xen/xend/xenstore/xstransact.py      Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/xenstore/xstransact.py      Mon Oct 24 15:08:13 2005
@@ -14,6 +14,9 @@
 class xstransact:
 
     def __init__(self, path):
+        self.in_transaction = False # Set this temporarily -- if this
+                                    # constructor fails, then we need to
+                                    # protect __del__.
         self.path = path.rstrip("/")
         self.transaction = xshandle().transaction_start()
         self.in_transaction = True
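
The constructor change above sets in_transaction before calling
transaction_start(), so that if the start fails, __del__ finds the attribute
defined and does not raise a secondary AttributeError while cleaning up.  A
sketch of that pattern with a hypothetical backend object standing in for the
xenstore handle:

    class Transaction:
        def __init__(self, backend):
            self.in_transaction = False   # set before anything that can raise
            self.handle = backend.transaction_start()
            self.in_transaction = True

        def __del__(self):
            # Safe even when __init__ failed part-way through.
            if self.in_transaction:
                self.handle.abort()
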
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/xenstore/xsutil.py
--- a/tools/python/xen/xend/xenstore/xsutil.py  Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/xenstore/xsutil.py  Mon Oct 24 15:08:13 2005
@@ -19,8 +19,8 @@
         xs_lock.release()
     return xs_handle
 
-def IntroduceDomain(domid, page, port, path):
-    return xshandle().introduce_domain(domid, page, port, path)
+def IntroduceDomain(domid, page, port):
+    return xshandle().introduce_domain(domid, page, port)
 
 def GetDomainPath(domid):
     return xshandle().get_domain_path(domid)
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/xenstore/xswatch.py
--- a/tools/python/xen/xend/xenstore/xswatch.py Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/xenstore/xswatch.py Mon Oct 24 15:08:13 2005
@@ -9,6 +9,9 @@
 import threading
 from xen.lowlevel import xs
 from xen.xend.xenstore.xsutil import xshandle
+
+from xen.xend.XendLogging import log
+
 
 class xswatch:
 
@@ -41,10 +44,13 @@
         while True:
             try:
                 we = cls.xs.read_watch()
-            except RuntimeError, ex:
-                print ex
-                raise
-            watch = we[1]
-            watch.fn(*watch.args, **watch.kwargs)
+                watch = we[1]
+                watch.fn(*watch.args, **watch.kwargs)
+            except:
+                log.exception("read_watch failed")
+                # Ignore this exception -- there's no point throwing it
+                # further on because that will just kill the watcher thread,
+                # which achieves nothing.
+
 
     watchMain = classmethod(watchMain)
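
watchMain() above now catches and logs every exception from a watch callback
rather than letting it propagate, because an uncaught exception would kill
the single watcher thread and silently stop all future watch events.  A
minimal sketch of that "log and keep going" loop (read_next_event and
dispatch are hypothetical stand-ins for xs.read_watch() and the stored
callback):

    import logging
    log = logging.getLogger("watcher")

    def watch_loop(read_next_event, dispatch):
        while True:
            try:
                event = read_next_event()
                dispatch(event)
            except Exception:
                # Keep the watcher thread alive; one bad callback should not
                # stop delivery of later events.
                log.exception("watch dispatch failed")
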
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xm/create.py
--- a/tools/python/xen/xm/create.py     Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xm/create.py     Mon Oct 24 15:08:13 2005
@@ -363,6 +363,10 @@
 gopts.var('nographic', val='no|yes',
           fn=set_bool, default=0,
           use="Should device models use graphics?")
+
+gopts.var('ne2000', val='no|yes',
+          fn=set_bool, default=0,
+          use="Should device models use ne2000?")
 
 gopts.var('vnc', val='',
           fn=set_value, default=None,
@@ -540,7 +544,7 @@
     """
     args = [ 'memmap', 'device_model', 'vcpus', 'cdrom',
              'boot', 'fda', 'fdb', 'localtime', 'serial', 'macaddr', 'stdvga', 
-             'isa', 'nographic', 'vnc', 'vncviewer', 'sdl', 'display']
+             'isa', 'nographic', 'vnc', 'vncviewer', 'sdl', 'display', 
'ne2000']
     for a in args:
         if (vals.__dict__[a]):
             config_image.append([a, vals.__dict__[a]])
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py       Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xm/main.py       Mon Oct 24 15:08:13 2005
@@ -71,8 +71,7 @@
 xm full list of subcommands:
 
   Domain Commands:
-    console <DomId>         attach to console of DomId
-    cpus-list <DomId> <VCpu>          get the list of cpus for a VCPU
+    console <DomId>           attach to console of DomId
     create  <ConfigFile>      create a domain
     destroy <DomId>           terminate a domain immediately
     domid   <DomName>         convert a domain name to a domain id
@@ -88,10 +87,9 @@
     shutdown [-w|-a] <DomId>  shutdown a domain
     sysrq   <DomId> <letter>  send a sysrq to a domain
     unpause <DomId>           unpause a paused domain
-    vcpu-enable <DomId> <VCPU>        enable VCPU in a domain
-    vcpu-disable <DomId> <VCPU>       disable VCPU in a domain
-    vcpu-list <DomId>                 get the list of VCPUs for a domain
-    vcpu-pin <DomId> <VCpu> <CPUS>    set which cpus a VCPU can use. 
+    set-vcpus <DomId> <VCPUs> enable the specified number of VCPUs in a domain
+    vcpu-list <DomId>         list the VCPUs for a domain
+    vcpu-pin <DomId> <VCPU> <CPUs>    set which cpus a VCPU can use. 
 
   Xen Host Commands:
     dmesg   [--clear]         read or clear Xen's message buffer
@@ -169,11 +167,9 @@
     if error == "Not found" and dom != None:
         err("Domain '%s' not found when running 'xm %s'" % (dom, cmd))
         sys.exit(1)
-    elif error == "Exception: Device not connected":
-        err("Device not connected")
+    else:
+        err(error)
         sys.exit(1)
-    else:
-        raise ex
     
 
 #########################################################################
@@ -210,6 +206,15 @@
     id = sxp.child_value(info, 'domid')
     if id is not None:
         server.xend_domain_unpause(domid)
+
+
+def getDomains(domain_names):
+    from xen.xend.XendClient import server
+    if domain_names:
+        return map(server.xend_domain, domain_names)
+    else:
+        return server.xend_list_domains()
+
 
 def xm_list(args):
     use_long = 0
@@ -220,80 +225,161 @@
         err(opterr)
         sys.exit(1)
     
-    n = len(params)
     for (k, v) in options:
         if k in ['-l', '--long']:
             use_long = 1
         if k in ['-v', '--vcpus']:
             show_vcpus = 1
 
-    from xen.xend.XendClient import server
-    if n == 0:
+    if show_vcpus:
+        print >>sys.stderr, (
+            "xm list -v is deprecated.  Please use xm vcpu-list.")
+        xm_vcpu_list(params)
+        return
+
+    doms = getDomains(params)
+
+    if use_long:
+        map(PrettyPrint.prettyprint, doms)
+    else:
+        xm_brief_list(doms)
+
+
+def parse_doms_info(info):
+    def get_info(n, t, d):
+        return t(sxp.child_value(info, n, d))
+    
+    return {
+        'dom'      : get_info('domid',    int,   -1),
+        'name'     : get_info('name',     str,   '??'),
+        'mem'      : get_info('memory',   int,   0),
+        'vcpus'    : get_info('vcpus',    int,   0),
+        'state'    : get_info('state',    str,   '??'),
+        'cpu_time' : get_info('cpu_time', float, 0),
+        'ssidref'  : get_info('ssidref',  int,   0),
+        }
+
+
+def xm_brief_list(doms):
+    print 'Name                              ID Mem(MiB) VCPUs State  Time(s)'
+    for dom in doms:
+        d = parse_doms_info(dom)
+        if (d['ssidref'] != 0):
+            d['ssidstr'] = (" s:%04x/p:%04x" % 
+                            ((d['ssidref'] >> 16) & 0xffff,
+                              d['ssidref']        & 0xffff))
+        else:
+            d['ssidstr'] = ""
+        print ("%(name)-32s %(dom)3d %(mem)8d %(vcpus)5d %(state)5s 
%(cpu_time)7.1f%(ssidstr)s" % d)
+
+
+def xm_vcpu_list(args):
+    print 'Name                              ID  VCPU  CPU  State  Time(s)  CPU Affinity'
+
+    from xen.xend.XendClient import server
+    if args:
+        dominfo = map(server.xend_domain_vcpuinfo, args)
+    else:
         doms = server.xend_list_domains()
-    else:
-        doms = map(server.xend_domain, params)
-               
-    if use_long:
-        for dom in doms:
-            PrettyPrint.prettyprint(doms)
-    else:
-        domsinfo = map(parse_doms_info, doms)
-
-        if show_vcpus:
-            xm_show_vcpus(domsinfo)
-        else:
-            xm_brief_list(domsinfo)
-
-def parse_doms_info(info):
-    dominfo = {}
-    dominfo['dom'] = int(sxp.child_value(info, 'domid', '-1'))
-    dominfo['name'] = sxp.child_value(info, 'name', '??')
-    dominfo['mem'] = int(sxp.child_value(info, 'memory', '0'))
-    dominfo['cpu'] = str(sxp.child_value(info, 'cpu', '0'))
-    dominfo['vcpus'] = int(sxp.child_value(info, 'vcpus', '0'))
-    # if there is more than 1 cpu, the value doesn't mean much
-    if dominfo['vcpus'] > 1:
-        dominfo['cpu'] = '-'
-    dominfo['state'] = sxp.child_value(info, 'state', '??')
-    dominfo['cpu_time'] = float(sxp.child_value(info, 'cpu_time', '0'))
-    # security identifiers
-    if ((int(sxp.child_value(info, 'ssidref', '0'))) != 0):
-        dominfo['ssidref1'] =  int(sxp.child_value(info, 'ssidref', '0')) & 0xffff
-        dominfo['ssidref2'] = (int(sxp.child_value(info, 'ssidref', '0')) >> 16) & 0xffff
-    # get out the vcpu information
-    dominfo['vcpulist'] = []
-    vcpu_to_cpu = sxp.child_value(info, 'vcpu_to_cpu', '-1').split('|')
-    cpumap = sxp.child_value(info, 'cpumap', [])
-    mask = ((int(sxp.child_value(info, 'vcpus', '0')))**2) - 1
-    count = 0
-    for cpu in vcpu_to_cpu:
-        vcpuinfo = {}
-        vcpuinfo['name']   = sxp.child_value(info, 'name', '??')
-        vcpuinfo['dom']    = int(sxp.child_value(info, 'domid', '-1'))
-        vcpuinfo['vcpu']   = int(count)
-        vcpuinfo['cpu']    = int(cpu)
-        vcpuinfo['cpumap'] = int(cpumap[count])&mask
-        count = count + 1
-        dominfo['vcpulist'].append(vcpuinfo)
-    return dominfo
-        
-def xm_brief_list(domsinfo):
-    print 'Name              ID  Mem(MiB)  CPU  VCPUs  State   Time(s)'
-    for dominfo in domsinfo:
-        if dominfo.has_key("ssidref1"):
-            print ("%(name)-16s %(dom)3d  %(mem)8d  %(cpu)3s  %(vcpus)5d  
%(state)5s  %(cpu_time)7.1f     s:%(ssidref2)02x/p:%(ssidref1)02x" % dominfo)
-        else:
-            print ("%(name)-16s %(dom)3d  %(mem)8d  %(cpu)3s  %(vcpus)5d  
%(state)5s  %(cpu_time)7.1f" % dominfo)
-
-def xm_show_vcpus(domsinfo):
-    print 'Name              Id  VCPU  CPU  CPUMAP'
-    for dominfo in domsinfo:
-        for vcpuinfo in dominfo['vcpulist']:
-            print ("%(name)-16s %(dom)3d  %(vcpu)4d  %(cpu)3d  0x%(cpumap)x" %
-                   vcpuinfo)
-
-def xm_vcpu_list(args):
-    xm_list(["-v"] + args)
+        dominfo = map(
+            lambda x: server.xend_domain_vcpuinfo(sxp.child_value(x, 'name')),
+            doms)
+
+    for dom in dominfo:
+        def get_info(n):
+            return sxp.child_value(dom, n)
+
+        #
+        # convert a list of integers into a list of pairs indicating
+        # continuous sequences in the list:
+        #
+        # [0,1,2,3]   -> [(0,3)]
+        # [1,2,4,5]   -> [(1,2),(4,5)]
+        # [0]         -> [(0,0)]
+        # [0,1,4,6,7] -> [(0,1),(4,4),(6,7)]
+        #
+        def list_to_rangepairs(cmap):
+            cmap.sort()
+            pairs = []
+            x = y = 0
+            for i in range(0,len(cmap)):
+                try:
+                    if ((cmap[y+1] - cmap[i]) > 1):
+                        pairs.append((cmap[x],cmap[y]))
+                        x = y = i+1
+                    else:
+                        y = y + 1
+                # if we go off the end, then just add x to y
+                except IndexError:
+                    pairs.append((cmap[x],cmap[y]))
+
+            return pairs
+
+        #
+        # Convert pairs to range string, e.g: [(1,2),(3,3),(5,7)] -> 1-2,3,5-7
+        #
+        def format_pairs(pairs):
+            if not pairs:
+                return "no cpus"
+            out = ""
+            for f,s in pairs:
+                if (f==s):
+                    out += '%d'%f
+                else:
+                    out += '%d-%d'%(f,s)
+                out += ','
+            # trim trailing ','
+            return out[:-1]
+
+        def format_cpumap(cpumap):
+            cpumap = map(lambda x: int(x), cpumap)
+            cpumap.sort()
+
+            from xen.xend.XendClient import server
+            for x in server.xend_node()[1:]:
+                if len(x) > 1 and x[0] == 'nr_cpus':
+                    nr_cpus = int(x[1])
+                    cpumap = filter(lambda x: x < nr_cpus, cpumap)
+                    if len(cpumap) == nr_cpus:
+                        return "any cpu"
+                    break
+ 
+            return format_pairs(list_to_rangepairs(cpumap))
+
+        name  =     get_info('name')
+        domid = int(get_info('domid'))
+
+        for vcpu in sxp.children(dom, 'vcpu'):
+            def vinfo(n, t):
+                return t(sxp.child_value(vcpu, n))
+
+            number   = vinfo('number',   int)
+            cpu      = vinfo('cpu',      int)
+            cpumap   = format_cpumap(vinfo('cpumap', list))
+            online   = vinfo('online',   int)
+            cpu_time = vinfo('cpu_time', float)
+            running  = vinfo('running',  int)
+            blocked  = vinfo('blocked',  int)
+
+            if online:
+                c = str(cpu)
+                if running:
+                    s = 'r'
+                else:
+                    s = '-'
+                if blocked:
+                    s += 'b'
+                else:
+                    s += '-'
+                s += '-'
+            else:
+                c = "-"
+                s = "--p"
+
+            print (
+                "%(name)-32s %(domid)3d  %(number)4d  %(c)3s   %(s)-3s   
%(cpu_time)7.1f  %(cpumap)s" %
+                locals())
+
 
 def xm_reboot(args):
     arg_check(args,1,"reboot")
@@ -325,7 +411,6 @@
 
 def cpu_make_map(cpulist):
     cpus = []
-    cpumap = 0
     for c in cpulist.split(','):
         if c.find('-') != -1:
             (x,y) = c.split('-')
@@ -334,14 +419,11 @@
         else:
             cpus.append(int(c))
     cpus.sort()
-    for c in cpus:
-        cpumap = cpumap | 1<<c
-
-    return cpumap
+    return cpus
 
 def xm_vcpu_pin(args):
     arg_check(args, 3, "vcpu-pin")
-    
+
     dom  = args[0]
     vcpu = int(args[1])
     cpumap = cpu_make_map(args[2])
@@ -351,7 +433,7 @@
 
 def xm_mem_max(args):
     arg_check(args, 2, "mem-max")
-    
+
     dom = args[0]
     mem = int_unit(args[1], 'm')
 
@@ -360,35 +442,16 @@
     
 def xm_mem_set(args):
     arg_check(args, 2, "mem-set")
-    
+
     dom = args[0]
     mem_target = int_unit(args[1], 'm')
 
     from xen.xend.XendClient import server
     server.xend_domain_mem_target_set(dom, mem_target)
     
-# TODO: why does this lookup by name?  and what if that fails!?
-def xm_vcpu_enable(args):
-    arg_check(args, 2, "vcpu-enable")
-    
-    name = args[0]
-    vcpu = int(args[1])
-    
-    from xen.xend.XendClient import server
-    dom = server.xend_domain(name)
-    id = sxp.child_value(dom, 'domid')
-    server.xend_domain_vcpu_hotplug(id, vcpu, 1)
-
-def xm_vcpu_disable(args):
-    arg_check(args, 2, "vcpu-disable")
-    
-    name = args[0]
-    vcpu = int(args[1])
-    
-    from xen.xend.XendClient import server
-    dom = server.xend_domain(name)
-    id = sxp.child_value(dom, 'domid')
-    server.xend_domain_vcpu_hotplug(id, vcpu, 0)
+def xm_set_vcpus(args):
+    from xen.xend.XendClient import server
+    server.xend_domain_set_vcpus(args[0], int(args[1]))
 
 def xm_domid(args):
     name = args[0]
@@ -588,9 +651,7 @@
     "mem-set": xm_mem_set,
     # cpu commands
     "vcpu-pin": xm_vcpu_pin,
-#    "cpus-list": xm_cpus_list,
-    "vcpu-enable": xm_vcpu_enable,
-    "vcpu-disable": xm_vcpu_disable,
+    "set-vcpus": xm_set_vcpus,
     "vcpu-list": xm_vcpu_list,
     # special
     "pause": xm_pause,
@@ -688,11 +749,16 @@
             if rc:
                 usage()
         except socket.error, ex:
-            print >>sys.stderr, ex
-            err("Error connecting to xend, is xend running?")
+            if os.geteuid() != 0:
+                err("Most commands need root access.  Please try again as 
root.")
+            else:
+                err("Error connecting to xend: %s.  Is xend running?" % ex[1])
             sys.exit(1)
         except IOError:
-            err("Most commands need root access.  Please try again as root")
+            if os.geteuid() != 0:
+                err("Most commands need root access.  Please try again as 
root.")
+            else:
+                err("Error connecting to xend: %s." % ex[1])
             sys.exit(1)
         except xen.xend.XendError.XendError, ex:
             if len(args) > 0:
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/security/Makefile
--- a/tools/security/Makefile   Fri Oct 21 19:58:39 2005
+++ b/tools/security/Makefile   Mon Oct 24 15:08:13 2005
@@ -43,14 +43,15 @@
 build: mk-symlinks
        $(MAKE) secpol_tool
        $(MAKE) secpol_xml2bin
+       $(MAKE) get_decision
        chmod 700 ./setlabel.sh
        chmod 700 ./updategrub.sh
        chmod 700 ./getlabel.sh
 
-secpol_tool : secpol_tool.c secpol_compat.h
+secpol_tool : secpol_tool.c
        $(CC) $(CPPFLAGS) $(CFLAGS) -o $@ $<
 
-secpol_xml2bin : secpol_xml2bin.c secpol_xml2bin.h secpol_compat.h
+secpol_xml2bin : secpol_xml2bin.c secpol_xml2bin.h
        $(CC) $(CPPFLAGS) $(CFLAGS) $(CFLAGS_XML2BIN) $(VALIDATE_SCHEMA) -o $@ $<
 
 clean:
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/security/secpol_tool.c
--- a/tools/security/secpol_tool.c      Fri Oct 21 19:58:39 2005
+++ b/tools/security/secpol_tool.c      Mon Oct 24 15:08:13 2005
@@ -33,7 +33,7 @@
 #include <sys/ioctl.h>
 #include <string.h>
 #include <netinet/in.h>
-#include "secpol_compat.h"
+#include <stdint.h>
 #include <xen/acm.h>
 #include <xen/acm_ops.h>
 #include <xen/linux/privcmd.h>
@@ -67,7 +67,7 @@
                         (unsigned long) hypercall);
 }
 
-static inline int do_acm_op(int xc_handle, acm_op_t * op)
+static inline int do_acm_op(int xc_handle, struct acm_op * op)
 {
     int ret = -1;
     privcmd_hypercall_t hypercall;
@@ -275,10 +275,10 @@
 /******************************* get policy ******************************/
 
 #define PULL_CACHE_SIZE                8192
-u8 pull_buffer[PULL_CACHE_SIZE];
+uint8_t pull_buffer[PULL_CACHE_SIZE];
 int acm_domain_getpolicy(int xc_handle)
 {
-    acm_op_t op;
+    struct acm_op op;
     int ret;
 
     memset(pull_buffer, 0x00, sizeof(pull_buffer));
@@ -299,7 +299,7 @@
     struct stat mystat;
     int ret, fd;
     off_t len;
-    u8 *buffer;
+    uint8_t *buffer;
 
     if ((ret = stat(filename, &mystat)))
     {
@@ -321,7 +321,7 @@
     }
     if (len == read(fd, buffer, len))
     {
-        acm_op_t op;
+        struct acm_op op;
         /* dump it and then push it down into xen/acm */
         acm_dump_policy_buffer(buffer, len);
         op.cmd = ACM_SETPOLICY;
@@ -368,8 +368,8 @@
 #define PULL_STATS_SIZE                8192
 int acm_domain_dumpstats(int xc_handle)
 {
-    u8 stats_buffer[PULL_STATS_SIZE];
-    acm_op_t op;
+    uint8_t stats_buffer[PULL_STATS_SIZE];
+    struct acm_op op;
     int ret;
     struct acm_stats_buffer *stats;
 
@@ -442,7 +442,7 @@
     /* this includes header and a set of types */
     #define MAX_SSIDBUFFER  2000
     int ret, i;
-    acm_op_t op;
+    struct acm_op op;
     struct acm_ssid_buffer *hdr;
     unsigned char *buf;
        int nice_print = 1;
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/security/secpol_xml2bin.c
--- a/tools/security/secpol_xml2bin.c   Fri Oct 21 19:58:39 2005
+++ b/tools/security/secpol_xml2bin.c   Mon Oct 24 15:08:13 2005
@@ -37,7 +37,7 @@
 #include <libxml/parser.h>
 #include <libxml/tree.h>
 #include <libxml/xmlreader.h>
-#include "secpol_compat.h"
+#include <stdint.h>
 #include <xen/acm.h>
 
 #include "secpol_xml2bin.h"
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/vnet/vnet-module/varp.c
--- a/tools/vnet/vnet-module/varp.c     Fri Oct 21 19:58:39 2005
+++ b/tools/vnet/vnet-module/varp.c     Mon Oct 24 15:08:13 2005
@@ -176,7 +176,7 @@
 /** UDP port (network order). */
 u16 varp_port = 0;
 
-char *varp_device = "xen-br0";
+char *varp_device = "xenbr0";
 
 #define VarpTable_read_lock(z, flags)    do{ (flags) = 0; down(&(z)->lock); } while(0)
 #define VarpTable_read_unlock(z, flags)  do{ (flags) = 0; up(&(z)->lock); } while(0)
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstat/libxenstat/src/xen-interface.c
--- a/tools/xenstat/libxenstat/src/xen-interface.c      Fri Oct 21 19:58:39 2005
+++ b/tools/xenstat/libxenstat/src/xen-interface.c      Mon Oct 24 15:08:13 2005
@@ -173,21 +173,22 @@
        return op.u.getdomaininfolist.num_domains;
 }
 
-/* Returns cpu usage data from dom0 */
-long long xi_get_vcpu_usage(xi_handle *handle, unsigned int domain,
-                            unsigned int vcpu)
+/* Get vcpu info from a domain */
+int xi_get_domain_vcpu_info(xi_handle *handle, unsigned int domain, 
+                            unsigned int vcpu, dom0_getvcpuinfo_t *info)
 {
        dom0_op_t op;
-       op.u.getvcpucontext.domain = domain;
-       op.u.getvcpucontext.vcpu = vcpu;
-       op.u.getvcpucontext.ctxt = NULL;
-
-       if (xi_make_dom0_op(handle, &op, DOM0_GETVCPUCONTEXT) < 0) {
-               perror("DOM0_GETVCPUCONTEXT Hypercall failed");
-               return -1;
-       }
-
-       return op.u.getvcpucontext.cpu_time;
+       op.u.getvcpuinfo.domain = domain;
+       op.u.getvcpuinfo.vcpu   = vcpu;
+
+       if (xi_make_dom0_op(handle, &op, DOM0_GETVCPUINFO) < 0) {
+               perror("DOM0_GETVCPUINFO Hypercall failed");
+               return -1;
+       }
+
+       memcpy(info, &op.u.getvcpuinfo, sizeof(dom0_getvcpuinfo_t));
+
+       return 0;
 }
 
 /* gets xen version information from hypervisor */
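[Sketch, not part of the changeset: the hunk above replaces xi_get_vcpu_usage(), which returned only a cpu_time value, with xi_get_domain_vcpu_info(), which copies the whole dom0_getvcpuinfo_t out to the caller. A minimal caller could look like the following, assuming only the signature introduced above, the usual <stdio.h>, and the cpu_time/online fields that the xenstat.c hunk later in this changeset reads.]

/* Illustrative helper; print_vcpu_info() is not part of the patch. */
static int print_vcpu_info(xi_handle *handle, unsigned int domid,
                           unsigned int vcpu)
{
        dom0_getvcpuinfo_t info;

        if (xi_get_domain_vcpu_info(handle, domid, vcpu, &info) != 0)
                return -1;                      /* DOM0_GETVCPUINFO failed */

        printf("dom %u vcpu %u: online=%u cpu_time=%llu ns\n", domid, vcpu,
               info.online, (unsigned long long)info.cpu_time);
        return 0;
}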
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstat/libxenstat/src/xen-interface.h
--- a/tools/xenstat/libxenstat/src/xen-interface.h      Fri Oct 21 19:58:39 2005
+++ b/tools/xenstat/libxenstat/src/xen-interface.h      Mon Oct 24 15:08:13 2005
@@ -16,16 +16,6 @@
  */
 
 #include <stdint.h>
-
-typedef int8_t   s8;
-typedef int16_t  s16;
-typedef int32_t  s32;
-typedef int64_t  s64;
-typedef uint8_t  u8;
-typedef uint16_t u16;
-typedef uint32_t u32;
-typedef uint64_t u64;
-
 #include <xen/xen.h>
 #include <xen/dom0_ops.h>
 #include <xen/sched.h>
@@ -51,5 +41,6 @@
 int xi_get_domaininfolist(xi_handle *, dom0_getdomaininfo_t *, unsigned int,
                           unsigned int);
 
-/* Returns cpu usage data from dom0 */
-long long xi_get_vcpu_usage(xi_handle *, unsigned int, unsigned int);
+/* Get vcpu info from a domain */
+int xi_get_domain_vcpu_info(xi_handle *, unsigned int, unsigned int,
+                            dom0_getvcpuinfo_t *);
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstat/libxenstat/src/xenstat.c
--- a/tools/xenstat/libxenstat/src/xenstat.c    Fri Oct 21 19:58:39 2005
+++ b/tools/xenstat/libxenstat/src/xenstat.c    Mon Oct 24 15:08:13 2005
@@ -51,7 +51,7 @@
        unsigned int id;
        unsigned int state;
        unsigned long long cpu_ns;
-       unsigned int num_vcpus;
+       unsigned int num_vcpus;         /* No. vcpus configured for domain */
        xenstat_vcpu *vcpus;            /* Array of length num_vcpus */
        unsigned long long cur_mem;     /* Current memory reservation */
        unsigned long long max_mem;     /* Total memory allowed */
@@ -61,6 +61,7 @@
 };
 
 struct xenstat_vcpu {
+       unsigned int online;
        unsigned long long ns;
 };
 
@@ -229,7 +230,7 @@
                        domain->id = domaininfo[i].domain;
                        domain->state = domaininfo[i].flags;
                        domain->cpu_ns = domaininfo[i].cpu_time;
-                       domain->num_vcpus = domaininfo[i].n_vcpu;
+                       domain->num_vcpus = (domaininfo[i].max_vcpu_id+1);
                        domain->vcpus = NULL;
                        domain->cur_mem =
                            ((unsigned long long)domaininfo[i].tot_pages)
@@ -344,7 +345,7 @@
        return domain->cpu_ns;
 }
 
-/* Find the number of VCPUs allocated to a domain */
+/* Find the number of VCPUs for a domain */
 unsigned int xenstat_domain_num_vcpus(xenstat_domain * domain)
 {
        return domain->num_vcpus;
@@ -432,22 +433,24 @@
 static int xenstat_collect_vcpus(xenstat_node * node)
 {
        unsigned int i, vcpu;
+
        /* Fill in VCPU information */
        for (i = 0; i < node->num_domains; i++) {
                node->domains[i].vcpus = malloc(node->domains[i].num_vcpus
                                                * sizeof(xenstat_vcpu));
                if (node->domains[i].vcpus == NULL)
                        return 0;
-
+       
                for (vcpu = 0; vcpu < node->domains[i].num_vcpus; vcpu++) {
                        /* FIXME: need to be using a more efficient mechanism*/
-                       long long vcpu_time;
-                       vcpu_time = xi_get_vcpu_usage(node->handle->xihandle,
-                                                     node->domains[i].id,
-                                                     vcpu);
-                       if (vcpu_time < 0)
+                       dom0_getvcpuinfo_t info;
+
+                       if (xi_get_domain_vcpu_info(node->handle->xihandle,
+                           node->domains[i].id, vcpu, &info) != 0)
                                return 0;
-                       node->domains[i].vcpus[vcpu].ns = vcpu_time;
+
+                       node->domains[i].vcpus[vcpu].online = info.online;
+                       node->domains[i].vcpus[vcpu].ns = info.cpu_time;
                }
        }
        return 1;
@@ -464,6 +467,12 @@
 /* Free VCPU information in handle - nothing to do */
 static void xenstat_uninit_vcpus(xenstat_handle * handle)
 {
+}
+
+/* Get VCPU online status */
+unsigned int xenstat_vcpu_online(xenstat_vcpu * vcpu)
+{
+       return vcpu->online;
 }
 
 /* Get VCPU usage */
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstat/libxenstat/src/xenstat.h
--- a/tools/xenstat/libxenstat/src/xenstat.h    Fri Oct 21 19:58:39 2005
+++ b/tools/xenstat/libxenstat/src/xenstat.h    Mon Oct 24 15:08:13 2005
@@ -119,6 +119,7 @@
  */
 
 /* Get VCPU usage */
+unsigned int xenstat_vcpu_online(xenstat_vcpu * vcpu);
 unsigned long long xenstat_vcpu_ns(xenstat_vcpu * vcpu);
 
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstat/xentop/xentop.c
--- a/tools/xenstat/xentop/xentop.c     Fri Oct 21 19:58:39 2005
+++ b/tools/xenstat/xentop/xentop.c     Mon Oct 24 15:08:13 2005
@@ -713,13 +713,16 @@
 
        num_vcpus = xenstat_domain_num_vcpus(domain);
 
-       /* for all vcpus dump out values */
+       /* for all online vcpus dump out values */
        for (i=0; i< num_vcpus; i++) {
                vcpu = xenstat_domain_vcpu(domain,i);
 
-               if (i != 0 && (i%5)==0)
-                       print("\n        ");
-               print(" %2u: %10llus", i, xenstat_vcpu_ns(vcpu)/1000000000);
+               if (xenstat_vcpu_online(vcpu) > 0) {
+                       if (i != 0 && (i%5)==0)
+                               print("\n        ");
+                       print(" %2u: %10llus", i, 
+                                       xenstat_vcpu_ns(vcpu)/1000000000);
+               }
        }
        print("\n");
 }
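[Sketch, not part of the changeset: the xentop loop above now consults the new xenstat_vcpu_online() accessor before printing, because num_vcpus is derived from max_vcpu_id+1 and may include offline slots. A stripped-down version of the same loop, using plain printf() instead of xentop's print() helper and the libxenstat accessors added in this changeset, might read:]

/* Illustrative only: dump per-VCPU seconds for one domain, skipping
 * offline VCPUs. */
static void dump_vcpus(xenstat_domain *domain)
{
        unsigned int i, num_vcpus = xenstat_domain_num_vcpus(domain);

        for (i = 0; i < num_vcpus; i++) {
                xenstat_vcpu *vcpu = xenstat_domain_vcpu(domain, i);

                if (xenstat_vcpu_online(vcpu) == 0)
                        continue;       /* slot exists but VCPU is offline */

                printf(" %2u: %10llus", i,
                       xenstat_vcpu_ns(vcpu) / 1000000000ULL);
        }
        printf("\n");
}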
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/Makefile
--- a/tools/xenstore/Makefile   Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/Makefile   Mon Oct 24 15:08:13 2005
@@ -34,14 +34,14 @@
 xenstored: xenstored_core.o xenstored_watch.o xenstored_domain.o xenstored_transaction.o xs_lib.o talloc.o utils.o tdb.o
        $(LINK.o) $^ $(LOADLIBES) $(LDLIBS) -lxenctrl -o $@
 
-$(CLIENTS): xenstore-%: xenstore_%.o
-       $(LINK.o) $^ $(LOADLIBES) $(LDLIBS) -lxenctrl -L. -lxenstore -o $@
+$(CLIENTS): xenstore-%: xenstore_%.o libxenstore.so
+       $(LINK.o) $< $(LOADLIBES) $(LDLIBS) -lxenctrl -L. -lxenstore -o $@
 
 $(CLIENTS_OBJS): xenstore_%.o: xenstore_client.c
        $(COMPILE.c) -DCLIENT_$(*F) -o $@ $<
 
-xsls: xsls.o
-       $(LINK.o) $^ $(LOADLIBES) $(LDLIBS) -lxenctrl -L. -lxenstore -o $@
+xsls: xsls.o libxenstore.so
+       $(LINK.o) $< $(LOADLIBES) $(LDLIBS) -lxenctrl -L. -lxenstore -o $@
 
 xenstored_test: xenstored_core_test.o xenstored_watch_test.o xenstored_domain_test.o xenstored_transaction_test.o xs_lib.o talloc_test.o fake_libxc.o utils.o tdb.o
        $(LINK.o) $^ $(LOADLIBES) $(LDLIBS) -o $@
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/fake_libxc.c
--- a/tools/xenstore/fake_libxc.c       Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/fake_libxc.c       Mon Oct 24 15:08:13 2005
@@ -30,10 +30,11 @@
 #include "xenstored_core.h"
 #include "xenstored_domain.h"
 #include "xenstored_test.h"
+#include <xenctrl.h>
 
 static int sigfd;
 static int xs_test_pid;
-static u16 port;
+static uint16_t port;
 
 /* The event channel maps to a signal, shared page to an mmapped file. */
 void evtchn_notify(int local_port)
@@ -43,7 +44,7 @@
                barf_perror("fake event channel failed");
 }
 
-void *xc_map_foreign_range(int xc_handle, u32 dom __attribute__((unused)),
+void *xc_map_foreign_range(int xc_handle, uint32_t dom __attribute__((unused)),
                           int size, int prot,
                           unsigned long mfn __attribute__((unused)))
 {
@@ -83,7 +84,7 @@
 }
 
 int xc_domain_getinfo(int xc_handle __attribute__((unused)),
-                     u32 first_domid, unsigned int max_doms,
+                     uint32_t first_domid, unsigned int max_doms,
                       xc_dominfo_t *info)
 {
        assert(max_doms == 1);
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/tdb.c
--- a/tools/xenstore/tdb.c      Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/tdb.c      Mon Oct 24 15:08:13 2005
@@ -154,9 +154,9 @@
 }
 
 /* Endian conversion: we only ever deal with 4 byte quantities */
-static void *convert(void *buf, u32 size)
-{
-       u32 i, *p = buf;
+static void *convert(void *buf, uint32_t size)
+{
+       uint32_t i, *p = buf;
        for (i = 0; i < size / 4; i++)
                p[i] = TDB_BYTEREV(p[i]);
        return buf;
@@ -171,8 +171,8 @@
        tdb_len rec_len; /* total byte length of record */
        tdb_len key_len; /* byte length of key */
        tdb_len data_len; /* byte length of data */
-       u32 full_hash; /* the full 32 bit hash of the key */
-       u32 magic;   /* try to catch errors */
+       uint32_t full_hash; /* the full 32 bit hash of the key */
+       uint32_t magic;   /* try to catch errors */
        /* the following union is implied:
                union {
                        char record[rec_len];
@@ -180,7 +180,7 @@
                                char key[key_len];
                                char data[data_len];
                        }
-                       u32 totalsize; (tailer)
+                       uint32_t totalsize; (tailer)
                }
        */
 };
@@ -294,10 +294,10 @@
 }
 
 /* This is based on the hash algorithm from gdbm */
-static u32 default_tdb_hash(TDB_DATA *key)
-{
-       u32 value;      /* Used to compute the hash value.  */
-       u32   i;        /* Used to cycle through random values. */
+static uint32_t default_tdb_hash(TDB_DATA *key)
+{
+       uint32_t value; /* Used to compute the hash value.  */
+       uint32_t   i;   /* Used to cycle through random values. */
 
        /* Set the initial value from the key size. */
        for (value = 0x238F13AF * key->dsize, i=0; i < key->dsize; i++)
@@ -399,7 +399,7 @@
 static int tdb_key_eq(TDB_CONTEXT *tdb, tdb_off off, TDB_DATA key)
 {
        char buf[64];
-       u32 len;
+       uint32_t len;
 
        if (tdb_oob(tdb, off + key.dsize, 0) != 0)
                return -1;
@@ -1030,7 +1030,7 @@
 
 /* Returns 0 on fail.  On success, return offset of record, and fills
    in rec */
-static tdb_off tdb_find(TDB_CONTEXT *tdb, TDB_DATA key, u32 hash,
+static tdb_off tdb_find(TDB_CONTEXT *tdb, TDB_DATA key, uint32_t hash,
                        struct list_struct *r)
 {
        tdb_off rec_ptr;
@@ -1058,10 +1058,10 @@
 }
 
 /* As tdb_find, but if you succeed, keep the lock */
-static tdb_off tdb_find_lock_hash(TDB_CONTEXT *tdb, TDB_DATA key, u32 hash, int locktype,
+static tdb_off tdb_find_lock_hash(TDB_CONTEXT *tdb, TDB_DATA key, uint32_t hash, int locktype,
                             struct list_struct *rec)
 {
-       u32 rec_ptr;
+       uint32_t rec_ptr;
 
        if (tdb_lock(tdb, BUCKET(hash), locktype) == -1)
                return 0;
@@ -1089,7 +1089,7 @@
 /* Error string for the last tdb error */
 const char *tdb_errorstr(TDB_CONTEXT *tdb)
 {
-       u32 i;
+       uint32_t i;
        for (i = 0; i < sizeof(emap) / sizeof(struct tdb_errname); i++)
                if (tdb->ecode == emap[i].ecode)
                        return emap[i].estring;
@@ -1101,7 +1101,7 @@
    on failure return -1.
 */
 
-static int tdb_update_hash(TDB_CONTEXT *tdb, TDB_DATA key, u32 hash, TDB_DATA dbuf)
+static int tdb_update_hash(TDB_CONTEXT *tdb, TDB_DATA key, uint32_t hash, TDB_DATA dbuf)
 {
        struct list_struct rec;
        tdb_off rec_ptr;
@@ -1141,7 +1141,7 @@
        tdb_off rec_ptr;
        struct list_struct rec;
        TDB_DATA ret;
-       u32 hash;
+       uint32_t hash;
 
        /* find which hash bucket it is in */
        hash = tdb->hash_fn(&key);
@@ -1161,7 +1161,7 @@
    this doesn't match the conventions in the rest of this module, but is
    compatible with gdbm
 */
-static int tdb_exists_hash(TDB_CONTEXT *tdb, TDB_DATA key, u32 hash)
+static int tdb_exists_hash(TDB_CONTEXT *tdb, TDB_DATA key, uint32_t hash)
 {
        struct list_struct rec;
        
@@ -1173,7 +1173,7 @@
 
 int tdb_exists(TDB_CONTEXT *tdb, TDB_DATA key)
 {
-       u32 hash = tdb->hash_fn(&key);
+       uint32_t hash = tdb->hash_fn(&key);
        return tdb_exists_hash(tdb, key, hash);
 }
 
@@ -1210,7 +1210,7 @@
 static int unlock_record(TDB_CONTEXT *tdb, tdb_off off)
 {
        struct tdb_traverse_lock *i;
-       u32 count = 0;
+       uint32_t count = 0;
 
        if (off == 0)
                return 0;
@@ -1293,10 +1293,10 @@
                   system (testing using ldbtest).
                 */
                if (!tlock->off && tlock->hash != 0) {
-                       u32 off;
+                       uint32_t off;
                        if (tdb->map_ptr) {
                                for (;tlock->hash < tdb->header.hash_size;tlock->hash++) {
-                                       if (0 != *(u32 *)(TDB_HASH_TOP(tlock->hash) + (unsigned char *)tdb->map_ptr)) {
+                                       if (0 != *(uint32_t *)(TDB_HASH_TOP(tlock->hash) + (unsigned char *)tdb->map_ptr)) {
                                                break;
                                        }
                                }
@@ -1459,7 +1459,7 @@
 /* find the next entry in the database, returning its key */
 TDB_DATA tdb_nextkey(TDB_CONTEXT *tdb, TDB_DATA oldkey)
 {
-       u32 oldhash;
+       uint32_t oldhash;
        TDB_DATA key = tdb_null;
        struct list_struct rec;
        char *k = NULL;
@@ -1513,7 +1513,7 @@
 }
 
 /* delete an entry in the database given a key */
-static int tdb_delete_hash(TDB_CONTEXT *tdb, TDB_DATA key, u32 hash)
+static int tdb_delete_hash(TDB_CONTEXT *tdb, TDB_DATA key, uint32_t hash)
 {
        tdb_off rec_ptr;
        struct list_struct rec;
@@ -1529,7 +1529,7 @@
 
 int tdb_delete(TDB_CONTEXT *tdb, TDB_DATA key)
 {
-       u32 hash = tdb->hash_fn(&key);
+       uint32_t hash = tdb->hash_fn(&key);
        return tdb_delete_hash(tdb, key, hash);
 }
 
@@ -1541,7 +1541,7 @@
 int tdb_store(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf, int flag)
 {
        struct list_struct rec;
-       u32 hash;
+       uint32_t hash;
        tdb_off rec_ptr;
        char *p = NULL;
        int ret = 0;
@@ -1622,7 +1622,7 @@
    is <= the old data size and the key exists.
    on failure return -1. Record must be locked before calling.
 */
-static int tdb_append_inplace(TDB_CONTEXT *tdb, TDB_DATA key, u32 hash, TDB_DATA new_dbuf)
+static int tdb_append_inplace(TDB_CONTEXT *tdb, TDB_DATA key, uint32_t hash, TDB_DATA new_dbuf)
 {
        struct list_struct rec;
        tdb_off rec_ptr;
@@ -1656,7 +1656,7 @@
 int tdb_append(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA new_dbuf)
 {
        struct list_struct rec;
-       u32 hash;
+       uint32_t hash;
        tdb_off rec_ptr;
        char *p = NULL;
        int ret = 0;
@@ -1790,7 +1790,7 @@
        struct stat st;
        int rev = 0, locked = 0;
        uint8_t *vp;
-       u32 vertest;
+       uint32_t vertest;
 
        if (!(tdb = talloc_zero(name, TDB_CONTEXT))) {
                /* Can't log this */
@@ -1869,8 +1869,8 @@
                rev = (tdb->flags & TDB_CONVERT);
        }
        vp = (uint8_t *)&tdb->header.version;
-       vertest = (((u32)vp[0]) << 24) | (((u32)vp[1]) << 16) |
-                 (((u32)vp[2]) << 8) | (u32)vp[3];
+       vertest = (((uint32_t)vp[0]) << 24) | (((uint32_t)vp[1]) << 16) |
+                 (((uint32_t)vp[2]) << 8) | (uint32_t)vp[3];
        tdb->flags |= (vertest==TDB_VERSION) ? TDB_BIGENDIAN : 0;
        if (!rev)
                tdb->flags &= ~TDB_CONVERT;
@@ -2000,7 +2000,7 @@
 /* lock/unlock entire database */
 int tdb_lockall(TDB_CONTEXT *tdb)
 {
-       u32 i;
+       uint32_t i;
 
        /* There are no locks on read-only dbs */
        if (tdb->read_only)
@@ -2011,7 +2011,7 @@
 
        /* If error, release locks we have... */
        if (i < tdb->header.hash_size) {
-               u32 j;
+               uint32_t j;
 
                for ( j = 0; j < i; j++)
                        tdb_unlock(tdb, j, F_WRLCK);
@@ -2022,7 +2022,7 @@
 }
 void tdb_unlockall(TDB_CONTEXT *tdb)
 {
-       u32 i;
+       uint32_t i;
        for (i=0; i < tdb->header.hash_size; i++)
                tdb_unlock(tdb, i, F_WRLCK);
 }
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/tdb.h
--- a/tools/xenstore/tdb.h      Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/tdb.h      Mon Oct 24 15:08:13 2005
@@ -53,8 +53,8 @@
                TDB_ERR_OOM, TDB_ERR_EXISTS, TDB_ERR_NOLOCK, TDB_ERR_LOCK_TIMEOUT,
                TDB_ERR_NOEXIST};
 
-#ifndef u32
-#define u32 unsigned
+#ifndef uint32_t
+#define uint32_t unsigned
 #endif
 
 typedef struct TDB_DATA {
@@ -62,27 +62,27 @@
        size_t dsize;
 } TDB_DATA;
 
-typedef u32 tdb_len;
-typedef u32 tdb_off;
+typedef uint32_t tdb_len;
+typedef uint32_t tdb_off;
 
 /* this is stored at the front of every database */
 struct tdb_header {
        char magic_food[32]; /* for /etc/magic */
-       u32 version; /* version of the code */
-       u32 hash_size; /* number of hash entries */
+       uint32_t version; /* version of the code */
+       uint32_t hash_size; /* number of hash entries */
        tdb_off rwlocks;
        tdb_off reserved[31];
 };
 
 struct tdb_lock_type {
-       u32 count;
-       u32 ltype;
+       uint32_t count;
+       uint32_t ltype;
 };
 
 struct tdb_traverse_lock {
        struct tdb_traverse_lock *next;
-       u32 off;
-       u32 hash;
+       uint32_t off;
+       uint32_t hash;
 };
 
 #ifndef PRINTF_ATTRIBUTE
@@ -99,19 +99,19 @@
        struct tdb_lock_type *locked; /* array of chain locks */
        enum TDB_ERROR ecode; /* error code for last tdb error */
        struct tdb_header header; /* a cached copy of the header */
-       u32 flags; /* the flags passed to tdb_open */
+       uint32_t flags; /* the flags passed to tdb_open */
        struct tdb_traverse_lock travlocks; /* current traversal locks */
        struct tdb_context *next; /* all tdbs to avoid multiple opens */
        dev_t device;   /* uniquely identifies this tdb */
        ino_t inode;    /* uniquely identifies this tdb */
        void (*log_fn)(struct tdb_context *tdb, int level, const char *, ...) PRINTF_ATTRIBUTE(3,4); /* logging function */
-       u32 (*hash_fn)(TDB_DATA *key);
+       uint32_t (*hash_fn)(TDB_DATA *key);
        int open_flags; /* flags used in the open - needed by reopen */
 } TDB_CONTEXT;
 
 typedef int (*tdb_traverse_func)(TDB_CONTEXT *, TDB_DATA, TDB_DATA, void *);
 typedef void (*tdb_log_func)(TDB_CONTEXT *, int , const char *, ...);
-typedef u32 (*tdb_hash_func)(TDB_DATA *key);
+typedef uint32_t (*tdb_hash_func)(TDB_DATA *key);
 
 TDB_CONTEXT *tdb_open(const char *name, int hash_size, int tdb_flags,
                      int open_flags, mode_t mode);
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xenstore_client.c
--- a/tools/xenstore/xenstore_client.c  Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xenstore_client.c  Mon Oct 24 15:08:13 2005
@@ -4,17 +4,50 @@
  * this archive for more details.
  *
  * Copyright (C) 2005 by Christian Limpach
+ * Copyright (C) 2005 XenSource Ltd.
  *
  */
 
 #include <err.h>
+#include <errno.h>
 #include <fcntl.h>
 #include <getopt.h>
+#include <stdarg.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <xs.h>
-#include <errno.h>
+
+static char *output_buf = NULL;
+static int output_pos = 0;
+
+#if defined(CLIENT_read) || defined(CLIENT_list)
+static int output_size = 0;
+
+static void
+output(const char *fmt, ...) {
+    va_list ap;
+    int len;
+    char buf[1];
+
+    va_start(ap, fmt);
+    len = vsnprintf(buf, 1, fmt, ap);
+    if (len < 0)
+       err(1, "output");
+    va_end(ap);
+    if (len + 1 + output_pos > output_size) {
+       output_size += len + 1024;
+       output_buf = realloc(output_buf, output_size);
+       if (output_buf == NULL)
+           err(1, "malloc");
+    }
+    va_start(ap, fmt);
+    if (vsnprintf(&output_buf[output_pos], len + 1, fmt, ap) != len)
+       err(1, "output");
+    va_end(ap);
+    output_pos += len;
+}
+#endif
 
 static void
 usage(const char *progname)
@@ -23,36 +56,152 @@
     errx(1, "Usage: %s [-h] [-p] [-s] key [...]", progname);
 #elif defined(CLIENT_write)
     errx(1, "Usage: %s [-h] [-s] key value [...]", progname);
-#elif defined(CLIENT_rm) || defined(CLIENT_exists) || defined(CLIENT_list)
+#elif defined(CLIENT_rm)
+    errx(1, "Usage: %s [-h] [-s] [-t] key [...]", progname);
+#elif defined(CLIENT_exists) || defined(CLIENT_list)
     errx(1, "Usage: %s [-h] [-s] key [...]", progname);
 #endif
 }
+
+
+#if defined(CLIENT_rm)
+static int
+do_rm(char *path, struct xs_handle *xsh, struct xs_transaction_handle *xth)
+{
+    if (xs_rm(xsh, xth, path)) {
+        return 0;
+    }
+    else {
+        warnx("could not remove path %s", path);
+        return 1;
+    }
+}
+#endif
+
+
+static int
+perform(int optind, int argc, char **argv, struct xs_handle *xsh,
+        struct xs_transaction_handle *xth, int prefix, int tidy)
+{
+    while (optind < argc) {
+#if defined(CLIENT_read)
+       char *val = xs_read(xsh, xth, argv[optind], NULL);
+       if (val == NULL) {
+           warnx("couldn't read path %s", argv[optind]);
+           return 1;
+       }
+       if (prefix)
+           output("%s: ", argv[optind]);
+       output("%s\n", val);
+       free(val);
+       optind++;
+#elif defined(CLIENT_write)
+       if (!xs_write(xsh, xth, argv[optind], argv[optind + 1],
+                      strlen(argv[optind + 1]))) {
+           warnx("could not write path %s", argv[optind]);
+           return 1;
+       }
+       optind += 2;
+#elif defined(CLIENT_rm)
+        /* Remove the specified path.  If the tidy flag is set, then also
+           remove any containing directories that are both empty and have no
+           value attached, and repeat, recursing all the way up to the root if
+           necessary.
+        */
+
+        char *path = argv[optind];
+
+        if (tidy) {
+            /* Copy path, because we can't modify argv because we will need it
+               again if xs_transaction_end gives us EAGAIN. */
+            char *p = malloc(strlen(path) + 1);
+            strcpy(p, path);
+            path = p;
+
+        again:
+            if (do_rm(path, xsh, xth)) {
+                return 1;
+            }
+
+            char *slash = strrchr(p, '/');
+            if (slash) {
+                char *val;
+                *slash = '\0';
+                val = xs_read(xsh, xth, p, NULL);
+                if (val && strlen(val) == 0) {
+                    unsigned int num;
+                    char ** list = xs_directory(xsh, xth, p, &num);
+
+                    if (list && num == 0) {
+                        goto again;
+                    }
+                }
+            }
+
+            free(path);
+        }
+        else {
+            if (do_rm(path, xsh, xth)) {
+                return 1;
+            }
+        }
+
+       optind++;
+#elif defined(CLIENT_exists)
+       char *val = xs_read(xsh, xth, argv[optind], NULL);
+       if (val == NULL) {
+           return 1;
+       }
+       free(val);
+       optind++;
+#elif defined(CLIENT_list)
+       unsigned int i, num;
+       char **list = xs_directory(xsh, xth, argv[optind], &num);
+       if (list == NULL) {
+           warnx("could not list path %s", argv[optind]);
+           return 1;
+       }
+       for (i = 0; i < num; i++) {
+           if (prefix)
+               output("%s/", argv[optind]);
+           output("%s\n", list[i]);
+       }
+       free(list);
+       optind++;
+#endif
+    }
+
+    return 0;
+}
+
 
 int
 main(int argc, char **argv)
 {
     struct xs_handle *xsh;
     struct xs_transaction_handle *xth;
-    bool success;
     int ret = 0, socket = 0;
-#if defined(CLIENT_read) || defined(CLIENT_list)
     int prefix = 0;
-#endif
+    int tidy = 0;
 
     while (1) {
        int c, index = 0;
        static struct option long_options[] = {
            {"help", 0, 0, 'h'},
+            {"socket", 0, 0, 's'},
 #if defined(CLIENT_read) || defined(CLIENT_list)
            {"prefix", 0, 0, 'p'},
-#endif
-            {"socket", 0, 0, 's'},
+#elif defined(CLIENT_rm)
+            {"tidy",   0, 0, 't'},
+#endif
            {0, 0, 0, 0}
        };
 
        c = getopt_long(argc, argv, "hs"
 #if defined(CLIENT_read) || defined(CLIENT_list)
                        "p"
+#elif defined(CLIENT_rm)
+                        "t"
 #endif
                        , long_options, &index);
        if (c == -1)
@@ -69,6 +218,10 @@
        case 'p':
            prefix = 1;
            break;
+#elif defined(CLIENT_rm)
+       case 't':
+           tidy = 1;
+           break;
 #endif
        }
     }
@@ -93,68 +246,18 @@
     if (xth == NULL)
        errx(1, "couldn't start transaction");
 
-    while (optind < argc) {
-#if defined(CLIENT_read)
-       char *val = xs_read(xsh, xth, argv[optind], NULL);
-       if (val == NULL) {
-           warnx("couldn't read path %s", argv[optind]);
-           ret = 1;
-           goto out;
-       }
-       if (prefix)
-           printf("%s: ", argv[optind]);
-       printf("%s\n", val);
-       free(val);
-       optind++;
-#elif defined(CLIENT_write)
-       success = xs_write(xsh, xth, argv[optind], argv[optind + 1],
-                          strlen(argv[optind + 1]));
-       if (!success) {
-           warnx("could not write path %s", argv[optind]);
-           ret = 1;
-           goto out;
-       }
-       optind += 2;
-#elif defined(CLIENT_rm)
-       success = xs_rm(xsh, xth, argv[optind]);
-       if (!success) {
-           warnx("could not remove path %s", argv[optind]);
-           ret = 1;
-           goto out;
-       }
-       optind++;
-#elif defined(CLIENT_exists)
-       char *val = xs_read(xsh, xth, argv[optind], NULL);
-       if (val == NULL) {
-           ret = 1;
-           goto out;
-       }
-       free(val);
-       optind++;
-#elif defined(CLIENT_list)
-       unsigned int i, num;
-       char **list = xs_directory(xsh, xth, argv[optind], &num);
-       if (list == NULL) {
-           warnx("could not list path %s", argv[optind]);
-           ret = 1;
-           goto out;
-       }
-       for (i = 0; i < num; i++) {
-           if (prefix)
-               printf("%s/", argv[optind]);
-           printf("%s\n", list[i]);
-       }
-       free(list);
-       optind++;
-#endif
-    }
-
- out:
-    success = xs_transaction_end(xsh, xth, ret ? true : false);
-    if (!success) {
-       if (ret == 0 && errno == EAGAIN)
+    ret = perform(optind, argc, argv, xsh, xth, prefix, tidy);
+
+    if (!xs_transaction_end(xsh, xth, ret)) {
+       if (ret == 0 && errno == EAGAIN) {
+           output_pos = 0;
            goto again;
+       }
        errx(1, "couldn't end transaction");
     }
+
+    if (output_pos)
+       printf("%s", output_buf);
+
     return ret;
 }
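[Sketch, not part of the changeset: the rework above routes all client output through output(), which accumulates text in a growable buffer and only prints it once xs_transaction_end() succeeds; on EAGAIN the buffer position is reset and the operation is retried, so a replayed transaction cannot emit duplicate output. The buffer-then-commit pattern in isolation, generic C only (names are illustrative, not the patch's):]

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static char *out_buf;
static int out_pos, out_size;

/* Queue formatted output instead of printing it immediately. */
static void queue_output(const char *fmt, ...)
{
        va_list ap;
        int len;

        va_start(ap, fmt);
        len = vsnprintf(NULL, 0, fmt, ap);      /* measure required space */
        va_end(ap);
        if (len < 0)
                exit(1);

        if (out_pos + len + 1 > out_size) {
                out_size = out_pos + len + 1024;
                out_buf = realloc(out_buf, out_size);
                if (out_buf == NULL)
                        exit(1);
        }

        va_start(ap, fmt);
        vsnprintf(out_buf + out_pos, len + 1, fmt, ap);
        va_end(ap);
        out_pos += len;
}

/* On commit: printf("%s", out_buf).  On a retryable failure: out_pos = 0
 * and run the whole transaction again. */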
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xenstored_core.c
--- a/tools/xenstore/xenstored_core.c   Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xenstored_core.c   Mon Oct 24 15:08:13 2005
@@ -51,7 +51,7 @@
 #include "xenctrl.h"
 #include "tdb.h"
 
-int event_fd;
+extern int eventchn_fd; /* in xenstored_domain.c */
 
 static bool verbose;
 LIST_HEAD(connections);
@@ -166,6 +166,7 @@
        case XS_SET_PERMS: return "SET_PERMS";
        case XS_WATCH_EVENT: return "WATCH_EVENT";
        case XS_ERROR: return "ERROR";
+       case XS_IS_DOMAIN_INTRODUCED: return "XS_IS_DOMAIN_INTRODUCED";
        default:
                return "**UNKNOWN**";
        }
@@ -177,12 +178,18 @@
 {
        char string[64];
        unsigned int i;
+       time_t now;
+       struct tm *tm;
 
        if (tracefd < 0)
                return;
 
+       now = time(NULL);
+       tm = localtime(&now);
+
        write(tracefd, prefix, strlen(prefix));
-       sprintf(string, " %p ", conn);
+       sprintf(string, " %p %0d:%0d:%0d ", conn, tm->tm_hour, tm->tm_min,
+               tm->tm_sec);
        write(tracefd, string, strlen(string));
        write(tracefd, sockmsg_string(data->hdr.msg.type),
              strlen(sockmsg_string(data->hdr.msg.type)));
@@ -246,8 +253,9 @@
 
        if (out->inhdr) {
                if (verbose)
-                       xprintf("Writing msg %s (%s) out to %p\n",
+                       xprintf("Writing msg %s (%.*s) out to %p\n",
                                sockmsg_string(out->hdr.msg.type),
+                               out->hdr.msg.len,
                                out->buffer, conn);
                ret = conn->write(conn, out->hdr.raw + out->used,
                                  sizeof(out->hdr) - out->used);
@@ -319,9 +327,9 @@
        FD_SET(ro_sock, inset);
        if (ro_sock > max)
                max = ro_sock;
-       FD_SET(event_fd, inset);
-       if (event_fd > max)
-               max = event_fd;
+       FD_SET(eventchn_fd, inset);
+       if (eventchn_fd > max)
+               max = eventchn_fd;
        list_for_each_entry(i, &connections, list) {
                if (i->domain)
                        continue;
@@ -378,7 +386,7 @@
 static struct node *read_node(struct connection *conn, const char *name)
 {
        TDB_DATA key, data;
-       u32 *p;
+       uint32_t *p;
        struct node *node;
 
        key.dptr = (void *)name;
@@ -400,7 +408,7 @@
        talloc_steal(node, data.dptr);
 
        /* Datalen, childlen, number of permissions */
-       p = (u32 *)data.dptr;
+       p = (uint32_t *)data.dptr;
        node->num_perms = p[0];
        node->datalen = p[1];
        node->childlen = p[2];
@@ -423,14 +431,14 @@
        key.dptr = (void *)node->name;
        key.dsize = strlen(node->name);
 
-       data.dsize = 3*sizeof(u32)
+       data.dsize = 3*sizeof(uint32_t)
                + node->num_perms*sizeof(node->perms[0])
                + node->datalen + node->childlen;
        data.dptr = talloc_size(node, data.dsize);
-       ((u32 *)data.dptr)[0] = node->num_perms;
-       ((u32 *)data.dptr)[1] = node->datalen;
-       ((u32 *)data.dptr)[2] = node->childlen;
-       p = data.dptr + 3 * sizeof(u32);
+       ((uint32_t *)data.dptr)[0] = node->num_perms;
+       ((uint32_t *)data.dptr)[1] = node->datalen;
+       ((uint32_t *)data.dptr)[2] = node->childlen;
+       p = data.dptr + 3 * sizeof(uint32_t);
 
        memcpy(p, node->perms, node->num_perms*sizeof(node->perms[0]));
        p += node->num_perms*sizeof(node->perms[0]);
@@ -668,7 +676,7 @@
 {
        unsigned int i;
        char *strings = NULL;
-       char buffer[MAX_STRLEN(domid_t) + 1];
+       char buffer[MAX_STRLEN(unsigned int) + 1];
 
        for (*len = 0, i = 0; i < num; i++) {
                if (!xs_perm_to_string(&perms[i], buffer))
@@ -946,9 +954,29 @@
        corrupt(conn, "Can't find child '%s' in %s", childname, node->name);
 }
 
+
+static int _rm(struct connection *conn, struct node *node, const char *name)
+{
+       /* Delete from parent first, then if something explodes fsck cleans. */
+       struct node *parent = read_node(conn, get_parent(name));
+       if (!parent) {
+               send_error(conn, EINVAL);
+               return 0;
+       }
+
+       if (!delete_child(conn, parent, basename(name))) {
+               send_error(conn, EINVAL);
+               return 0;
+       }
+
+       delete_node(conn, node);
+       return 1;
+}
+
+
 static void do_rm(struct connection *conn, const char *name)
 {
-       struct node *node, *parent;
+       struct node *node;
 
        name = canonicalize(conn, name);
        node = get_node(conn, name, XS_PERM_WRITE);
@@ -972,23 +1000,13 @@
                return;
        }
 
-       /* Delete from parent first, then if something explodes fsck cleans. */
-       parent = read_node(conn, get_parent(name));
-       if (!parent) {
-               send_error(conn, EINVAL);
-               return;
-       }
-
-       if (!delete_child(conn, parent, basename(name))) {
-               send_error(conn, EINVAL);
-               return;
-       }
-
-       delete_node(conn, node);
-       add_change_node(conn->transaction, name, true);
-       fire_watches(conn, name, true);
-       send_ack(conn, XS_RM);
-}
+       if (_rm(conn, node, name)) {
+               add_change_node(conn->transaction, name, true);
+               fire_watches(conn, name, true);
+               send_ack(conn, XS_RM);
+       }
+}
+
 
 static void do_get_perms(struct connection *conn, const char *name)
 {
@@ -1130,6 +1148,10 @@
 
        case XS_INTRODUCE:
                do_introduce(conn, in);
+               break;
+
+       case XS_IS_DOMAIN_INTRODUCED:
+               do_is_domain_introduced(conn, onearg(in));
                break;
 
        case XS_RELEASE:
@@ -1416,14 +1438,36 @@
 }
 
 
+static void usage(void)
+{
+       fprintf(stderr,
+"Usage:\n"
+"\n"
+"  xenstored <options>\n"
+"\n"
+"where options may include:\n"
+"\n"
+"  --no-domain-init    to state that xenstored should not initialise dom0,\n"
+"  --pid-file <file>   giving a file for the daemon's pid to be written,\n"
+"  --help              to output this message,\n"
+"  --no-fork           to request that the daemon does not fork,\n"
+"  --output-pid        to request that the pid of the daemon is output,\n"
+"  --trace-file <file> giving the file for logging, and\n"
+"  --verbose           to request verbose execution.\n");
+}
+
+
 static struct option options[] = {
        { "no-domain-init", 0, NULL, 'D' },
        { "pid-file", 1, NULL, 'F' },
+       { "help", 0, NULL, 'H' },
        { "no-fork", 0, NULL, 'N' },
        { "output-pid", 0, NULL, 'P' },
        { "trace-file", 1, NULL, 'T' },
        { "verbose", 0, NULL, 'V' },
        { NULL, 0, NULL, 0 } };
+
+extern void dump_conn(struct connection *conn); 
 
 int main(int argc, char *argv[])
 {
@@ -1435,7 +1479,7 @@
        bool no_domain_init = false;
        const char *pidfile = NULL;
 
-       while ((opt = getopt_long(argc, argv, "DF:NPT:V", options,
+       while ((opt = getopt_long(argc, argv, "DF:HNPT:V", options,
                                  NULL)) != -1) {
                switch (opt) {
                case 'D':
@@ -1444,6 +1488,9 @@
                case 'F':
                        pidfile = optarg;
                        break;
+               case 'H':
+                       usage();
+                       return 0;
                case 'N':
                        dofork = false;
                        break;
@@ -1509,12 +1556,12 @@
            || listen(*ro_sock, 1) != 0)
                barf_perror("Could not listen on sockets");
 
-       /* If we're the first, create .perms file for root. */
+       /* Setup the database */
        setup_structure();
 
        /* Listen to hypervisor. */
        if (!no_domain_init)
-               event_fd = domain_init();
+               domain_init();
 
        /* Restore existing connections. */
        restore_existing_connections();
@@ -1555,7 +1602,7 @@
                if (FD_ISSET(*ro_sock, &inset))
                        accept_connection(*ro_sock, false);
 
-               if (FD_ISSET(event_fd, &inset))
+               if (FD_ISSET(eventchn_fd, &inset))
                        handle_event();
 
                list_for_each_entry(i, &connections, list) {
@@ -1586,7 +1633,7 @@
                                goto more;
                        }
 
-                       if (domain_can_write(i)) {
+                       if (domain_can_write(i) && !list_empty(&i->out_list)) {
                                handle_output(i);
                                goto more;
                        }
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xenstored_core.h
--- a/tools/xenstore/xenstored_core.h   Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xenstored_core.h   Mon Oct 24 15:08:13 2005
@@ -60,7 +60,7 @@
        int fd;
 
        /* Who am I? 0 for socket connections. */
-       domid_t id;
+       unsigned int id;
 
        /* Is this a read-only connection? */
        bool can_write;
@@ -76,7 +76,7 @@
 
        /* List of in-progress transactions. */
        struct list_head transaction_list;
-       u32 next_transaction_id;
+       uint32_t next_transaction_id;
 
        /* The domain I'm associated with, if any. */
        struct domain *domain;
@@ -154,6 +154,7 @@
 
 struct connection *new_connection(connwritefn_t *write, connreadfn_t *read);
 
+
 /* Is this a valid node name? */
 bool is_valid_nodename(const char *node);
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xenstored_domain.c
--- a/tools/xenstore/xenstored_domain.c Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xenstored_domain.c Mon Oct 24 15:08:13 2005
@@ -33,29 +33,31 @@
 #include "talloc.h"
 #include "xenstored_core.h"
 #include "xenstored_domain.h"
+#include "xenstored_proc.h"
 #include "xenstored_watch.h"
 #include "xenstored_test.h"
 
+#include <xenctrl.h>
 #include <xen/linux/evtchn.h>
 
 static int *xc_handle;
-static int eventchn_fd;
 static int virq_port;
-static unsigned int ringbuf_datasize;
+
+int eventchn_fd = -1; 
 
 struct domain
 {
        struct list_head list;
 
        /* The id of this domain */
-       domid_t domid;
+       unsigned int domid;
 
        /* Event channel port */
-       u16 port;
+       uint16_t port;
 
        /* The remote end of the event channel, used only to validate
           repeated domain introductions. */
-       u16 remote_port;
+       uint16_t remote_port;
 
        /* The mfn associated with the event channel, used only to validate
           repeated domain introductions. */
@@ -65,10 +67,7 @@
        char *path;
 
        /* Shared page. */
-       void *page;
-
-       /* Input and output ringbuffer heads. */
-       struct ringbuf_head *input, *output;
+       struct xenstore_domain_interface *interface;
 
        /* The connection associated with this. */
        struct connection *conn;
@@ -79,144 +78,101 @@
 
 static LIST_HEAD(domains);
 
-struct ringbuf_head
-{
-       u32 write; /* Next place to write to */
-       u32 read; /* Next place to read from */
-       u8 flags;
-       char buf[0];
-} __attribute__((packed));
-
 #ifndef TESTING
 static void evtchn_notify(int port)
 {
+       int rc; 
+
        struct ioctl_evtchn_notify notify;
        notify.port = port;
-       (void)ioctl(event_fd, IOCTL_EVTCHN_NOTIFY, &notify);
+       rc = ioctl(eventchn_fd, IOCTL_EVTCHN_NOTIFY, &notify);
 }
 #else
 extern void evtchn_notify(int port);
 #endif
 
 /* FIXME: Mark connection as broken (close it?) when this happens. */
-static bool check_buffer(const struct ringbuf_head *h)
-{
-       return (h->write < ringbuf_datasize && h->read < ringbuf_datasize);
-}
-
-/* We can't fill last byte: would look like empty buffer. */
-static void *get_output_chunk(const struct ringbuf_head *h,
-                             void *buf, u32 *len)
-{
-       u32 read_mark;
-
-       if (h->read == 0)
-               read_mark = ringbuf_datasize - 1;
-       else
-               read_mark = h->read - 1;
-
-       /* Here to the end of buffer, unless they haven't read some out. */
-       *len = ringbuf_datasize - h->write;
-       if (read_mark >= h->write)
-               *len = read_mark - h->write;
-       return buf + h->write;
-}
-
-static const void *get_input_chunk(const struct ringbuf_head *h,
-                                  const void *buf, u32 *len)
-{
-       /* Here to the end of buffer, unless they haven't written some. */
-       *len = ringbuf_datasize - h->read;
-       if (h->write >= h->read)
-               *len = h->write - h->read;
-       return buf + h->read;
-}
-
-static void update_output_chunk(struct ringbuf_head *h, u32 len)
-{
-       h->write += len;
-       if (h->write == ringbuf_datasize)
-               h->write = 0;
-}
-
-static void update_input_chunk(struct ringbuf_head *h, u32 len)
-{
-       h->read += len;
-       if (h->read == ringbuf_datasize)
-               h->read = 0;
-}
-
-static bool buffer_has_input(const struct ringbuf_head *h)
-{
-       u32 len;
-
-       get_input_chunk(h, NULL, &len);
-       return (len != 0);
-}
-
-static bool buffer_has_output_room(const struct ringbuf_head *h)
-{
-       u32 len;
-
-       get_output_chunk(h, NULL, &len);
-       return (len != 0);
+static bool check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
+{
+       return ((prod - cons) <= XENSTORE_RING_SIZE);
+}
+
+static void *get_output_chunk(XENSTORE_RING_IDX cons,
+                             XENSTORE_RING_IDX prod,
+                             char *buf, uint32_t *len)
+{
+       *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
+       if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
+               *len = XENSTORE_RING_SIZE - (prod - cons);
+       return buf + MASK_XENSTORE_IDX(prod);
+}
+
+static const void *get_input_chunk(XENSTORE_RING_IDX cons,
+                                  XENSTORE_RING_IDX prod,
+                                  const char *buf, uint32_t *len)
+{
+       *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
+       if ((prod - cons) < *len)
+               *len = prod - cons;
+       return buf + MASK_XENSTORE_IDX(cons);
 }
 
 static int writechn(struct connection *conn, const void *data, unsigned int len)
 {
-       u32 avail;
+       uint32_t avail;
        void *dest;
-       struct ringbuf_head h;
-
-       /* Must read head once, and before anything else, and verified. */
-       h = *conn->domain->output;
+       struct xenstore_domain_interface *intf = conn->domain->interface;
+       XENSTORE_RING_IDX cons, prod;
+
+       /* Must read indexes once, and before anything else, and verified. */
+       cons = intf->rsp_cons;
+       prod = intf->rsp_prod;
        mb();
-       if (!check_buffer(&h)) {
+       if (!check_indexes(cons, prod)) {
                errno = EIO;
                return -1;
        }
 
-       dest = get_output_chunk(&h, conn->domain->output->buf, &avail);
+       dest = get_output_chunk(cons, prod, intf->rsp, &avail);
        if (avail < len)
                len = avail;
 
        memcpy(dest, data, len);
        mb();
-       update_output_chunk(conn->domain->output, len);
+       intf->rsp_prod += len;
+
        evtchn_notify(conn->domain->port);
+
        return len;
 }
 
 static int readchn(struct connection *conn, void *data, unsigned int len)
 {
-       u32 avail;
+       uint32_t avail;
        const void *src;
-       struct ringbuf_head h;
-       bool was_full;
-
-       /* Must read head once, and before anything else, and verified. */
-       h = *conn->domain->input;
+       struct xenstore_domain_interface *intf = conn->domain->interface;
+       XENSTORE_RING_IDX cons, prod;
+
+       /* Must read indexes once, and before anything else, and verified. */
+       cons = intf->req_cons;
+       prod = intf->req_prod;
        mb();
 
-       if (!check_buffer(&h)) {
+       if (!check_indexes(cons, prod)) {
                errno = EIO;
                return -1;
        }
 
-       src = get_input_chunk(&h, conn->domain->input->buf, &avail);
+       src = get_input_chunk(cons, prod, intf->req, &avail);
        if (avail < len)
                len = avail;
 
-       was_full = !buffer_has_output_room(&h);
        memcpy(data, src, len);
        mb();
-       update_input_chunk(conn->domain->input, len);
-       /* FIXME: Probably not neccessary. */
-       mb();
-
-       /* If it was full, tell them we've taken some. */
-       if (was_full)
-               evtchn_notify(conn->domain->port);
+       intf->req_cons += len;
+
+       evtchn_notify(conn->domain->port);
+
        return len;
 }
 
@@ -233,8 +189,8 @@
                        eprintf("> Unbinding port %i failed!\n", domain->port);
        }
 
-       if (domain->page)
-               munmap(domain->page, getpagesize());
+       if (domain->interface)
+               munmap(domain->interface, getpagesize());
 
        return 0;
 }
@@ -268,66 +224,67 @@
 /* We scan all domains rather than use the information given here. */
 void handle_event(void)
 {
-       u16 port;
-
-       if (read(event_fd, &port, sizeof(port)) != sizeof(port))
+       uint16_t port;
+
+       if (read(eventchn_fd, &port, sizeof(port)) != sizeof(port))
                barf_perror("Failed to read from event fd");
 
        if (port == virq_port)
                domain_cleanup();
 
 #ifndef TESTING
-       if (write(event_fd, &port, sizeof(port)) != sizeof(port))
+       if (write(eventchn_fd, &port, sizeof(port)) != sizeof(port))
                barf_perror("Failed to write to event fd");
 #endif
 }
 
 bool domain_can_read(struct connection *conn)
 {
-       return buffer_has_input(conn->domain->input);
+       struct xenstore_domain_interface *intf = conn->domain->interface;
+       return (intf->req_cons != intf->req_prod);
 }
 
 bool domain_can_write(struct connection *conn)
 {
-       return (!list_empty(&conn->out_list) &&
-                buffer_has_output_room(conn->domain->output));
-}
-
-static struct domain *new_domain(void *context, domid_t domid,
-                                unsigned long mfn, int port,
-                                const char *path)
+       struct xenstore_domain_interface *intf = conn->domain->interface;
+       return ((intf->rsp_prod - intf->rsp_cons) != XENSTORE_RING_SIZE);
+}
+
+static char *talloc_domain_path(void *context, unsigned int domid)
+{
+       return talloc_asprintf(context, "/local/domain/%u", domid);
+}
+
+static struct domain *new_domain(void *context, unsigned int domid,
+                                unsigned long mfn, int port)
 {
        struct domain *domain;
        struct ioctl_evtchn_bind_interdomain bind;
        int rc;
+
 
        domain = talloc(context, struct domain);
        domain->port = 0;
        domain->shutdown = 0;
        domain->domid = domid;
-       domain->path = talloc_strdup(domain, path);
-       domain->page = xc_map_foreign_range(*xc_handle, domain->domid,
-                                           getpagesize(),
-                                           PROT_READ|PROT_WRITE,
-                                           mfn);
-       if (!domain->page)
+       domain->path = talloc_domain_path(domain, domid);
+       domain->interface = xc_map_foreign_range(
+               *xc_handle, domain->domid,
+               getpagesize(), PROT_READ|PROT_WRITE, mfn);
+       if (!domain->interface)
                return NULL;
 
        list_add(&domain->list, &domains);
        talloc_set_destructor(domain, destroy_domain);
 
-       /* One in each half of page. */
-       domain->input = domain->page;
-       domain->output = domain->page + getpagesize()/2;
-
        /* Tell kernel we're interested in this event. */
-       bind.remote_domain = domid;
-       bind.remote_port   = port;
-       rc = ioctl(eventchn_fd, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
-       if (rc == -1)
-               return NULL;
-
-       domain->port = rc;
+        bind.remote_domain = domid;
+        bind.remote_port   = port;
+        rc = ioctl(eventchn_fd, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
+        if (rc == -1)
+            return NULL;
+        domain->port = rc;
+
        domain->conn = new_connection(writechn, readchn);
        domain->conn->domain = domain;
 
@@ -338,7 +295,7 @@
 }
 
 
-static struct domain *find_domain_by_domid(domid_t domid)
+static struct domain *find_domain_by_domid(unsigned int domid)
 {
        struct domain *i;
 
@@ -354,11 +311,10 @@
 void do_introduce(struct connection *conn, struct buffered_data *in)
 {
        struct domain *domain;
-       char *vec[4];
-       domid_t domid;
+       char *vec[3];
+       unsigned int domid;
        unsigned long mfn;
-       u16 port;
-       const char *path;
+       uint16_t port;
 
        if (get_strings(in, vec, ARRAY_SIZE(vec)) < ARRAY_SIZE(vec)) {
                send_error(conn, EINVAL);
@@ -373,10 +329,9 @@
        domid = atoi(vec[0]);
        mfn = atol(vec[1]);
        port = atoi(vec[2]);
-       path = vec[3];
 
        /* Sanity check args. */
-       if ((port <= 0) || !is_valid_nodename(path)) {
+       if (port <= 0) { 
                send_error(conn, EINVAL);
                return;
        }
@@ -385,7 +340,7 @@
 
        if (domain == NULL) {
                /* Hang domain off "in" until we're finished. */
-               domain = new_domain(in, domid, mfn, port, path);
+               domain = new_domain(in, domid, mfn, port);
                if (!domain) {
                        send_error(conn, errno);
                        return;
@@ -400,8 +355,7 @@
                /* Check that the given details match the ones we have
                   previously recorded. */
                if (port != domain->remote_port ||
-                   mfn != domain->mfn ||
-                   strcmp(path, domain->path) != 0) {
+                   mfn != domain->mfn) {
                        send_error(conn, EINVAL);
                        return;
                }
@@ -414,7 +368,7 @@
 void do_release(struct connection *conn, const char *domid_str)
 {
        struct domain *domain;
-       domid_t domid;
+       unsigned int domid;
 
        if (!domid_str) {
                send_error(conn, EINVAL);
@@ -452,25 +406,37 @@
 
 void do_get_domain_path(struct connection *conn, const char *domid_str)
 {
-       struct domain *domain;
-       domid_t domid;
+       char *path;
 
        if (!domid_str) {
                send_error(conn, EINVAL);
                return;
        }
 
+       path = talloc_domain_path(conn, atoi(domid_str));
+
+       send_reply(conn, XS_GET_DOMAIN_PATH, path, strlen(path) + 1);
+
+       talloc_free(path);
+}
+
+void do_is_domain_introduced(struct connection *conn, const char *domid_str)
+{
+       int result;
+       unsigned int domid;
+
+        if (!domid_str) {
+                send_error(conn, EINVAL);
+                return;
+        }
+
        domid = atoi(domid_str);
        if (domid == DOMID_SELF)
-               domain = conn->domain;
+               result = 1;
        else
-               domain = find_domain_by_domid(domid);
-
-       if (!domain)
-               send_error(conn, ENOENT);
-       else
-               send_reply(conn, XS_GET_DOMAIN_PATH, domain->path,
-                          strlen(domain->path) + 1);
+               result = (find_domain_by_domid(domid) != NULL);
+
+       send_reply(conn, XS_IS_DOMAIN_INTRODUCED, result ? "T" : "F", 2);
 }
 
 static int close_xc_handle(void *_handle)
@@ -492,19 +458,51 @@
 {
 }
 
+static int dom0_init(void) 
+{ 
+        int rc, fd, port; 
+        unsigned long mfn; 
+        char str[20]; 
+        struct domain *dom0; 
+        
+        fd = open(XENSTORED_PROC_MFN, O_RDONLY); 
+        
+        rc = read(fd, str, sizeof(str)); 
+        str[rc] = '\0'; 
+        mfn = strtoul(str, NULL, 0); 
+        
+        close(fd); 
+        
+        fd = open(XENSTORED_PROC_PORT, O_RDONLY); 
+        
+        rc = read(fd, str, sizeof(str)); 
+        str[rc] = '\0'; 
+        port = strtoul(str, NULL, 0); 
+        
+        close(fd); 
+        
+        
+        dom0 = new_domain(NULL, 0, mfn, port); 
+        talloc_steal(dom0->conn, dom0); 
+
+        evtchn_notify(dom0->port); 
+
+        return 0; 
+}
+
+
+
 #define EVTCHN_DEV_NAME  "/dev/xen/evtchn"
 #define EVTCHN_DEV_MAJOR 10
 #define EVTCHN_DEV_MINOR 201
 
+
 /* Returns the event channel handle. */
 int domain_init(void)
 {
        struct stat st;
        struct ioctl_evtchn_bind_virq bind;
        int rc;
-
-       /* The size of the ringbuffer: half a page minus head structure. */
-       ringbuf_datasize = getpagesize() / 2 - sizeof(struct ringbuf_head);
 
        xc_handle = talloc(talloc_autofree_context(), int);
        if (!xc_handle)
@@ -539,6 +537,9 @@
        if (eventchn_fd < 0)
                barf_perror("Failed to open evtchn device");
 
+        if (dom0_init() != 0) 
+                barf_perror("Failed to initialize dom0 state"); 
+     
        bind.virq = VIRQ_DOM_EXC;
        rc = ioctl(eventchn_fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);
        if (rc == -1)
@@ -547,3 +548,13 @@
 
        return eventchn_fd;
 }
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
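[Sketch, not part of the changeset: the rewritten readchn()/writechn() above drop the old split-page ringbuf_head layout in favour of struct xenstore_domain_interface, where req_cons/req_prod and rsp_cons/rsp_prod are free-running XENSTORE_RING_IDX counters: the number of queued bytes is simply prod - cons, and MASK_XENSTORE_IDX() folds an index into the fixed-size buffer. A standalone illustration of that index scheme follows; RING_SIZE and MASK are illustrative names, not the real Xen macros, and the mb() barriers used in the patch are omitted.]

#include <stdint.h>
#include <string.h>

#define RING_SIZE 1024                          /* must be a power of two */
#define MASK(idx) ((idx) & (RING_SIZE - 1))

struct ring {
        char buf[RING_SIZE];
        uint32_t cons, prod;    /* free-running; never reduced modulo size */
};

/* Bytes queued: unsigned subtraction stays correct across index wrap. */
static uint32_t ring_bytes_queued(const struct ring *r)
{
        return r->prod - r->cons;
}

/* Write up to len bytes without overtaking the reader; returns bytes copied. */
static uint32_t ring_write(struct ring *r, const void *data, uint32_t len)
{
        uint32_t space = RING_SIZE - ring_bytes_queued(r);
        uint32_t first;

        if (len > space)
                len = space;

        first = RING_SIZE - MASK(r->prod);      /* contiguous run to the end */
        if (first > len)
                first = len;

        memcpy(r->buf + MASK(r->prod), data, first);
        memcpy(r->buf, (const char *)data + first, len - first);
        r->prod += len;                         /* publish after the copy */
        return len;
}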
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xenstored_domain.h
--- a/tools/xenstore/xenstored_domain.h Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xenstored_domain.h Mon Oct 24 15:08:13 2005
@@ -26,6 +26,9 @@
 void do_introduce(struct connection *conn, struct buffered_data *in);
 
 /* domid */
+void do_is_domain_introduced(struct connection *conn, const char *domid_str);
+
+/* domid */
 void do_release(struct connection *conn, const char *domid_str);
 
 /* domid */
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xenstored_transaction.c
--- a/tools/xenstore/xenstored_transaction.c    Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xenstored_transaction.c    Mon Oct 24 15:08:13 2005
@@ -53,7 +53,7 @@
        struct list_head list;
 
        /* Connection-local identifier for this transaction. */
-       u32 id;
+       uint32_t id;
 
        /* Generation when transaction started. */
        unsigned int generation;
@@ -107,7 +107,7 @@
        return 0;
 }
 
-struct transaction *transaction_lookup(struct connection *conn, u32 id)
+struct transaction *transaction_lookup(struct connection *conn, uint32_t id)
 {
        struct transaction *trans;
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xenstored_transaction.h
--- a/tools/xenstore/xenstored_transaction.h    Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xenstored_transaction.h    Mon Oct 24 15:08:13 2005
@@ -25,7 +25,7 @@
 void do_transaction_start(struct connection *conn, struct buffered_data *node);
 void do_transaction_end(struct connection *conn, const char *arg);
 
-struct transaction *transaction_lookup(struct connection *conn, u32 id);
+struct transaction *transaction_lookup(struct connection *conn, uint32_t id);
 
 /* This node was changed: can fail and longjmp. */
 void add_change_node(struct transaction *trans, const char *node,
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xs.c
--- a/tools/xenstore/xs.c       Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xs.c       Mon Oct 24 15:08:13 2005
@@ -304,7 +304,7 @@
        unsigned int i;
        struct sigaction ignorepipe, oldact;
 
-       msg.tx_id = (u32)(unsigned long)t;
+       msg.tx_id = (uint32_t)(unsigned long)t;
        msg.type = type;
        msg.len = 0;
        for (i = 0; i < num_vecs; i++)
@@ -510,7 +510,7 @@
        iov[0].iov_len = strlen(path) + 1;
        
        for (i = 0; i < num_perms; i++) {
-               char buffer[MAX_STRLEN(domid_t)+1];
+               char buffer[MAX_STRLEN(unsigned int)+1];
 
                if (!xs_perm_to_string(&perms[i], buffer))
                        goto unwind;
@@ -672,13 +672,14 @@
  * This tells the store daemon about a shared memory page and event channel
  * associated with a domain: the domain uses these to communicate.
  */
-bool xs_introduce_domain(struct xs_handle *h, domid_t domid, unsigned long mfn,
-                        unsigned int eventchn, const char *path)
+bool xs_introduce_domain(struct xs_handle *h,
+                        unsigned int domid, unsigned long mfn,
+                        unsigned int eventchn)
 {
        char domid_str[MAX_STRLEN(domid)];
        char mfn_str[MAX_STRLEN(mfn)];
        char eventchn_str[MAX_STRLEN(eventchn)];
-       struct iovec iov[4];
+       struct iovec iov[3];
 
        sprintf(domid_str, "%u", domid);
        sprintf(mfn_str, "%lu", mfn);
@@ -690,29 +691,40 @@
        iov[1].iov_len = strlen(mfn_str) + 1;
        iov[2].iov_base = eventchn_str;
        iov[2].iov_len = strlen(eventchn_str) + 1;
-       iov[3].iov_base = (char *)path;
-       iov[3].iov_len = strlen(path) + 1;
 
        return xs_bool(xs_talkv(h, NULL, XS_INTRODUCE, iov,
                                ARRAY_SIZE(iov), NULL));
 }
 
-bool xs_release_domain(struct xs_handle *h, domid_t domid)
+static void * single_with_domid(struct xs_handle *h,
+                               enum xsd_sockmsg_type type,
+                               unsigned int domid)
 {
        char domid_str[MAX_STRLEN(domid)];
 
        sprintf(domid_str, "%u", domid);
 
-       return xs_bool(xs_single(h, NULL, XS_RELEASE, domid_str, NULL));
-}
-
-char *xs_get_domain_path(struct xs_handle *h, domid_t domid)
+       return xs_single(h, NULL, type, domid_str, NULL);
+}
+
+bool xs_release_domain(struct xs_handle *h, unsigned int domid)
+{
+       return xs_bool(single_with_domid(h, XS_RELEASE, domid));
+}
+
+char *xs_get_domain_path(struct xs_handle *h, unsigned int domid)
 {
        char domid_str[MAX_STRLEN(domid)];
 
        sprintf(domid_str, "%u", domid);
 
        return xs_single(h, NULL, XS_GET_DOMAIN_PATH, domid_str, NULL);
+}
+
+bool xs_is_domain_introduced(struct xs_handle *h, unsigned int domid)
+{
+       return strcmp("F",
+                     single_with_domid(h, XS_IS_DOMAIN_INTRODUCED, domid));
 }
 
 /* Only useful for DEBUG versions */
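
The xs.c hunk above reworks the libxenstore domain calls: xs_introduce_domain() loses its store-path argument and takes a plain unsigned int domid, the release and domain-path requests now share the new single_with_domid() helper, and xs_is_domain_introduced() is added (true unless xenstored answers "F"). A minimal caller sketch, not part of this changeset, with purely illustrative handle and domain values:

    /* Hypothetical caller sketch (not from the patch): how a tool might use
     * the reworked libxenstore calls after this change. */
    #include <stdio.h>
    #include <stdbool.h>
    #include "xs.h"

    static bool setup_domain_store(struct xs_handle *h, unsigned int domid,
                                   unsigned long store_mfn, unsigned int evtchn)
    {
        /* The store path argument is gone from xs_introduce_domain(). */
        if (!xs_introduce_domain(h, domid, store_mfn, evtchn))
            return false;

        /* New query: false only when xenstored answers "F". */
        if (!xs_is_domain_introduced(h, domid))
            fprintf(stderr, "domain %u not introduced?\n", domid);

        return true;
    }
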
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xs.h
--- a/tools/xenstore/xs.h       Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xs.h       Mon Oct 24 15:08:13 2005
@@ -124,20 +124,25 @@
                        bool abort);
 
 /* Introduce a new domain.
- * This tells the store daemon about a shared memory page, event channel
- * and store path associated with a domain: the domain uses these to communicate.
+ * This tells the store daemon about a shared memory page, event channel and
+ * store path associated with a domain: the domain uses these to communicate.
  */
-bool xs_introduce_domain(struct xs_handle *h, domid_t domid, unsigned long mfn,
-                         unsigned int eventchn, const char *path);
-
+bool xs_introduce_domain(struct xs_handle *h,
+                        unsigned int domid,
+                        unsigned long mfn,
+                         unsigned int eventchn); 
 /* Release a domain.
  * Tells the store domain to release the memory page to the domain.
  */
-bool xs_release_domain(struct xs_handle *h, domid_t domid);
+bool xs_release_domain(struct xs_handle *h, unsigned int domid);
 
 /* Query the home path of a domain.
  */
-char *xs_get_domain_path(struct xs_handle *h, domid_t domid);
+char *xs_get_domain_path(struct xs_handle *h, unsigned int domid);
+
+/* Return whether the domain specified has been introduced to xenstored.
+ */
+bool xs_is_domain_introduced(struct xs_handle *h, unsigned int domid);
 
 /* Only useful for DEBUG versions */
 char *xs_debug_command(struct xs_handle *h, const char *cmd,
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xs_crashme.c
--- a/tools/xenstore/xs_crashme.c       Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xs_crashme.c       Mon Oct 24 15:08:13 2005
@@ -68,19 +68,19 @@
  * of bytes.  No alignment or length assumptions are made about
  * the input key.
  */
-static inline u32 jhash(const void *key, u32 length, u32 initval)
-{
-       u32 a, b, c, len;
-       const u8 *k = key;
+static inline uint32_t jhash(const void *key, uint32_t length, uint32_t initval)
+{
+       uint32_t a, b, c, len;
+       const uint8_t *k = key;
 
        len = length;
        a = b = JHASH_GOLDEN_RATIO;
        c = initval;
 
        while (len >= 12) {
-               a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
-               b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
-               c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));
+               a += (k[0] +((uint32_t)k[1]<<8) +((uint32_t)k[2]<<16) +((uint32_t)k[3]<<24));
+               b += (k[4] +((uint32_t)k[5]<<8) +((uint32_t)k[6]<<16) +((uint32_t)k[7]<<24));
+               c += (k[8] +((uint32_t)k[9]<<8) +((uint32_t)k[10]<<16)+((uint32_t)k[11]<<24));
 
                __jhash_mix(a,b,c);
 
@@ -90,16 +90,16 @@
 
        c += length;
        switch (len) {
-       case 11: c += ((u32)k[10]<<24);
-       case 10: c += ((u32)k[9]<<16);
-       case 9 : c += ((u32)k[8]<<8);
-       case 8 : b += ((u32)k[7]<<24);
-       case 7 : b += ((u32)k[6]<<16);
-       case 6 : b += ((u32)k[5]<<8);
+       case 11: c += ((uint32_t)k[10]<<24);
+       case 10: c += ((uint32_t)k[9]<<16);
+       case 9 : c += ((uint32_t)k[8]<<8);
+       case 8 : b += ((uint32_t)k[7]<<24);
+       case 7 : b += ((uint32_t)k[6]<<16);
+       case 6 : b += ((uint32_t)k[5]<<8);
        case 5 : b += k[4];
-       case 4 : a += ((u32)k[3]<<24);
-       case 3 : a += ((u32)k[2]<<16);
-       case 2 : a += ((u32)k[1]<<8);
+       case 4 : a += ((uint32_t)k[3]<<24);
+       case 3 : a += ((uint32_t)k[2]<<16);
+       case 2 : a += ((uint32_t)k[1]<<8);
        case 1 : a += k[0];
        };
 
@@ -108,12 +108,12 @@
        return c;
 }
 
-/* A special optimized version that handles 1 or more of u32s.
- * The length parameter here is the number of u32s in the key.
+/* A special optimized version that handles 1 or more of uint32_ts.
+ * The length parameter here is the number of uint32_ts in the key.
  */
-static inline u32 jhash2(u32 *k, u32 length, u32 initval)
-{
-       u32 a, b, c, len;
+static inline uint32_t jhash2(uint32_t *k, uint32_t length, uint32_t initval)
+{
+       uint32_t a, b, c, len;
 
        a = b = JHASH_GOLDEN_RATIO;
        c = initval;
@@ -146,7 +146,7 @@
  * NOTE: In partilar the "c += length; __jhash_mix(a,b,c);" normally
  *       done at the end is not done here.
  */
-static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+static inline uint32_t jhash_3words(uint32_t a, uint32_t b, uint32_t c, uint32_t initval)
 {
        a += JHASH_GOLDEN_RATIO;
        b += JHASH_GOLDEN_RATIO;
@@ -157,12 +157,12 @@
        return c;
 }
 
-static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+static inline uint32_t jhash_2words(uint32_t a, uint32_t b, uint32_t initval)
 {
        return jhash_3words(a, b, 0, initval);
 }
 
-static inline u32 jhash_1word(u32 a, u32 initval)
+static inline uint32_t jhash_1word(uint32_t a, uint32_t initval)
 {
        return jhash_3words(a, 0, 0, initval);
 }
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xs_lib.c
--- a/tools/xenstore/xs_lib.c   Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xs_lib.c   Mon Oct 24 15:08:13 2005
@@ -135,7 +135,7 @@
        return true;
 }
 
-/* Convert permissions to a string (up to len MAX_STRLEN(domid_t)+1). */
+/* Convert permissions to a string (up to len MAX_STRLEN(unsigned int)+1). */
 bool xs_perm_to_string(const struct xs_permissions *perm, char *buffer)
 {
        switch (perm->perms) {
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xs_lib.h
--- a/tools/xenstore/xs_lib.h   Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xs_lib.h   Mon Oct 24 15:08:13 2005
@@ -22,8 +22,8 @@
 
 #include <stdbool.h>
 #include <limits.h>
-#include <xenctrl.h>
 #include <errno.h>
+#include <stdint.h>
 #include <xen/io/xs_wire.h>
 
 /* Bitmask of permissions. */
@@ -58,7 +58,7 @@
 bool xs_strings_to_perms(struct xs_permissions *perms, unsigned int num,
                         const char *strings);
 
-/* Convert permissions to a string (up to len MAX_STRLEN(domid_t)+1). */
+/* Convert permissions to a string (up to len MAX_STRLEN(unsigned int)+1). */
 bool xs_perm_to_string(const struct xs_permissions *perm, char *buffer);
 
 /* Given a string and a length, count how many strings (nul terms). */
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xs_random.c
--- a/tools/xenstore/xs_random.c        Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xs_random.c        Mon Oct 24 15:08:13 2005
@@ -594,19 +594,19 @@
  * of bytes.  No alignment or length assumptions are made about
  * the input key.
  */
-static inline u32 jhash(const void *key, u32 length, u32 initval)
-{
-       u32 a, b, c, len;
-       const u8 *k = key;
+static inline uint32_t jhash(const void *key, uint32_t length, uint32_t initval)
+{
+       uint32_t a, b, c, len;
+       const uint8_t *k = key;
 
        len = length;
        a = b = JHASH_GOLDEN_RATIO;
        c = initval;
 
        while (len >= 12) {
-               a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
-               b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
-               c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));
+               a += (k[0] +((uint32_t)k[1]<<8) +((uint32_t)k[2]<<16) +((uint32_t)k[3]<<24));
+               b += (k[4] +((uint32_t)k[5]<<8) +((uint32_t)k[6]<<16) +((uint32_t)k[7]<<24));
+               c += (k[8] +((uint32_t)k[9]<<8) +((uint32_t)k[10]<<16)+((uint32_t)k[11]<<24));
 
                __jhash_mix(a,b,c);
 
@@ -616,16 +616,16 @@
 
        c += length;
        switch (len) {
-       case 11: c += ((u32)k[10]<<24);
-       case 10: c += ((u32)k[9]<<16);
-       case 9 : c += ((u32)k[8]<<8);
-       case 8 : b += ((u32)k[7]<<24);
-       case 7 : b += ((u32)k[6]<<16);
-       case 6 : b += ((u32)k[5]<<8);
+       case 11: c += ((uint32_t)k[10]<<24);
+       case 10: c += ((uint32_t)k[9]<<16);
+       case 9 : c += ((uint32_t)k[8]<<8);
+       case 8 : b += ((uint32_t)k[7]<<24);
+       case 7 : b += ((uint32_t)k[6]<<16);
+       case 6 : b += ((uint32_t)k[5]<<8);
        case 5 : b += k[4];
-       case 4 : a += ((u32)k[3]<<24);
-       case 3 : a += ((u32)k[2]<<16);
-       case 2 : a += ((u32)k[1]<<8);
+       case 4 : a += ((uint32_t)k[3]<<24);
+       case 3 : a += ((uint32_t)k[2]<<16);
+       case 2 : a += ((uint32_t)k[1]<<8);
        case 1 : a += k[0];
        };
 
@@ -634,12 +634,12 @@
        return c;
 }
 
-/* A special optimized version that handles 1 or more of u32s.
- * The length parameter here is the number of u32s in the key.
+/* A special optimized version that handles 1 or more of uint32_ts.
+ * The length parameter here is the number of uint32_ts in the key.
  */
-static inline u32 jhash2(u32 *k, u32 length, u32 initval)
-{
-       u32 a, b, c, len;
+static inline uint32_t jhash2(uint32_t *k, uint32_t length, uint32_t initval)
+{
+       uint32_t a, b, c, len;
 
        a = b = JHASH_GOLDEN_RATIO;
        c = initval;
@@ -672,7 +672,7 @@
  * NOTE: In partilar the "c += length; __jhash_mix(a,b,c);" normally
  *       done at the end is not done here.
  */
-static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+static inline uint32_t jhash_3words(uint32_t a, uint32_t b, uint32_t c, uint32_t initval)
 {
        a += JHASH_GOLDEN_RATIO;
        b += JHASH_GOLDEN_RATIO;
@@ -683,12 +683,12 @@
        return c;
 }
 
-static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+static inline uint32_t jhash_2words(uint32_t a, uint32_t b, uint32_t initval)
 {
        return jhash_3words(a, b, 0, initval);
 }
 
-static inline u32 jhash_1word(u32 a, u32 initval)
+static inline uint32_t jhash_1word(uint32_t a, uint32_t initval)
 {
        return jhash_3words(a, 0, 0, initval);
 }
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xs_tdb_dump.c
--- a/tools/xenstore/xs_tdb_dump.c      Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xs_tdb_dump.c      Mon Oct 24 15:08:13 2005
@@ -11,13 +11,13 @@
 #include "utils.h"
 
 struct record_hdr {
-       u32 num_perms;
-       u32 datalen;
-       u32 childlen;
+       uint32_t num_perms;
+       uint32_t datalen;
+       uint32_t childlen;
        struct xs_permissions perms[0];
 };
 
-static u32 total_size(struct record_hdr *hdr)
+static uint32_t total_size(struct record_hdr *hdr)
 {
        return sizeof(*hdr) + hdr->num_perms * sizeof(struct xs_permissions) 
                + hdr->datalen + hdr->childlen;
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xs_test.c
--- a/tools/xenstore/xs_test.c  Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xs_test.c  Mon Oct 24 15:08:13 2005
@@ -50,72 +50,33 @@
 static bool print_input = false;
 static unsigned int linenum = 0;
 
-struct ringbuf_head
-{
-       uint32_t write; /* Next place to write to */
-       uint32_t read; /* Next place to read from */
-       uint8_t flags;
-       char buf[0];
-} __attribute__((packed));
-
-static struct ringbuf_head *out, *in;
-static unsigned int ringbuf_datasize;
 static int daemon_pid;
+static struct xenstore_domain_interface *interface;
 
 /* FIXME: Mark connection as broken (close it?) when this happens. */
-static bool check_buffer(const struct ringbuf_head *h)
-{
-       return (h->write < ringbuf_datasize && h->read < ringbuf_datasize);
-}
-
-/* We can't fill last byte: would look like empty buffer. */
-static void *get_output_chunk(const struct ringbuf_head *h,
-                             void *buf, uint32_t *len)
-{
-       uint32_t read_mark;
-
-       if (h->read == 0)
-               read_mark = ringbuf_datasize - 1;
-       else
-               read_mark = h->read - 1;
-
-       /* Here to the end of buffer, unless they haven't read some out. */
-       *len = ringbuf_datasize - h->write;
-       if (read_mark >= h->write)
-               *len = read_mark - h->write;
-       return buf + h->write;
-}
-
-static const void *get_input_chunk(const struct ringbuf_head *h,
-                                  const void *buf, uint32_t *len)
-{
-       /* Here to the end of buffer, unless they haven't written some. */
-       *len = ringbuf_datasize - h->read;
-       if (h->write >= h->read)
-               *len = h->write - h->read;
-       return buf + h->read;
-}
-
-static int output_avail(struct ringbuf_head *out)
-{
-       unsigned int avail;
-
-       get_output_chunk(out, out->buf, &avail);
-       return avail != 0;
-}
-
-static void update_output_chunk(struct ringbuf_head *h, uint32_t len)
-{
-       h->write += len;
-       if (h->write == ringbuf_datasize)
-               h->write = 0;
-}
-
-static void update_input_chunk(struct ringbuf_head *h, uint32_t len)
-{
-       h->read += len;
-       if (h->read == ringbuf_datasize)
-               h->read = 0;
+static bool check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
+{
+       return ((prod - cons) <= XENSTORE_RING_SIZE);
+}
+
+static void *get_output_chunk(XENSTORE_RING_IDX cons,
+                             XENSTORE_RING_IDX prod,
+                             char *buf, uint32_t *len)
+{
+       *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
+       if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
+               *len = XENSTORE_RING_SIZE - (prod - cons);
+       return buf + MASK_XENSTORE_IDX(prod);
+}
+
+static const void *get_input_chunk(XENSTORE_RING_IDX cons,
+                                  XENSTORE_RING_IDX prod,
+                                  const char *buf, uint32_t *len)
+{
+       *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
+       if ((prod - cons) < *len)
+               *len = prod - cons;
+       return buf + MASK_XENSTORE_IDX(cons);
 }
 
 /* FIXME: We spin, and we're sloppy. */
@@ -123,25 +84,28 @@
                           void *data, unsigned int len)
 {
        unsigned int avail;
-       int was_full;
-
-       if (!check_buffer(in))
-               barf("Corrupt buffer");
-
-       was_full = !output_avail(in);
+       struct xenstore_domain_interface *intf = interface;
+       XENSTORE_RING_IDX cons, prod;
+       const void *src;
+
        while (len) {
-               const void *src = get_input_chunk(in, in->buf, &avail);
+               cons = intf->rsp_cons;
+               prod = intf->rsp_prod;
+               if (!check_indexes(cons, prod))
+                       barf("Corrupt buffer");
+
+               src = get_input_chunk(cons, prod, intf->rsp, &avail);
                if (avail > len)
                        avail = len;
                memcpy(data, src, avail);
                data += avail;
                len -= avail;
-               update_input_chunk(in, avail);
+               intf->rsp_cons += avail;
        }
 
        /* Tell other end we read something. */
-       if (was_full)
-               kill(daemon_pid, SIGUSR2);
+       kill(daemon_pid, SIGUSR2);
+
        return true;
 }
 
@@ -149,22 +113,28 @@
                            const void *data, unsigned int len)
 {
        uint32_t avail;
-
-       if (!check_buffer(out))
-               barf("Corrupt buffer");
+       struct xenstore_domain_interface *intf = interface;
+       XENSTORE_RING_IDX cons, prod;
+       void *dst;
 
        while (len) {
-               void *dst = get_output_chunk(out, out->buf, &avail);
+               cons = intf->req_cons;
+               prod = intf->req_prod;
+               if (!check_indexes(cons, prod))
+                       barf("Corrupt buffer");
+
+               dst = get_output_chunk(cons, prod, intf->req, &avail);
                if (avail > len)
                        avail = len;
                memcpy(dst, data, avail);
                data += avail;
                len -= avail;
-               update_output_chunk(out, avail);
+               intf->req_prod += avail;
        }
 
        /* Tell other end we wrote something. */
        kill(daemon_pid, SIGUSR2);
+
        return true;
 }
 
@@ -552,21 +522,21 @@
                        break;
 
        fd = open("/tmp/xcmap", O_RDWR);
-       /* Set in and out pointers. */
-       out = mmap(NULL, getpagesize(), PROT_WRITE|PROT_READ, MAP_SHARED,fd,0);
-       if (out == MAP_FAILED)
+       /* Set shared comms page. */
+       interface = mmap(NULL, getpagesize(), PROT_WRITE|PROT_READ,
+                        MAP_SHARED,fd,0);
+       if (interface == MAP_FAILED)
                barf_perror("Failed to map /tmp/xcmap page");
-       in = (void *)out + getpagesize() / 2;
        close(fd);
 
        /* Tell them the event channel and our PID. */
-       *(int *)((void *)out + 32) = getpid();
-       *(u16 *)((void *)out + 36) = atoi(eventchn);
+       *(int *)((void *)interface + 32) = getpid();
+       *(uint16_t *)((void *)interface + 36) = atoi(eventchn);
 
        if (!xs_introduce_domain(handles[handle], atoi(domid),
                                 atol(mfn), atoi(eventchn), path)) {
                failed(handle);
-               munmap(out, getpagesize());
+               munmap(interface, getpagesize());
                return;
        }
        output("handle is %i\n", i);
@@ -576,7 +546,7 @@
        handles[i]->fd = -2;
 
        /* Read in daemon pid. */
-       daemon_pid = *(int *)((void *)out + 32);
+       daemon_pid = *(int *)((void *)interface + 32);
 }
 
 static void do_release(unsigned int handle, const char *domid)
@@ -823,9 +793,6 @@
                usage();
        
 
-       /* The size of the ringbuffer: half a page minus head structure. */
-       ringbuf_datasize = getpagesize() / 2 - sizeof(struct ringbuf_head);
-
        signal(SIGALRM, alarmed);
        while (fgets(line, sizeof(line), stdin))
                do_command(0, line);
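
The xs_test.c hunk above drops the old half-page ringbuf_head layout in favour of the shared struct xenstore_domain_interface with free-running XENSTORE_RING_IDX producer/consumer indexes. A standalone sketch of that index arithmetic, assuming the usual power-of-two XENSTORE_RING_SIZE from xen/io/xs_wire.h (the concrete size and index values here are illustrative only):

    /* Free-running ring indexes: they only ever increase, and are reduced
     * modulo the ring size when touching the buffer. */
    #include <stdint.h>
    #include <assert.h>

    #define XENSTORE_RING_SIZE 1024u                /* power of two (assumption) */
    #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE - 1))

    typedef uint32_t XENSTORE_RING_IDX;

    int main(void)
    {
        XENSTORE_RING_IDX cons = 4090, prod = 4100;   /* well past the first wrap */

        uint32_t unread = prod - cons;                        /* bytes queued   */
        uint32_t space  = XENSTORE_RING_SIZE - (prod - cons); /* bytes writable */

        assert(unread == 10 && space == XENSTORE_RING_SIZE - 10);
        /* The byte at 'cons' lives at offset MASK_XENSTORE_IDX(cons) in the buffer. */
        assert(MASK_XENSTORE_IDX(cons) == cons % XENSTORE_RING_SIZE);
        return 0;
    }
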
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xsls.c
--- a/tools/xenstore/xsls.c     Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xsls.c     Mon Oct 24 15:08:13 2005
@@ -8,7 +8,8 @@
 {
     char **e;
     char newpath[512], *val;
-    int num, i, len;
+    int i;
+    unsigned int num, len;
 
     e = xs_directory(h, NULL, path, &num);
     if (e == NULL)
@@ -25,7 +26,7 @@
         if (val == NULL)
             printf(":\n");
         else if ((unsigned)len > (151 - strlen(e[i])))
-            printf(" = \"%.*s...\"\n", 148 - (int)strlen(e[i]), val);
+            printf(" = \"%.*s...\"\n", (int)(148 - strlen(e[i])), val);
         else
             printf(" = \"%s\"\n", val);
         free(val);
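
The xsls.c hunk above makes num/len unsigned and moves the int cast so it covers the whole precision expression, since the '*' precision of "%.*s" must be passed to printf as an int. A tiny illustrative example of the corrected call (the key and value strings are made up):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *key = "backend";   /* illustrative xenstore key */
        const char *val = "a long value that would otherwise run past the line";

        /* strlen() returns size_t; compute in size_t, convert once to int. */
        printf("%s = \"%.*s...\"\n", key, (int)(148 - strlen(key)), val);
        return 0;
    }
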
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xentrace/xenctx.c
--- a/tools/xentrace/xenctx.c   Fri Oct 21 19:58:39 2005
+++ b/tools/xentrace/xenctx.c   Mon Oct 24 15:08:13 2005
@@ -81,7 +81,7 @@
 }
 #endif
 
-void dump_ctx(u32 domid, u32 vcpu)
+void dump_ctx(uint32_t domid, uint32_t vcpu)
 {
     int ret;
     vcpu_guest_context_t ctx;
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xentrace/xentrace.c
--- a/tools/xentrace/xentrace.c Fri Oct 21 19:58:39 2005
+++ b/tools/xentrace/xentrace.c Mon Oct 24 15:08:13 2005
@@ -45,8 +45,8 @@
     char *outfile;
     struct timespec poll_sleep;
     unsigned long new_data_thresh;
-    u32 evt_mask;
-    u32 cpu_mask;
+    uint32_t evt_mask;
+    uint32_t cpu_mask;
 } settings_t;
 
 settings_t opts;
@@ -168,7 +168,7 @@
  * @type:           the new mask type,0-event mask, 1-cpu mask
  *
  */
-void set_mask(u32 mask, int type)
+void set_mask(uint32_t mask, int type)
 {
     int ret;
     dom0_op_t op;                        /* dom0 op we'll build             */
@@ -496,7 +496,8 @@
     "\v"
     "This tool is used to capture trace buffer data from Xen.  The data is "
     "output in a binary format, in the following order:\n\n"
-    "  CPU(uint) TSC(u64) EVENT(u32) D1 D2 D3 D4 D5 (all u32)\n\n"
+    "  CPU(uint) TSC(uint64_t) EVENT(uint32_t) D1 D2 D3 D4 D5 "
+    "(all uint32_t)\n\n"
     "The output should be parsed using the tool xentrace_format, which can "
     "produce human-readable output in ASCII format."
 };
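
The usage text above documents the binary record layout xentrace emits. A hypothetical reader-side struct for that layout, assuming no padding between fields and a host-sized unsigned int for the CPU word; this is an illustration, not a definition taken from the Xen headers:

    #include <stdint.h>
    #include <stdio.h>

    /* One record as described in the usage text; packing is an assumption. */
    struct xentrace_record {
        unsigned int cpu;      /* CPU(uint)             */
        uint64_t     tsc;      /* TSC(uint64_t)         */
        uint32_t     event;    /* EVENT(uint32_t)       */
        uint32_t     d[5];     /* D1..D5 (all uint32_t) */
    };

    int main(void)
    {
        printf("record size on this host: %zu bytes\n",
               sizeof(struct xentrace_record));
        return 0;
    }
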
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/Rules.mk
--- a/xen/Rules.mk      Fri Oct 21 19:58:39 2005
+++ b/xen/Rules.mk      Mon Oct 24 15:08:13 2005
@@ -2,7 +2,7 @@
 # If you change any of these configuration options then you must
 # 'make clean' before rebuilding.
 #
-verbose     ?= y
+verbose     ?= n
 debug       ?= n
 perfc       ?= n
 perfc_arrays?= n
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/acm/acm_chinesewall_hooks.c
--- a/xen/acm/acm_chinesewall_hooks.c   Fri Oct 21 19:58:39 2005
+++ b/xen/acm/acm_chinesewall_hooks.c   Mon Oct 24 15:08:13 2005
@@ -26,7 +26,10 @@
  *    in which case all types of a new domain must be conflict-free
  *    with all types of already running domains.
  *
+ * indent -i4 -kr -nut
+ *
  */
+
 #include <xen/config.h>
 #include <xen/errno.h>
 #include <xen/types.h>
@@ -48,270 +51,333 @@
  */
 int acm_init_chwall_policy(void)
 {
-       /* minimal startup policy; policy write-locked already */
-       chwall_bin_pol.max_types = 1;
-       chwall_bin_pol.max_ssidrefs = 2;
-       chwall_bin_pol.max_conflictsets = 1;
-       chwall_bin_pol.ssidrefs = (domaintype_t *)xmalloc_array(domaintype_t, chwall_bin_pol.max_ssidrefs*chwall_bin_pol.max_types);
-       chwall_bin_pol.conflict_sets = (domaintype_t *)xmalloc_array(domaintype_t, chwall_bin_pol.max_conflictsets*chwall_bin_pol.max_types);
-       chwall_bin_pol.running_types = (domaintype_t *)xmalloc_array(domaintype_t, chwall_bin_pol.max_types);
-       chwall_bin_pol.conflict_aggregate_set = (domaintype_t *)xmalloc_array(domaintype_t, chwall_bin_pol.max_types);
-       
-       if ((chwall_bin_pol.conflict_sets == NULL) || (chwall_bin_pol.running_types == NULL) ||
-           (chwall_bin_pol.ssidrefs == NULL) || (chwall_bin_pol.conflict_aggregate_set == NULL))
-               return ACM_INIT_SSID_ERROR;
-
-       /* initialize state */
-       memset((void *)chwall_bin_pol.ssidrefs, 0, chwall_bin_pol.max_ssidrefs*chwall_bin_pol.max_types*sizeof(domaintype_t));
-       memset((void *)chwall_bin_pol.conflict_sets, 0, chwall_bin_pol.max_conflictsets*chwall_bin_pol.max_types*sizeof(domaintype_t));
-       memset((void *)chwall_bin_pol.running_types, 0, chwall_bin_pol.max_types*sizeof(domaintype_t));
-       memset((void *)chwall_bin_pol.conflict_aggregate_set, 0, chwall_bin_pol.max_types*sizeof(domaintype_t));
-       return ACM_OK;
-}
-
-static int
-chwall_init_domain_ssid(void **chwall_ssid, ssidref_t ssidref)
-{
-       struct chwall_ssid *chwall_ssidp = xmalloc(struct chwall_ssid);
-       traceprintk("%s.\n", __func__);
-       if (chwall_ssidp == NULL)
-               return ACM_INIT_SSID_ERROR;
-       /* 
-        * depending on wheter chwall is primary or secondary, get the respective
-        * part of the global ssidref (same way we'll get the partial ssid pointer)
-        */
-       chwall_ssidp->chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
-       if ((chwall_ssidp->chwall_ssidref >= chwall_bin_pol.max_ssidrefs) ||
-           (chwall_ssidp->chwall_ssidref == ACM_DEFAULT_LOCAL_SSID)) {
-               printkd("%s: ERROR chwall_ssidref(%x) undefined (>max) or unset (0).\n",
-                       __func__, chwall_ssidp->chwall_ssidref);
-               xfree(chwall_ssidp);
-               return ACM_INIT_SSID_ERROR;
-       }
-       (*chwall_ssid) = chwall_ssidp;
-       printkd("%s: determined chwall_ssidref to %x.\n", 
-              __func__, chwall_ssidp->chwall_ssidref);
-       return ACM_OK;
-}
-
-static void
-chwall_free_domain_ssid(void *chwall_ssid)
-{
-       traceprintk("%s.\n", __func__);
-       if (chwall_ssid != NULL)
-               xfree(chwall_ssid);
-       return;
+    /* minimal startup policy; policy write-locked already */
+    chwall_bin_pol.max_types = 1;
+    chwall_bin_pol.max_ssidrefs = 2;
+    chwall_bin_pol.max_conflictsets = 1;
+    chwall_bin_pol.ssidrefs =
+        (domaintype_t *) xmalloc_array(domaintype_t,
+                                       chwall_bin_pol.max_ssidrefs *
+                                       chwall_bin_pol.max_types);
+    chwall_bin_pol.conflict_sets =
+        (domaintype_t *) xmalloc_array(domaintype_t,
+                                       chwall_bin_pol.max_conflictsets *
+                                       chwall_bin_pol.max_types);
+    chwall_bin_pol.running_types =
+        (domaintype_t *) xmalloc_array(domaintype_t,
+                                       chwall_bin_pol.max_types);
+    chwall_bin_pol.conflict_aggregate_set =
+        (domaintype_t *) xmalloc_array(domaintype_t,
+                                       chwall_bin_pol.max_types);
+
+    if ((chwall_bin_pol.conflict_sets == NULL)
+        || (chwall_bin_pol.running_types == NULL)
+        || (chwall_bin_pol.ssidrefs == NULL)
+        || (chwall_bin_pol.conflict_aggregate_set == NULL))
+        return ACM_INIT_SSID_ERROR;
+
+    /* initialize state */
+    memset((void *) chwall_bin_pol.ssidrefs, 0,
+           chwall_bin_pol.max_ssidrefs * chwall_bin_pol.max_types *
+           sizeof(domaintype_t));
+    memset((void *) chwall_bin_pol.conflict_sets, 0,
+           chwall_bin_pol.max_conflictsets * chwall_bin_pol.max_types *
+           sizeof(domaintype_t));
+    memset((void *) chwall_bin_pol.running_types, 0,
+           chwall_bin_pol.max_types * sizeof(domaintype_t));
+    memset((void *) chwall_bin_pol.conflict_aggregate_set, 0,
+           chwall_bin_pol.max_types * sizeof(domaintype_t));
+    return ACM_OK;
+}
+
+static int chwall_init_domain_ssid(void **chwall_ssid, ssidref_t ssidref)
+{
+    struct chwall_ssid *chwall_ssidp = xmalloc(struct chwall_ssid);
+    traceprintk("%s.\n", __func__);
+    if (chwall_ssidp == NULL)
+        return ACM_INIT_SSID_ERROR;
+
+    chwall_ssidp->chwall_ssidref =
+        GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
+
+    if ((chwall_ssidp->chwall_ssidref >= chwall_bin_pol.max_ssidrefs)
+        || (chwall_ssidp->chwall_ssidref == ACM_DEFAULT_LOCAL_SSID))
+    {
+        printkd("%s: ERROR chwall_ssidref(%x) undefined (>max) or unset (0).\n",
+                __func__, chwall_ssidp->chwall_ssidref);
+        xfree(chwall_ssidp);
+        return ACM_INIT_SSID_ERROR;
+    }
+    (*chwall_ssid) = chwall_ssidp;
+    printkd("%s: determined chwall_ssidref to %x.\n",
+            __func__, chwall_ssidp->chwall_ssidref);
+    return ACM_OK;
+}
+
+static void chwall_free_domain_ssid(void *chwall_ssid)
+{
+    traceprintk("%s.\n", __func__);
+    if (chwall_ssid != NULL)
+        xfree(chwall_ssid);
+    return;
 }
 
 
 /* dump chinese wall cache; policy read-locked already */
-static int
-chwall_dump_policy(u8 *buf, u16 buf_size) {    
-     struct acm_chwall_policy_buffer *chwall_buf = (struct acm_chwall_policy_buffer *)buf;
-     int ret = 0;
-
-     chwall_buf->chwall_max_types = htonl(chwall_bin_pol.max_types);
-     chwall_buf->chwall_max_ssidrefs = htonl(chwall_bin_pol.max_ssidrefs);
-     chwall_buf->policy_code = htonl(ACM_CHINESE_WALL_POLICY);
-     chwall_buf->chwall_ssid_offset = htonl(sizeof(struct acm_chwall_policy_buffer));
-     chwall_buf->chwall_max_conflictsets = htonl(chwall_bin_pol.max_conflictsets);
-     chwall_buf->chwall_conflict_sets_offset =
-            htonl(
-                  ntohl(chwall_buf->chwall_ssid_offset) +
-                  sizeof(domaintype_t) * chwall_bin_pol.max_ssidrefs * 
-                  chwall_bin_pol.max_types);
-
-     chwall_buf->chwall_running_types_offset = 
-            htonl(
-                  ntohl(chwall_buf->chwall_conflict_sets_offset) +
-                  sizeof(domaintype_t) * chwall_bin_pol.max_conflictsets *
-                  chwall_bin_pol.max_types);
-
-     chwall_buf->chwall_conflict_aggregate_offset =
-            htonl(
-                  ntohl(chwall_buf->chwall_running_types_offset) +
-                  sizeof(domaintype_t) * chwall_bin_pol.max_types);
-
-     ret = ntohl(chwall_buf->chwall_conflict_aggregate_offset) +
-            sizeof(domaintype_t) * chwall_bin_pol.max_types;
-
-     /* now copy buffers over */
-     arrcpy16((u16 *)(buf + ntohl(chwall_buf->chwall_ssid_offset)),
-             chwall_bin_pol.ssidrefs,
-             chwall_bin_pol.max_ssidrefs * chwall_bin_pol.max_types);
-
-     arrcpy16((u16 *)(buf + ntohl(chwall_buf->chwall_conflict_sets_offset)),
-             chwall_bin_pol.conflict_sets,
-             chwall_bin_pol.max_conflictsets * chwall_bin_pol.max_types);
-
-     arrcpy16((u16 *)(buf + ntohl(chwall_buf->chwall_running_types_offset)),
-             chwall_bin_pol.running_types,
-             chwall_bin_pol.max_types);
-
-     arrcpy16((u16 *)(buf + ntohl(chwall_buf->chwall_conflict_aggregate_offset)),
-             chwall_bin_pol.conflict_aggregate_set,
-             chwall_bin_pol.max_types);
-     return ret;
+static int chwall_dump_policy(u8 * buf, u32 buf_size)
+{
+    struct acm_chwall_policy_buffer *chwall_buf =
+        (struct acm_chwall_policy_buffer *) buf;
+    int ret = 0;
+
+    if (buf_size < sizeof(struct acm_chwall_policy_buffer))
+        return -EINVAL;
+
+    chwall_buf->chwall_max_types = htonl(chwall_bin_pol.max_types);
+    chwall_buf->chwall_max_ssidrefs = htonl(chwall_bin_pol.max_ssidrefs);
+    chwall_buf->policy_code = htonl(ACM_CHINESE_WALL_POLICY);
+    chwall_buf->chwall_ssid_offset =
+        htonl(sizeof(struct acm_chwall_policy_buffer));
+    chwall_buf->chwall_max_conflictsets =
+        htonl(chwall_bin_pol.max_conflictsets);
+    chwall_buf->chwall_conflict_sets_offset =
+        htonl(ntohl(chwall_buf->chwall_ssid_offset) +
+              sizeof(domaintype_t) * chwall_bin_pol.max_ssidrefs *
+              chwall_bin_pol.max_types);
+    chwall_buf->chwall_running_types_offset =
+        htonl(ntohl(chwall_buf->chwall_conflict_sets_offset) +
+              sizeof(domaintype_t) * chwall_bin_pol.max_conflictsets *
+              chwall_bin_pol.max_types);
+    chwall_buf->chwall_conflict_aggregate_offset =
+        htonl(ntohl(chwall_buf->chwall_running_types_offset) +
+              sizeof(domaintype_t) * chwall_bin_pol.max_types);
+
+    ret = ntohl(chwall_buf->chwall_conflict_aggregate_offset) +
+        sizeof(domaintype_t) * chwall_bin_pol.max_types;
+
+    if (buf_size < ret)
+        return -EINVAL;
+
+    /* now copy buffers over */
+    arrcpy16((u16 *) (buf + ntohl(chwall_buf->chwall_ssid_offset)),
+             chwall_bin_pol.ssidrefs,
+             chwall_bin_pol.max_ssidrefs * chwall_bin_pol.max_types);
+
+    arrcpy16((u16 *) (buf +
+                      ntohl(chwall_buf->chwall_conflict_sets_offset)),
+             chwall_bin_pol.conflict_sets,
+             chwall_bin_pol.max_conflictsets * chwall_bin_pol.max_types);
+
+    arrcpy16((u16 *) (buf +
+                      ntohl(chwall_buf->chwall_running_types_offset)),
+             chwall_bin_pol.running_types, chwall_bin_pol.max_types);
+
+    arrcpy16((u16 *) (buf +
+                      ntohl(chwall_buf->chwall_conflict_aggregate_offset)),
+             chwall_bin_pol.conflict_aggregate_set,
+             chwall_bin_pol.max_types);
+    return ret;
 }
 
 /* adapt security state (running_types and conflict_aggregate_set) to all running
  * domains; chwall_init_state is called when a policy is changed to bring the security
  * information into a consistent state and to detect violations (return != 0).
  * from a security point of view, we simulate that all running domains are re-started
- */ 
+ */
 static int
-chwall_init_state(struct acm_chwall_policy_buffer *chwall_buf, domaintype_t *ssidrefs, domaintype_t *conflict_sets,
-                 domaintype_t *running_types, domaintype_t *conflict_aggregate_set)
-{
-       int violation = 0, i, j;
-       struct chwall_ssid *chwall_ssid;
-       ssidref_t chwall_ssidref;
-       struct domain **pd;
-
-        write_lock(&domlist_lock);
-       /* go through all domains and adjust policy as if this domain was started now */
-        pd = &domain_list;
-        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
-               chwall_ssid = GET_SSIDP(ACM_CHINESE_WALL_POLICY, (struct acm_ssid_domain *)(*pd)->ssid);
-               chwall_ssidref = chwall_ssid->chwall_ssidref;
-               traceprintk("%s: validating policy for domain %x (chwall-REF=%x).\n",
-                       __func__, (*pd)->domain_id, chwall_ssidref);
-               /* a) adjust types ref-count for running domains */
-               for (i=0; i< chwall_buf->chwall_max_types; i++)
-                       running_types[i] +=
-                               ssidrefs[chwall_ssidref*chwall_buf->chwall_max_types + i];
-
-               /* b) check for conflict */
-               for (i=0; i< chwall_buf->chwall_max_types; i++)
-                       if (conflict_aggregate_set[i] &&
-                           ssidrefs[chwall_ssidref*chwall_buf->chwall_max_types + i]) {
-                               printk("%s: CHINESE WALL CONFLICT in type %02x.\n", __func__, i);
-                               violation = 1;
-                               goto out;
-                       }
-               /* set violation and break out of the loop */
-               /* c) adapt conflict aggregate set for this domain (notice conflicts) */
-               for (i=0; i<chwall_buf->chwall_max_conflictsets; i++) {
-                       int common = 0;
-                       /* check if conflict_set_i and ssidref have common types */
-                       for (j=0; j<chwall_buf->chwall_max_types; j++)
-                               if (conflict_sets[i*chwall_buf->chwall_max_types + j] &&
-                                   ssidrefs[chwall_ssidref*chwall_buf->chwall_max_types + j]) {
-                                       common = 1;
-                                       break;
-                               }
-                       if (common == 0)
-                               continue; /* try next conflict set */
-                       /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
-                       for (j=0; j<chwall_buf->chwall_max_types; j++)
-                               if (conflict_sets[i*chwall_buf->chwall_max_types + j] &&
-                                   !ssidrefs[chwall_ssidref*chwall_buf->chwall_max_types + j])
-                                       conflict_aggregate_set[j]++;
-               }       
-       }
+chwall_init_state(struct acm_chwall_policy_buffer *chwall_buf,
+                  domaintype_t * ssidrefs, domaintype_t * conflict_sets,
+                  domaintype_t * running_types,
+                  domaintype_t * conflict_aggregate_set)
+{
+    int violation = 0, i, j;
+    struct chwall_ssid *chwall_ssid;
+    ssidref_t chwall_ssidref;
+    struct domain **pd;
+
+    write_lock(&domlist_lock);
+    /* go through all domains and adjust policy as if this domain was started now */
+    pd = &domain_list;
+    for (pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list)
+    {
+        chwall_ssid =
+            GET_SSIDP(ACM_CHINESE_WALL_POLICY,
+                      (struct acm_ssid_domain *) (*pd)->ssid);
+        chwall_ssidref = chwall_ssid->chwall_ssidref;
+        traceprintk("%s: validating policy for domain %x (chwall-REF=%x).\n",
+                    __func__, (*pd)->domain_id, chwall_ssidref);
+        /* a) adjust types ref-count for running domains */
+        for (i = 0; i < chwall_buf->chwall_max_types; i++)
+            running_types[i] +=
+                ssidrefs[chwall_ssidref * chwall_buf->chwall_max_types + i];
+
+        /* b) check for conflict */
+        for (i = 0; i < chwall_buf->chwall_max_types; i++)
+            if (conflict_aggregate_set[i] &&
+                ssidrefs[chwall_ssidref * chwall_buf->chwall_max_types + i])
+            {
+                printk("%s: CHINESE WALL CONFLICT in type %02x.\n",
+                       __func__, i);
+                violation = 1;
+                goto out;
+            }
+        /* set violation and break out of the loop */
+        /* c) adapt conflict aggregate set for this domain (notice conflicts) */
+        for (i = 0; i < chwall_buf->chwall_max_conflictsets; i++)
+        {
+            int common = 0;
+            /* check if conflict_set_i and ssidref have common types */
+            for (j = 0; j < chwall_buf->chwall_max_types; j++)
+                if (conflict_sets[i * chwall_buf->chwall_max_types + j] &&
+                    ssidrefs[chwall_ssidref *
+                            chwall_buf->chwall_max_types + j])
+                {
+                    common = 1;
+                    break;
+                }
+            if (common == 0)
+                continue;       /* try next conflict set */
+            /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
+            for (j = 0; j < chwall_buf->chwall_max_types; j++)
+                if (conflict_sets[i * chwall_buf->chwall_max_types + j] &&
+                    !ssidrefs[chwall_ssidref *
+                             chwall_buf->chwall_max_types + j])
+                    conflict_aggregate_set[j]++;
+        }
+    }
  out:
-        write_unlock(&domlist_lock);
-       return violation;
-       /* returning "violation != 0" means that the currently running set of domains would
-        * not be possible if the new policy had been enforced before starting them; for chinese
-        * wall, this means that the new policy includes at least one conflict set of which
-        * more than one type is currently running */
-}
-
-static int
-chwall_set_policy(u8 *buf, u16 buf_size) 
-{      
-       /* policy write-locked already */
-       struct acm_chwall_policy_buffer *chwall_buf = (struct acm_chwall_policy_buffer *)buf;
-       void *ssids = NULL, *conflict_sets = NULL, *running_types = NULL, *conflict_aggregate_set = NULL;
-
-        /* rewrite the policy due to endianess */
-        chwall_buf->policy_code                      = ntohl(chwall_buf->policy_code);
-        chwall_buf->policy_version                   = ntohl(chwall_buf->policy_version);
-        chwall_buf->chwall_max_types                 = ntohl(chwall_buf->chwall_max_types);
-        chwall_buf->chwall_max_ssidrefs              = ntohl(chwall_buf->chwall_max_ssidrefs);
-        chwall_buf->chwall_max_conflictsets          = ntohl(chwall_buf->chwall_max_conflictsets);
-        chwall_buf->chwall_ssid_offset               = ntohl(chwall_buf->chwall_ssid_offset);
-        chwall_buf->chwall_conflict_sets_offset      = ntohl(chwall_buf->chwall_conflict_sets_offset);
-        chwall_buf->chwall_running_types_offset      = ntohl(chwall_buf->chwall_running_types_offset);
-        chwall_buf->chwall_conflict_aggregate_offset = ntohl(chwall_buf->chwall_conflict_aggregate_offset);
-
-       /* policy type and version checks */
-       if ((chwall_buf->policy_code != ACM_CHINESE_WALL_POLICY) ||
-           (chwall_buf->policy_version != ACM_CHWALL_VERSION))
-               return -EINVAL;
-
-       /* 1. allocate new buffers */
-       ssids = xmalloc_array(domaintype_t, chwall_buf->chwall_max_types*chwall_buf->chwall_max_ssidrefs);
-       conflict_sets = xmalloc_array(domaintype_t, chwall_buf->chwall_max_conflictsets*chwall_buf->chwall_max_types);
-       running_types = xmalloc_array(domaintype_t,chwall_buf->chwall_max_types);
-       conflict_aggregate_set = xmalloc_array(domaintype_t, chwall_buf->chwall_max_types);
-
-       if ((ssids == NULL)||(conflict_sets == NULL)||(running_types == NULL)||(conflict_aggregate_set == NULL))
-               goto error_free;
-
-       /* 2. set new policy */
-       if (chwall_buf->chwall_ssid_offset + sizeof(domaintype_t) * 
-           chwall_buf->chwall_max_types * chwall_buf->chwall_max_ssidrefs > buf_size)
-               goto error_free;
-       arrcpy(ssids, buf + chwall_buf->chwall_ssid_offset,
-              sizeof(domaintype_t),  
-              chwall_buf->chwall_max_types * chwall_buf->chwall_max_ssidrefs);
-
-       if (chwall_buf->chwall_conflict_sets_offset + sizeof(domaintype_t) * 
-           chwall_buf->chwall_max_types * chwall_buf->chwall_max_conflictsets > buf_size)
-               goto error_free;
-
-       arrcpy(conflict_sets, buf + chwall_buf->chwall_conflict_sets_offset,
-              sizeof(domaintype_t),
-              chwall_buf->chwall_max_types * chwall_buf->chwall_max_conflictsets);
-
-       /* we also use new state buffers since max_types can change */
-       memset(running_types, 0, sizeof(domaintype_t)*chwall_buf->chwall_max_types);
-       memset(conflict_aggregate_set, 0, sizeof(domaintype_t)*chwall_buf->chwall_max_types);
-
-       /* 3. now re-calculate the state for the new policy based on running domains;
-        *    this can fail if new policy is conflicting with running domains */
-       if (chwall_init_state(chwall_buf, ssids, conflict_sets, running_types, conflict_aggregate_set)) {
-               printk("%s: New policy conflicts with running domains. Policy load aborted.\n", __func__);
-               goto error_free; /* new policy conflicts with running domains */
-       }
-       /* 4. free old policy buffers, replace with new ones */
-       chwall_bin_pol.max_types = chwall_buf->chwall_max_types;
-       chwall_bin_pol.max_ssidrefs = chwall_buf->chwall_max_ssidrefs;
-       chwall_bin_pol.max_conflictsets = chwall_buf->chwall_max_conflictsets;
-       if (chwall_bin_pol.ssidrefs != NULL) 
-               xfree(chwall_bin_pol.ssidrefs);
-       if (chwall_bin_pol.conflict_aggregate_set != NULL) 
-               xfree(chwall_bin_pol.conflict_aggregate_set);
-       if (chwall_bin_pol.running_types != NULL) 
-               xfree(chwall_bin_pol.running_types);
-       if (chwall_bin_pol.conflict_sets != NULL) 
-               xfree(chwall_bin_pol.conflict_sets);
-       chwall_bin_pol.ssidrefs = ssids;
-       chwall_bin_pol.conflict_aggregate_set = conflict_aggregate_set;
-       chwall_bin_pol.running_types = running_types;
-       chwall_bin_pol.conflict_sets = conflict_sets;
-       return ACM_OK;
-
-error_free:
-       printk("%s: ERROR setting policy.\n", __func__);
-       if (ssids != NULL) xfree(ssids);
-       if (conflict_sets != NULL) xfree(conflict_sets);
-       if (running_types != NULL) xfree(running_types);
-       if (conflict_aggregate_set != NULL) xfree(conflict_aggregate_set);
-       return -EFAULT;
-}
-       
-static int 
-chwall_dump_stats(u8 *buf, u16 len)
-{
-       /* no stats for Chinese Wall Policy */
-       return 0;
-}
-
-static int
-chwall_dump_ssid_types(ssidref_t ssidref, u8 *buf, u16 len)
+    write_unlock(&domlist_lock);
+    return violation;
+    /* returning "violation != 0" means that the currently running set of domains would
+     * not be possible if the new policy had been enforced before starting them; for chinese
+     * wall, this means that the new policy includes at least one conflict set of which
+     * more than one type is currently running */
+}
+
+static int chwall_set_policy(u8 * buf, u32 buf_size)
+{
+    /* policy write-locked already */
+    struct acm_chwall_policy_buffer *chwall_buf =
+        (struct acm_chwall_policy_buffer *) buf;
+    void *ssids = NULL, *conflict_sets = NULL, *running_types =
+        NULL, *conflict_aggregate_set = NULL;
+
+    if (buf_size < sizeof(struct acm_chwall_policy_buffer))
+        return -EINVAL;
+
+    /* rewrite the policy due to endianess */
+    chwall_buf->policy_code = ntohl(chwall_buf->policy_code);
+    chwall_buf->policy_version = ntohl(chwall_buf->policy_version);
+    chwall_buf->chwall_max_types = ntohl(chwall_buf->chwall_max_types);
+    chwall_buf->chwall_max_ssidrefs =
+        ntohl(chwall_buf->chwall_max_ssidrefs);
+    chwall_buf->chwall_max_conflictsets =
+        ntohl(chwall_buf->chwall_max_conflictsets);
+    chwall_buf->chwall_ssid_offset = ntohl(chwall_buf->chwall_ssid_offset);
+    chwall_buf->chwall_conflict_sets_offset =
+        ntohl(chwall_buf->chwall_conflict_sets_offset);
+    chwall_buf->chwall_running_types_offset =
+        ntohl(chwall_buf->chwall_running_types_offset);
+    chwall_buf->chwall_conflict_aggregate_offset =
+        ntohl(chwall_buf->chwall_conflict_aggregate_offset);
+
+    /* policy type and version checks */
+    if ((chwall_buf->policy_code != ACM_CHINESE_WALL_POLICY) ||
+        (chwall_buf->policy_version != ACM_CHWALL_VERSION))
+        return -EINVAL;
+
+    /* 1. allocate new buffers */
+    ssids =
+        xmalloc_array(domaintype_t,
+                      chwall_buf->chwall_max_types *
+                      chwall_buf->chwall_max_ssidrefs);
+    conflict_sets =
+        xmalloc_array(domaintype_t,
+                      chwall_buf->chwall_max_conflictsets *
+                      chwall_buf->chwall_max_types);
+    running_types =
+        xmalloc_array(domaintype_t, chwall_buf->chwall_max_types);
+    conflict_aggregate_set =
+        xmalloc_array(domaintype_t, chwall_buf->chwall_max_types);
+
+    if ((ssids == NULL) || (conflict_sets == NULL)
+        || (running_types == NULL) || (conflict_aggregate_set == NULL))
+        goto error_free;
+
+    /* 2. set new policy */
+    if (chwall_buf->chwall_ssid_offset + sizeof(domaintype_t) *
+        chwall_buf->chwall_max_types * chwall_buf->chwall_max_ssidrefs >
+        buf_size)
+        goto error_free;
+
+    arrcpy(ssids, buf + chwall_buf->chwall_ssid_offset,
+           sizeof(domaintype_t),
+           chwall_buf->chwall_max_types * chwall_buf->chwall_max_ssidrefs);
+
+    if (chwall_buf->chwall_conflict_sets_offset + sizeof(domaintype_t) *
+        chwall_buf->chwall_max_types *
+        chwall_buf->chwall_max_conflictsets > buf_size)
+        goto error_free;
+
+    arrcpy(conflict_sets, buf + chwall_buf->chwall_conflict_sets_offset,
+           sizeof(domaintype_t),
+           chwall_buf->chwall_max_types *
+           chwall_buf->chwall_max_conflictsets);
+
+    /* we also use new state buffers since max_types can change */
+    memset(running_types, 0,
+           sizeof(domaintype_t) * chwall_buf->chwall_max_types);
+    memset(conflict_aggregate_set, 0,
+           sizeof(domaintype_t) * chwall_buf->chwall_max_types);
+
+    /* 3. now re-calculate the state for the new policy based on running domains;
+     *    this can fail if new policy is conflicting with running domains */
+    if (chwall_init_state(chwall_buf, ssids,
+                          conflict_sets, running_types,
+                          conflict_aggregate_set))
+    {
+        printk("%s: New policy conflicts with running domains. Policy load aborted.\n",
+               __func__);
+        goto error_free;        /* new policy conflicts with running domains */
+    }
+    /* 4. free old policy buffers, replace with new ones */
+    chwall_bin_pol.max_types = chwall_buf->chwall_max_types;
+    chwall_bin_pol.max_ssidrefs = chwall_buf->chwall_max_ssidrefs;
+    chwall_bin_pol.max_conflictsets = chwall_buf->chwall_max_conflictsets;
+    if (chwall_bin_pol.ssidrefs != NULL)
+        xfree(chwall_bin_pol.ssidrefs);
+    if (chwall_bin_pol.conflict_aggregate_set != NULL)
+        xfree(chwall_bin_pol.conflict_aggregate_set);
+    if (chwall_bin_pol.running_types != NULL)
+        xfree(chwall_bin_pol.running_types);
+    if (chwall_bin_pol.conflict_sets != NULL)
+        xfree(chwall_bin_pol.conflict_sets);
+    chwall_bin_pol.ssidrefs = ssids;
+    chwall_bin_pol.conflict_aggregate_set = conflict_aggregate_set;
+    chwall_bin_pol.running_types = running_types;
+    chwall_bin_pol.conflict_sets = conflict_sets;
+    return ACM_OK;
+
+ error_free:
+    printk("%s: ERROR setting policy.\n", __func__);
+    if (ssids != NULL)
+        xfree(ssids);
+    if (conflict_sets != NULL)
+        xfree(conflict_sets);
+    if (running_types != NULL)
+        xfree(running_types);
+    if (conflict_aggregate_set != NULL)
+        xfree(conflict_aggregate_set);
+    return -EFAULT;
+}
+
+static int chwall_dump_stats(u8 * buf, u16 len)
+{
+    /* no stats for Chinese Wall Policy */
+    return 0;
+}
+
+static int chwall_dump_ssid_types(ssidref_t ssidref, u8 * buf, u16 len)
 {
     int i;
 
@@ -319,12 +385,14 @@
     if (chwall_bin_pol.max_types > len)
         return -EFAULT;
 
-       if (ssidref >= chwall_bin_pol.max_ssidrefs)
-               return -EFAULT;
+    if (ssidref >= chwall_bin_pol.max_ssidrefs)
+        return -EFAULT;
 
     /* read types for chwall ssidref */
-    for(i=0; i< chwall_bin_pol.max_types; i++) {
-        if (chwall_bin_pol.ssidrefs[ssidref * chwall_bin_pol.max_types + i])
+    for (i = 0; i < chwall_bin_pol.max_types; i++)
+    {
+        if (chwall_bin_pol.
+            ssidrefs[ssidref * chwall_bin_pol.max_types + i])
             buf[i] = 1;
         else
             buf[i] = 0;
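
The reindented hooks below enforce the Chinese Wall rule: a domain may only be created if none of its types already appears in the running domains' conflict_aggregate_set. A minimal standalone sketch of that check, not taken from the patch, with the same matrix layout (ssidrefs is max_ssidrefs x max_types) but made-up sizes and values:

    #include <stdio.h>

    #define MAX_TYPES    4
    #define MAX_SSIDREFS 3

    /* Illustrative policy: which types each ssidref carries (0/1 flags). */
    static const unsigned short ssidrefs[MAX_SSIDREFS][MAX_TYPES] = {
        {0, 0, 0, 0},   /* ssidref 0: no types      */
        {1, 0, 1, 0},   /* ssidref 1: types 0 and 2 */
        {0, 1, 0, 0},   /* ssidref 2: type 1        */
    };
    /* Types already "conflicted" by the currently running domains. */
    static unsigned short conflict_aggregate_set[MAX_TYPES] = {0, 0, 1, 0};

    static int chwall_would_conflict(unsigned int ssidref)
    {
        unsigned int i;
        for (i = 0; i < MAX_TYPES; i++)
            if (conflict_aggregate_set[i] && ssidrefs[ssidref][i])
                return 1;       /* type i already conflicted: deny  */
        return 0;               /* no overlap: permit               */
    }

    int main(void)
    {
        printf("ssidref 1: %s\n", chwall_would_conflict(1) ? "denied" : "permitted");
        printf("ssidref 2: %s\n", chwall_would_conflict(2) ? "denied" : "permitted");
        return 0;
    }
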
@@ -336,198 +404,239 @@
  * Authorization functions
  ***************************/
 
-
 /* -------- DOMAIN OPERATION HOOKS -----------*/
 
-static int 
-chwall_pre_domain_create(void *subject_ssid, ssidref_t ssidref)
-{
-       ssidref_t chwall_ssidref;
-       int i,j;
-       traceprintk("%s.\n", __func__);
-
-       read_lock(&acm_bin_pol_rwlock);
-       chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
-       if (chwall_ssidref == ACM_DEFAULT_LOCAL_SSID) {
-               printk("%s: ERROR CHWALL SSID is NOT SET but policy enforced.\n", __func__);
-               read_unlock(&acm_bin_pol_rwlock);
-               return ACM_ACCESS_DENIED; /* catching and indicating config error */
-       }
-       if (chwall_ssidref >= chwall_bin_pol.max_ssidrefs) {
-               printk("%s: ERROR chwall_ssidref > max(%x).\n",
-                      __func__, chwall_bin_pol.max_ssidrefs-1);
-               read_unlock(&acm_bin_pol_rwlock);
-               return ACM_ACCESS_DENIED;
-       }
-       /* A: chinese wall check for conflicts */
-       for (i=0; i< chwall_bin_pol.max_types; i++)
-               if (chwall_bin_pol.conflict_aggregate_set[i] &&
-                   chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + i]) {
-                       printk("%s: CHINESE WALL CONFLICT in type %02x.\n", __func__, i);
-                       read_unlock(&acm_bin_pol_rwlock);
-                       return ACM_ACCESS_DENIED;
-               }
-
-       /* B: chinese wall conflict set adjustment (so that other 
-        *      other domains simultaneously created are evaluated against this new set)*/
-       for (i=0; i<chwall_bin_pol.max_conflictsets; i++) {
-               int common = 0;
-               /* check if conflict_set_i and ssidref have common types */
-               for (j=0; j<chwall_bin_pol.max_types; j++)
-                       if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
-                           chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j]) {
-                               common = 1;
-                               break;
-                       }
-               if (common == 0)
-                       continue; /* try next conflict set */
-               /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
-               for (j=0; j<chwall_bin_pol.max_types; j++)
-                       if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
-                           !chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j])
-                               chwall_bin_pol.conflict_aggregate_set[j]++;
-       }
-       read_unlock(&acm_bin_pol_rwlock);
-       return ACM_ACCESS_PERMITTED;
-}
-
-static void
-chwall_post_domain_create(domid_t domid, ssidref_t ssidref)
-{
-       int i,j;
-       ssidref_t chwall_ssidref;
-       traceprintk("%s.\n", __func__);
-       
-       read_lock(&acm_bin_pol_rwlock);
-       chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
-       /* adjust types ref-count for running domains */
-       for (i=0; i< chwall_bin_pol.max_types; i++)
-               chwall_bin_pol.running_types[i] +=
-                       chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + i];
-       if (domid) {
-               read_unlock(&acm_bin_pol_rwlock);
-               return;
-       }
-       /* Xen does not call pre-create hook for DOM0;
-        * to consider type conflicts of any domain with DOM0, we need
-        * to adjust the conflict_aggregate for DOM0 here the same way it
-        * is done for non-DOM0 domains in the pre-hook */
-       printkd("%s: adjusting security state for DOM0 (ssidref=%x, chwall_ssidref=%x).\n",
-               __func__, ssidref, chwall_ssidref);
-
-       /* chinese wall conflict set adjustment (so that other 
-        *      other domains simultaneously created are evaluated against this new set)*/
-       for (i=0; i<chwall_bin_pol.max_conflictsets; i++) {
-               int common = 0;
-               /* check if conflict_set_i and ssidref have common types */
-               for (j=0; j<chwall_bin_pol.max_types; j++)
-                       if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
-                           chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j]) {
-                               common = 1;
-                               break;
-                       }
-               if (common == 0)
-                       continue; /* try next conflict set */
-               /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
-               for (j=0; j<chwall_bin_pol.max_types; j++)
-                       if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
-                           !chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j])
-                               chwall_bin_pol.conflict_aggregate_set[j]++;
-       }
-       read_unlock(&acm_bin_pol_rwlock);
-       return;
+static int chwall_pre_domain_create(void *subject_ssid, ssidref_t ssidref)
+{
+    ssidref_t chwall_ssidref;
+    int i, j;
+    traceprintk("%s.\n", __func__);
+
+    read_lock(&acm_bin_pol_rwlock);
+    chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
+    if (chwall_ssidref == ACM_DEFAULT_LOCAL_SSID)
+    {
+        printk("%s: ERROR CHWALL SSID is NOT SET but policy enforced.\n",
+               __func__);
+        read_unlock(&acm_bin_pol_rwlock);
+        return ACM_ACCESS_DENIED;       /* catching and indicating config error */
+    }
+    if (chwall_ssidref >= chwall_bin_pol.max_ssidrefs)
+    {
+        printk("%s: ERROR chwall_ssidref > max(%x).\n",
+               __func__, chwall_bin_pol.max_ssidrefs - 1);
+        read_unlock(&acm_bin_pol_rwlock);
+        return ACM_ACCESS_DENIED;
+    }
+    /* A: chinese wall check for conflicts */
+    for (i = 0; i < chwall_bin_pol.max_types; i++)
+        if (chwall_bin_pol.conflict_aggregate_set[i] &&
+            chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                   chwall_bin_pol.max_types + i])
+        {
+            printk("%s: CHINESE WALL CONFLICT in type %02x.\n", __func__, i);
+            read_unlock(&acm_bin_pol_rwlock);
+            return ACM_ACCESS_DENIED;
+        }
+
+    /* B: chinese wall conflict set adjustment (so that other
+     *      other domains simultaneously created are evaluated against this new set)*/
+    for (i = 0; i < chwall_bin_pol.max_conflictsets; i++)
+    {
+        int common = 0;
+        /* check if conflict_set_i and ssidref have common types */
+        for (j = 0; j < chwall_bin_pol.max_types; j++)
+            if (chwall_bin_pol.
+                conflict_sets[i * chwall_bin_pol.max_types + j]
+                && chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                          chwall_bin_pol.max_types + j])
+            {
+                common = 1;
+                break;
+            }
+        if (common == 0)
+            continue;           /* try next conflict set */
+        /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
+        for (j = 0; j < chwall_bin_pol.max_types; j++)
+            if (chwall_bin_pol.
+                conflict_sets[i * chwall_bin_pol.max_types + j]
+                && !chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                           chwall_bin_pol.max_types + j])
+                chwall_bin_pol.conflict_aggregate_set[j]++;
+    }
+    read_unlock(&acm_bin_pol_rwlock);
+    return ACM_ACCESS_PERMITTED;
+}
+
+static void chwall_post_domain_create(domid_t domid, ssidref_t ssidref)
+{
+    int i, j;
+    ssidref_t chwall_ssidref;
+    traceprintk("%s.\n", __func__);
+
+    read_lock(&acm_bin_pol_rwlock);
+    chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
+    /* adjust types ref-count for running domains */
+    for (i = 0; i < chwall_bin_pol.max_types; i++)
+        chwall_bin_pol.running_types[i] +=
+            chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                   chwall_bin_pol.max_types + i];
+    if (domid)
+    {
+        read_unlock(&acm_bin_pol_rwlock);
+        return;
+    }
+    /* Xen does not call pre-create hook for DOM0;
+     * to consider type conflicts of any domain with DOM0, we need
+     * to adjust the conflict_aggregate for DOM0 here the same way it
+     * is done for non-DOM0 domains in the pre-hook */
+    printkd("%s: adjusting security state for DOM0 (ssidref=%x, chwall_ssidref=%x).\n",
+            __func__, ssidref, chwall_ssidref);
+
+    /* chinese wall conflict set adjustment (so that other
+     *      other domains simultaneously created are evaluated against this new set)*/
+    for (i = 0; i < chwall_bin_pol.max_conflictsets; i++)
+    {
+        int common = 0;
+        /* check if conflict_set_i and ssidref have common types */
+        for (j = 0; j < chwall_bin_pol.max_types; j++)
+            if (chwall_bin_pol.
+                conflict_sets[i * chwall_bin_pol.max_types + j]
+                && chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                          chwall_bin_pol.max_types + j])
+            {
+                common = 1;
+                break;
+            }
+        if (common == 0)
+            continue;           /* try next conflict set */
+        /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
+        for (j = 0; j < chwall_bin_pol.max_types; j++)
+            if (chwall_bin_pol.
+                conflict_sets[i * chwall_bin_pol.max_types + j]
+                && !chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                           chwall_bin_pol.max_types + j])
+                chwall_bin_pol.conflict_aggregate_set[j]++;
+    }
+    read_unlock(&acm_bin_pol_rwlock);
+    return;
 }
 
 static void
 chwall_fail_domain_create(void *subject_ssid, ssidref_t ssidref)
 {
-       int i, j;
-       ssidref_t chwall_ssidref;
-       traceprintk("%s.\n", __func__);
-
-       read_lock(&acm_bin_pol_rwlock);
-       chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
-       /* roll-back: re-adjust conflicting types aggregate */
-       for (i=0; i<chwall_bin_pol.max_conflictsets; i++) {
-               int common = 0;
-               /* check if conflict_set_i and ssidref have common types */
-               for (j=0; j<chwall_bin_pol.max_types; j++)
-                       if 
(chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
-                           
chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j]) {
-                               common = 1;
-                               break;
-                       }
-               if (common == 0)
-                       continue; /* try next conflict set, this one does not include any type of chwall_ssidref */
-               /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
-               for (j=0; j<chwall_bin_pol.max_types; j++)
-                       if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
-                           !chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j])
-                               chwall_bin_pol.conflict_aggregate_set[j]--;
-       }
-       read_unlock(&acm_bin_pol_rwlock);
-}
-
-
-static void
-chwall_post_domain_destroy(void *object_ssid, domid_t id) 
-{
-       int i,j;
-       struct chwall_ssid *chwall_ssidp = 
-               GET_SSIDP(ACM_CHINESE_WALL_POLICY, (struct acm_ssid_domain *)object_ssid);
-       ssidref_t chwall_ssidref = chwall_ssidp->chwall_ssidref;
-
-       traceprintk("%s.\n", __func__);
-
-       read_lock(&acm_bin_pol_rwlock);
-       /* adjust running types set */
-       for (i=0; i< chwall_bin_pol.max_types; i++)
-               chwall_bin_pol.running_types[i] -=
-                       chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + i];
-
-       /* roll-back: re-adjust conflicting types aggregate */
-       for (i=0; i<chwall_bin_pol.max_conflictsets; i++) {
-               int common = 0;
-               /* check if conflict_set_i and ssidref have common types */
-               for (j=0; j<chwall_bin_pol.max_types; j++)
-                       if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
-                           chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j]) {
-                               common = 1;
-                               break;
-                       }
-               if (common == 0)
-                       continue; /* try next conflict set, this one does not include any type of chwall_ssidref */
-               /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
-               for (j=0; j<chwall_bin_pol.max_types; j++)
-                       if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
-                           !chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j])
-                               chwall_bin_pol.conflict_aggregate_set[j]--;
-       }
-       read_unlock(&acm_bin_pol_rwlock);
-       return;
+    int i, j;
+    ssidref_t chwall_ssidref;
+    traceprintk("%s.\n", __func__);
+
+    read_lock(&acm_bin_pol_rwlock);
+    chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
+    /* roll-back: re-adjust conflicting types aggregate */
+    for (i = 0; i < chwall_bin_pol.max_conflictsets; i++)
+    {
+        int common = 0;
+        /* check if conflict_set_i and ssidref have common types */
+        for (j = 0; j < chwall_bin_pol.max_types; j++)
+            if (chwall_bin_pol.
+                conflict_sets[i * chwall_bin_pol.max_types + j]
+                && chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                          chwall_bin_pol.max_types + j])
+            {
+                common = 1;
+                break;
+            }
+        if (common == 0)
+            continue;           /* try next conflict set, this one does not include any type of chwall_ssidref */
+        /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
+        for (j = 0; j < chwall_bin_pol.max_types; j++)
+            if (chwall_bin_pol.
+                conflict_sets[i * chwall_bin_pol.max_types + j]
+                && !chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                           chwall_bin_pol.max_types + j])
+                chwall_bin_pol.conflict_aggregate_set[j]--;
+    }
+    read_unlock(&acm_bin_pol_rwlock);
+}
+
+
+static void chwall_post_domain_destroy(void *object_ssid, domid_t id)
+{
+    int i, j;
+    struct chwall_ssid *chwall_ssidp = GET_SSIDP(ACM_CHINESE_WALL_POLICY,
+                                                 (struct acm_ssid_domain *)
+                                                 object_ssid);
+    ssidref_t chwall_ssidref = chwall_ssidp->chwall_ssidref;
+
+    traceprintk("%s.\n", __func__);
+
+    read_lock(&acm_bin_pol_rwlock);
+    /* adjust running types set */
+    for (i = 0; i < chwall_bin_pol.max_types; i++)
+        chwall_bin_pol.running_types[i] -=
+            chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                   chwall_bin_pol.max_types + i];
+
+    /* roll-back: re-adjust conflicting types aggregate */
+    for (i = 0; i < chwall_bin_pol.max_conflictsets; i++)
+    {
+        int common = 0;
+        /* check if conflict_set_i and ssidref have common types */
+        for (j = 0; j < chwall_bin_pol.max_types; j++)
+            if (chwall_bin_pol.
+                conflict_sets[i * chwall_bin_pol.max_types + j]
+                && chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                          chwall_bin_pol.max_types + j])
+            {
+                common = 1;
+                break;
+            }
+        if (common == 0)
+            continue;           /* try next conflict set, this one does not include any type of chwall_ssidref */
+        /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
+        for (j = 0; j < chwall_bin_pol.max_types; j++)
+            if (chwall_bin_pol.
+                conflict_sets[i * chwall_bin_pol.max_types + j]
+                && !chwall_bin_pol.ssidrefs[chwall_ssidref *
+                                           chwall_bin_pol.max_types + j])
+                chwall_bin_pol.conflict_aggregate_set[j]--;
+    }
+    read_unlock(&acm_bin_pol_rwlock);
+    return;
 }
 
 struct acm_operations acm_chinesewall_ops = {
-       /* policy management services */
-       .init_domain_ssid               = chwall_init_domain_ssid,
-       .free_domain_ssid               = chwall_free_domain_ssid,
-       .dump_binary_policy             = chwall_dump_policy,
-       .set_binary_policy              = chwall_set_policy,
-       .dump_statistics                = chwall_dump_stats,
-    .dump_ssid_types        = chwall_dump_ssid_types,
-       /* domain management control hooks */
-       .pre_domain_create              = chwall_pre_domain_create,
-       .post_domain_create             = chwall_post_domain_create,
-       .fail_domain_create             = chwall_fail_domain_create,
-       .post_domain_destroy            = chwall_post_domain_destroy,
-       /* event channel control hooks */
-       .pre_eventchannel_unbound       = NULL,
-       .fail_eventchannel_unbound      = NULL,
-       .pre_eventchannel_interdomain   = NULL,
-       .fail_eventchannel_interdomain  = NULL,
-       /* grant table control hooks */
-       .pre_grant_map_ref              = NULL,
-       .fail_grant_map_ref             = NULL,
-       .pre_grant_setup                = NULL,
-       .fail_grant_setup               = NULL,
+    /* policy management services */
+    .init_domain_ssid = chwall_init_domain_ssid,
+    .free_domain_ssid = chwall_free_domain_ssid,
+    .dump_binary_policy = chwall_dump_policy,
+    .set_binary_policy = chwall_set_policy,
+    .dump_statistics = chwall_dump_stats,
+    .dump_ssid_types = chwall_dump_ssid_types,
+    /* domain management control hooks */
+    .pre_domain_create = chwall_pre_domain_create,
+    .post_domain_create = chwall_post_domain_create,
+    .fail_domain_create = chwall_fail_domain_create,
+    .post_domain_destroy = chwall_post_domain_destroy,
+    /* event channel control hooks */
+    .pre_eventchannel_unbound = NULL,
+    .fail_eventchannel_unbound = NULL,
+    .pre_eventchannel_interdomain = NULL,
+    .fail_eventchannel_interdomain = NULL,
+    /* grant table control hooks */
+    .pre_grant_map_ref = NULL,
+    .fail_grant_map_ref = NULL,
+    .pre_grant_setup = NULL,
+    .fail_grant_setup = NULL,
+    /* generic domain-requested decision hooks */
+    .sharing = NULL,
 };
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
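
All of the chwall hooks in this file revolve around one piece of bookkeeping, the per-type
conflict_aggregate_set counters that the pre-create hook raises and the fail/destroy hooks
lower again. The toy program below is a minimal, self-contained sketch of that bookkeeping
only; the array sizes, the sample conflict set and the helper name pre_create() are invented
for illustration and are not part of the Xen ACM code in this changeset.

    #include <stdio.h>

    #define N_TYPES        4
    #define N_CONFLICTSETS 1
    #define N_SSIDREFS     3

    /* conflict set 0: types 1 and 2 may never run at the same time */
    static int conflict_sets[N_CONFLICTSETS][N_TYPES] = { { 0, 1, 1, 0 } };

    /* which types each local ssidref carries (row = ssidref, column = type) */
    static int ssidrefs[N_SSIDREFS][N_TYPES] = {
        { 1, 0, 0, 0 },   /* ssidref 0: type 0 only */
        { 0, 1, 0, 0 },   /* ssidref 1: type 1      */
        { 0, 0, 1, 0 },   /* ssidref 2: type 2      */
    };

    /* counts how many running/starting domains currently block each type */
    static int conflict_aggregate[N_TYPES];

    /* mirrors the shape of chwall_pre_domain_create(): deny if the new domain
     * carries a blocked type, otherwise widen the aggregate set */
    static int pre_create(int ssidref)
    {
        int i, j, common;

        for (i = 0; i < N_TYPES; i++)
            if (conflict_aggregate[i] && ssidrefs[ssidref][i])
                return -1;                     /* ACM_ACCESS_DENIED */

        for (i = 0; i < N_CONFLICTSETS; i++) {
            common = 0;
            for (j = 0; j < N_TYPES; j++)
                if (conflict_sets[i][j] && ssidrefs[ssidref][j]) {
                    common = 1;
                    break;
                }
            if (!common)
                continue;                      /* this set does not apply */
            for (j = 0; j < N_TYPES; j++)      /* block the other types of the set */
                if (conflict_sets[i][j] && !ssidrefs[ssidref][j])
                    conflict_aggregate[j]++;
        }
        return 0;                              /* ACM_ACCESS_PERMITTED */
    }

    int main(void)
    {
        printf("ssidref 1: %s\n", pre_create(1) ? "denied" : "permitted");
        printf("ssidref 2: %s\n", pre_create(2) ? "denied" : "permitted");
        return 0;
    }

With this sample data the first creation is permitted and raises conflict_aggregate for
type 2, so the second creation is denied; chwall_fail_domain_create() and
chwall_post_domain_destroy() simply decrement the same counters again, which is the
roll-back visible in the hunks above.
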
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/acm/acm_core.c
--- a/xen/acm/acm_core.c        Fri Oct 21 19:58:39 2005
+++ b/xen/acm/acm_core.c        Mon Oct 24 15:08:13 2005
@@ -47,7 +47,7 @@
 void acm_init_ste_policy(void);
 
 extern struct acm_operations acm_chinesewall_ops, 
-       acm_simple_type_enforcement_ops, acm_null_ops;
+    acm_simple_type_enforcement_ops, acm_null_ops;
 
 /* global ops structs called by the hooks */
 struct acm_operations *acm_primary_ops = NULL;
@@ -66,7 +66,7 @@
     u32 test = 1;
     if (*((u8 *)&test) == 1)
     {
-       printk("ACM module running in LITTLE ENDIAN.\n");
+        printk("ACM module running in LITTLE ENDIAN.\n");
         little_endian = 1;
     }
     else
@@ -80,10 +80,10 @@
 static void
 acm_init_binary_policy(void *primary, void *secondary)
 {
-       acm_bin_pol.primary_policy_code = 0;
-       acm_bin_pol.secondary_policy_code = 0;
-       acm_bin_pol.primary_binary_policy = primary;
-       acm_bin_pol.secondary_binary_policy = secondary;
+    acm_bin_pol.primary_policy_code = 0;
+    acm_bin_pol.secondary_policy_code = 0;
+    acm_bin_pol.primary_binary_policy = primary;
+    acm_bin_pol.secondary_binary_policy = secondary;
 }
 
 static int
@@ -96,7 +96,7 @@
     int rc = ACM_OK;
 
     if (mbi->mods_count > 1)
-           *initrdidx = 1;
+        *initrdidx = 1;
 
     /*
      * Try all modules and see whichever could be the binary policy.
@@ -115,14 +115,14 @@
 #error Architecture unsupported by sHype
 #endif
         _policy_len   = mod[i].mod_end - mod[i].mod_start;
-       if (_policy_len < sizeof(struct acm_policy_buffer))
-               continue; /* not a policy */
+        if (_policy_len < sizeof(struct acm_policy_buffer))
+            continue; /* not a policy */
 
         pol = (struct acm_policy_buffer *)_policy_start;
         if (ntohl(pol->magic) == ACM_MAGIC)
         {
             rc = acm_set_policy((void *)_policy_start,
-                                (u16)_policy_len,
+                                (u32)_policy_len,
                                 0);
             if (rc == ACM_OK)
             {
@@ -145,7 +145,7 @@
             }
             else
             {
-               printk("Invalid policy. %d.th module line.\n", i+1);
+                printk("Invalid policy. %d.th module line.\n", i+1);
             }
         } /* end if a binary policy definition, i.e., (ntohl(pol->magic) == ACM_MAGIC ) */
     }
@@ -158,10 +158,10 @@
          const multiboot_info_t *mbi,
          unsigned long initial_images_start)
 {
-       int ret = ACM_OK;
+    int ret = ACM_OK;
 
     acm_set_endian();
-       write_lock(&acm_bin_pol_rwlock);
+    write_lock(&acm_bin_pol_rwlock);
     acm_init_binary_policy(NULL, NULL);
 
     /* set primary policy component */
@@ -170,14 +170,14 @@
 
     case ACM_CHINESE_WALL_POLICY:
         acm_init_chwall_policy();
-               acm_bin_pol.primary_policy_code = ACM_CHINESE_WALL_POLICY;
-               acm_primary_ops = &acm_chinesewall_ops;
+        acm_bin_pol.primary_policy_code = ACM_CHINESE_WALL_POLICY;
+        acm_primary_ops = &acm_chinesewall_ops;
         break;
 
     case ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY:
         acm_init_ste_policy();
-               acm_bin_pol.primary_policy_code = ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY;
-               acm_primary_ops = &acm_simple_type_enforcement_ops;
+        acm_bin_pol.primary_policy_code = ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY;
+        acm_primary_ops = &acm_simple_type_enforcement_ops;
         break;
 
     default:
@@ -190,9 +190,9 @@
     /* secondary policy component part */
     switch ((ACM_USE_SECURITY_POLICY) >> 4) {
     case ACM_NULL_POLICY:
-               acm_bin_pol.secondary_policy_code = ACM_NULL_POLICY;
-               acm_secondary_ops = &acm_null_ops;
-               break;
+        acm_bin_pol.secondary_policy_code = ACM_NULL_POLICY;
+        acm_secondary_ops = &acm_null_ops;
+        break;
 
     case ACM_CHINESE_WALL_POLICY:
         if (acm_bin_pol.primary_policy_code == ACM_CHINESE_WALL_POLICY)
@@ -200,9 +200,9 @@
             ret = -EINVAL;
             goto out;
         }
-               acm_init_chwall_policy();
+        acm_init_chwall_policy();
         acm_bin_pol.secondary_policy_code = ACM_CHINESE_WALL_POLICY;
-               acm_secondary_ops = &acm_chinesewall_ops;
+        acm_secondary_ops = &acm_chinesewall_ops;
         break;
 
     case ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY:
@@ -211,9 +211,9 @@
             ret = -EINVAL;
             goto out;
         }
-               acm_init_ste_policy();
-               acm_bin_pol.secondary_policy_code = ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY;
-               acm_secondary_ops = &acm_simple_type_enforcement_ops;
+        acm_init_ste_policy();
+        acm_bin_pol.secondary_policy_code = ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY;
+        acm_secondary_ops = &acm_simple_type_enforcement_ops;
         break;
 
     default:
@@ -222,96 +222,103 @@
     }
 
  out:
-       write_unlock(&acm_bin_pol_rwlock);
-
-       if (ret != ACM_OK)
-    {
-        printk("%s: Error setting policies.\n", __func__);
+    write_unlock(&acm_bin_pol_rwlock);
+
+    if (ret != ACM_OK)
+    {
+        printk("%s: Error initializing policies.\n", __func__);
         /* here one could imagine a clean panic */
-               return -EINVAL;
-       }
-       acm_setup(initrdidx, mbi, initial_images_start);
-       printk("%s: Enforcing Primary %s, Secondary %s.\n", __func__, 
-              ACM_POLICY_NAME(acm_bin_pol.primary_policy_code),
+        return -EINVAL;
+    }
+    if (acm_setup(initrdidx, mbi, initial_images_start) != ACM_OK)
+    {
+        printk("%s: Error loading policy at boot time.\n", __func__);
+        /* ignore, just continue with the minimal hardcoded startup policy */
+    }
+    printk("%s: Enforcing Primary %s, Secondary %s.\n", __func__, 
+           ACM_POLICY_NAME(acm_bin_pol.primary_policy_code),
            ACM_POLICY_NAME(acm_bin_pol.secondary_policy_code));
-       return ret;
+    return ret;
 }
 
 int
 acm_init_domain_ssid(domid_t id, ssidref_t ssidref)
 {
-       struct acm_ssid_domain *ssid;
-       struct domain *subj = find_domain_by_id(id);
-       int ret1, ret2;
-       
-       if (subj == NULL)
-    {
-               printk("%s: ACM_NULL_POINTER ERROR (id=%x).\n", __func__, id);
-               return ACM_NULL_POINTER_ERROR;
-       }
-       if ((ssid = xmalloc(struct acm_ssid_domain)) == NULL)
-               return ACM_INIT_SSID_ERROR;
-
-       ssid->datatype       = DOMAIN;
-       ssid->subject        = subj;
-       ssid->domainid       = subj->domain_id;
-       ssid->primary_ssid   = NULL;
-       ssid->secondary_ssid = NULL;
-
-       if (ACM_USE_SECURITY_POLICY != ACM_NULL_POLICY)
-               ssid->ssidref = ssidref;
-       else
-               ssid->ssidref = ACM_DEFAULT_SSID;
-
-       subj->ssid           = ssid;
-       /* now fill in primary and secondary parts; we only get here through hooks */
-       if (acm_primary_ops->init_domain_ssid != NULL)
-               ret1 = acm_primary_ops->init_domain_ssid(&(ssid->primary_ssid), ssidref);
-       else
-               ret1 = ACM_OK;
-
-       if (acm_secondary_ops->init_domain_ssid != NULL)
-               ret2 = acm_secondary_ops->init_domain_ssid(&(ssid->secondary_ssid), ssidref);
-       else
-               ret2 = ACM_OK;
-
-       if ((ret1 != ACM_OK) || (ret2 != ACM_OK))
-    {
-               printk("%s: ERROR instantiating individual ssids for domain 0x%02x.\n",
-                      __func__, subj->domain_id);
-               acm_free_domain_ssid(ssid);     
-               put_domain(subj);
-               return ACM_INIT_SSID_ERROR;
-       }
-       printk("%s: assigned domain %x the ssidref=%x.\n",
+    struct acm_ssid_domain *ssid;
+    struct domain *subj = find_domain_by_id(id);
+    int ret1, ret2;
+ 
+    if (subj == NULL)
+    {
+        printk("%s: ACM_NULL_POINTER ERROR (id=%x).\n", __func__, id);
+        return ACM_NULL_POINTER_ERROR;
+    }
+    if ((ssid = xmalloc(struct acm_ssid_domain)) == NULL)
+        return ACM_INIT_SSID_ERROR;
+
+    ssid->datatype       = DOMAIN;
+    ssid->subject        = subj;
+    ssid->domainid      = subj->domain_id;
+    ssid->primary_ssid   = NULL;
+    ssid->secondary_ssid = NULL;
+
+    if (ACM_USE_SECURITY_POLICY != ACM_NULL_POLICY)
+        ssid->ssidref = ssidref;
+    else
+        ssid->ssidref = ACM_DEFAULT_SSID;
+
+    subj->ssid           = ssid;
+    /* now fill in primary and secondary parts; we only get here through hooks */
+    if (acm_primary_ops->init_domain_ssid != NULL)
+        ret1 = acm_primary_ops->init_domain_ssid(&(ssid->primary_ssid), 
ssidref);
+    else
+        ret1 = ACM_OK;
+
+    if (acm_secondary_ops->init_domain_ssid != NULL)
+        ret2 = acm_secondary_ops->init_domain_ssid(&(ssid->secondary_ssid), ssidref);
+    else
+        ret2 = ACM_OK;
+
+    if ((ret1 != ACM_OK) || (ret2 != ACM_OK))
+    {
+        printk("%s: ERROR instantiating individual ssids for domain 0x%02x.\n",
+               __func__, subj->domain_id);
+        acm_free_domain_ssid(ssid); 
+        put_domain(subj);
+        return ACM_INIT_SSID_ERROR;
+    }
+    printk("%s: assigned domain %x the ssidref=%x.\n",
            __func__, id, ssid->ssidref);
-       put_domain(subj);
-       return ACM_OK;
-}
-
-
-int
+    put_domain(subj);
+    return ACM_OK;
+}
+
+
+void
 acm_free_domain_ssid(struct acm_ssid_domain *ssid)
 {
-       domid_t id;
-
-       /* domain is already gone, just ssid is left */
-       if (ssid == NULL)
-    {
-               printk("%s: ACM_NULL_POINTER ERROR.\n", __func__);
-               return ACM_NULL_POINTER_ERROR;
-       }
-    id = ssid->domainid;
-       ssid->subject        = NULL;
-
-       if (acm_primary_ops->free_domain_ssid != NULL) /* null policy */
-               acm_primary_ops->free_domain_ssid(ssid->primary_ssid);
-       ssid->primary_ssid = NULL;
-       if (acm_secondary_ops->free_domain_ssid != NULL)
-               acm_secondary_ops->free_domain_ssid(ssid->secondary_ssid);
-       ssid->secondary_ssid = NULL;
-       xfree(ssid);
-       printkd("%s: Freed individual domain ssid (domain=%02x).\n",
+    /* domain is already gone, just ssid is left */
+    if (ssid == NULL)
+        return;
+
+    ssid->subject = NULL;
+    if (acm_primary_ops->free_domain_ssid != NULL) /* null policy */
+        acm_primary_ops->free_domain_ssid(ssid->primary_ssid);
+    ssid->primary_ssid = NULL;
+    if (acm_secondary_ops->free_domain_ssid != NULL)
+        acm_secondary_ops->free_domain_ssid(ssid->secondary_ssid);
+    ssid->secondary_ssid = NULL;
+    xfree(ssid);
+    printkd("%s: Freed individual domain ssid (domain=%02x).\n",
             __func__, id);
-       return ACM_OK;
-}
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
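
The acm_core.c hunk above touches two small but easy-to-miss details: the endianness probe
in acm_set_endian() and the boot-time scan that treats a multiboot module as a candidate
policy only if it is large enough and starts with the policy magic in network byte order.
The fragment below is a hedged, self-contained illustration of both checks; it folds the
size/magic test from the boot scan together with the length test acm_set_policy() performs
later, and ACM_MAGIC_EXAMPLE plus the reduced policy_header struct are placeholders, not
the real acm_policy_buffer layout or magic value.

    #include <arpa/inet.h>   /* ntohl()/htonl() */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ACM_MAGIC_EXAMPLE 0x41434d31u   /* placeholder magic, not Xen's constant */

    /* reduced stand-in for the policy header: fields travel in network order */
    struct policy_header {
        uint32_t magic;
        uint32_t len;
    };

    /* same trick as acm_set_endian(): inspect the first byte of a u32 */
    static int is_little_endian(void)
    {
        uint32_t probe = 1;
        return *(uint8_t *)&probe == 1;
    }

    /* too small => "not a policy"; otherwise magic and embedded length decide */
    static int looks_like_policy(const void *mod, size_t mod_len)
    {
        struct policy_header hdr;

        if (mod_len < sizeof(hdr))
            return 0;
        memcpy(&hdr, mod, sizeof(hdr));
        return ntohl(hdr.magic) == ACM_MAGIC_EXAMPLE && ntohl(hdr.len) <= mod_len;
    }

    int main(void)
    {
        struct policy_header h = { htonl(ACM_MAGIC_EXAMPLE), htonl(sizeof(h)) };

        printf("%s endian\n", is_little_endian() ? "little" : "big");
        printf("module accepted: %d\n", looks_like_policy(&h, sizeof(h)));
        return 0;
    }
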
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/acm/acm_null_hooks.c
--- a/xen/acm/acm_null_hooks.c  Fri Oct 21 19:58:39 2005
+++ b/xen/acm/acm_null_hooks.c  Mon Oct 24 15:08:13 2005
@@ -11,37 +11,38 @@
  * published by the Free Software Foundation, version 2 of the
  * License.
  */
+
 #include <acm/acm_hooks.h>
 
 static int
 null_init_domain_ssid(void **ssid, ssidref_t ssidref)
 {
-       return ACM_OK;
+    return ACM_OK;
 }
 
 static void
 null_free_domain_ssid(void *ssid)
 {
-       return;
+    return;
 }
 
 static int
-null_dump_binary_policy(u8 *buf, u16 buf_size) 
-{      
-       return 0;
+null_dump_binary_policy(u8 *buf, u32 buf_size)
+{ 
+    return 0;
 }
 
 static int
-null_set_binary_policy(u8 *buf, u16 buf_size) 
-{      
-       return ACM_OK;
+null_set_binary_policy(u8 *buf, u32 buf_size)
+{ 
+    return ACM_OK;
 }
-       
+ 
 static int 
 null_dump_stats(u8 *buf, u16 buf_size)
 {
-       /* no stats for NULL policy */
-       return 0;
+    /* no stats for NULL policy */
+    return 0;
 }
 
 static int
@@ -54,25 +55,35 @@
 
 /* now define the hook structure similarly to LSM */
 struct acm_operations acm_null_ops = {
-       .init_domain_ssid               = null_init_domain_ssid,
-       .free_domain_ssid               = null_free_domain_ssid,
-       .dump_binary_policy             = null_dump_binary_policy,
-       .set_binary_policy              = null_set_binary_policy,
-       .dump_statistics                = null_dump_stats,
-    .dump_ssid_types        = null_dump_ssid_types,
-       /* domain management control hooks */
-       .pre_domain_create              = NULL,
-       .post_domain_create             = NULL,
-       .fail_domain_create             = NULL,
-       .post_domain_destroy            = NULL,
-       /* event channel control hooks */
-       .pre_eventchannel_unbound       = NULL,
-       .fail_eventchannel_unbound      = NULL,
-       .pre_eventchannel_interdomain   = NULL,
-       .fail_eventchannel_interdomain  = NULL,
-       /* grant table control hooks */
-       .pre_grant_map_ref              = NULL,
-       .fail_grant_map_ref             = NULL,
-       .pre_grant_setup                = NULL,
-       .fail_grant_setup               = NULL
+    .init_domain_ssid = null_init_domain_ssid,
+    .free_domain_ssid = null_free_domain_ssid,
+    .dump_binary_policy = null_dump_binary_policy,
+    .set_binary_policy = null_set_binary_policy,
+    .dump_statistics = null_dump_stats,
+    .dump_ssid_types = null_dump_ssid_types,
+    /* domain management control hooks */
+    .pre_domain_create = NULL,
+    .post_domain_create = NULL,
+    .fail_domain_create = NULL,
+    .post_domain_destroy = NULL,
+    /* event channel control hooks */
+    .pre_eventchannel_unbound = NULL,
+    .fail_eventchannel_unbound = NULL,
+    .pre_eventchannel_interdomain = NULL,
+    .fail_eventchannel_interdomain = NULL,
+    /* grant table control hooks */
+    .pre_grant_map_ref = NULL,
+    .fail_grant_map_ref = NULL,
+    .pre_grant_setup = NULL,
+    .fail_grant_setup = NULL
 };
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
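
The null policy above leaves every enforcement hook at NULL. The intended reading, as far
as this changeset shows, is that a missing hook counts as "nothing to enforce" at the call
site; the snippet below sketches that dispatch convention with a hypothetical ops table and
caller, so struct ops_example, null_ops_example and call_pre_domain_create() are
illustrative names only and not the actual acm_hooks.h wrappers.

    #include <stdio.h>

    /* hypothetical, trimmed-down hook table in the style of acm_operations */
    struct ops_example {
        int (*pre_domain_create)(int ssidref);   /* NULL means "no check" */
    };

    /* a policy that enforces nothing: the hook stays NULL, as in acm_null_ops */
    static struct ops_example null_ops_example = {
        .pre_domain_create = NULL,
    };

    /* sketch of the caller-side convention: a NULL hook is treated as permit */
    static int call_pre_domain_create(const struct ops_example *ops, int ssidref)
    {
        if (ops->pre_domain_create == NULL)
            return 0;                            /* nothing to enforce */
        return ops->pre_domain_create(ssidref);  /* 0 = permit, nonzero = deny */
    }

    int main(void)
    {
        printf("decision: %d\n", call_pre_domain_create(&null_ops_example, 1));
        return 0;
    }
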
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/acm/acm_policy.c
--- a/xen/acm/acm_policy.c      Fri Oct 21 19:58:39 2005
+++ b/xen/acm/acm_policy.c      Mon Oct 24 15:08:13 2005
@@ -32,165 +32,166 @@
 #include <acm/acm_endian.h>
 
 int
-acm_set_policy(void *buf, u16 buf_size, int isuserbuffer)
+acm_set_policy(void *buf, u32 buf_size, int isuserbuffer)
 {
-       u8 *policy_buffer = NULL;
-       struct acm_policy_buffer *pol;
-       
+    u8 *policy_buffer = NULL;
+    struct acm_policy_buffer *pol;
+ 
     if (buf_size < sizeof(struct acm_policy_buffer))
-               return -EFAULT;
-
-       /* 1. copy buffer from domain */
-       if ((policy_buffer = xmalloc_array(u8, buf_size)) == NULL)
-           return -ENOMEM;
-
-       if (isuserbuffer) {
-               if (copy_from_user(policy_buffer, buf, buf_size))
+        return -EFAULT;
+
+    /* 1. copy buffer from domain */
+    if ((policy_buffer = xmalloc_array(u8, buf_size)) == NULL)
+        return -ENOMEM;
+
+    if (isuserbuffer) {
+        if (copy_from_user(policy_buffer, buf, buf_size))
         {
-                       printk("%s: Error copying!\n",__func__);
-                       goto error_free;
-               }
-       } else
-               memcpy(policy_buffer, buf, buf_size);
-
-       /* 2. some sanity checking */
-       pol = (struct acm_policy_buffer *)policy_buffer;
-
-       if ((ntohl(pol->magic) != ACM_MAGIC) || 
-           (ntohl(pol->policy_version) != ACM_POLICY_VERSION) ||
-           (ntohl(pol->primary_policy_code) != acm_bin_pol.primary_policy_code) ||
-           (ntohl(pol->secondary_policy_code) != acm_bin_pol.secondary_policy_code))
+            printk("%s: Error copying!\n",__func__);
+            goto error_free;
+        }
+    } else
+        memcpy(policy_buffer, buf, buf_size);
+
+    /* 2. some sanity checking */
+    pol = (struct acm_policy_buffer *)policy_buffer;
+
+    if ((ntohl(pol->magic) != ACM_MAGIC) || 
+        (ntohl(pol->policy_version) != ACM_POLICY_VERSION) ||
+        (ntohl(pol->primary_policy_code) != acm_bin_pol.primary_policy_code) ||
+        (ntohl(pol->secondary_policy_code) != acm_bin_pol.secondary_policy_code))
     {
-               printkd("%s: Wrong policy magics or versions!\n", __func__);
-               goto error_free;
-       }
-       if (buf_size != ntohl(pol->len))
+        printkd("%s: Wrong policy magics or versions!\n", __func__);
+        goto error_free;
+    }
+    if (buf_size != ntohl(pol->len))
     {
-               printk("%s: ERROR in buf size.\n", __func__);
-               goto error_free;
-       }
-
-       /* get bin_policy lock and rewrite policy (release old one) */
-       write_lock(&acm_bin_pol_rwlock);
-
-       /* 3. set primary policy data */
-       if (acm_primary_ops->set_binary_policy(buf + ntohl(pol->primary_buffer_offset),
-                                               ntohl(pol->secondary_buffer_offset) -
-                                              ntohl(pol->primary_buffer_offset)))
-               goto error_lock_free;
-
-       /* 4. set secondary policy data */
-       if (acm_secondary_ops->set_binary_policy(buf + ntohl(pol->secondary_buffer_offset),
-                                                ntohl(pol->len) -
-                                                ntohl(pol->secondary_buffer_offset)))
-               goto error_lock_free;
-
-       write_unlock(&acm_bin_pol_rwlock);
-       xfree(policy_buffer);
-       return ACM_OK;
+        printk("%s: ERROR in buf size.\n", __func__);
+        goto error_free;
+    }
+
+    /* get bin_policy lock and rewrite policy (release old one) */
+    write_lock(&acm_bin_pol_rwlock);
+
+    /* 3. set primary policy data */
+    if (acm_primary_ops->set_binary_policy(buf + ntohl(pol->primary_buffer_offset),
+                                           ntohl(pol->secondary_buffer_offset) -
+                                           ntohl(pol->primary_buffer_offset)))
+        goto error_lock_free;
+
+    /* 4. set secondary policy data */
+    if (acm_secondary_ops->set_binary_policy(buf + ntohl(pol->secondary_buffer_offset),
+                                             ntohl(pol->len) - 
+                                             ntohl(pol->secondary_buffer_offset)))
+        goto error_lock_free;
+
+    write_unlock(&acm_bin_pol_rwlock);
+    xfree(policy_buffer);
+    return ACM_OK;
 
  error_lock_free:
-       write_unlock(&acm_bin_pol_rwlock);
+    write_unlock(&acm_bin_pol_rwlock);
  error_free:
-       printk("%s: Error setting policy.\n", __func__);
-    xfree(policy_buffer);
-       return -EFAULT;
-}
-
-int
-acm_get_policy(void *buf, u16 buf_size)
-{      
-     u8 *policy_buffer;
-     int ret;
-     struct acm_policy_buffer *bin_pol;
-       
+    printk("%s: Error setting policy.\n", __func__);
+    xfree(policy_buffer);
+    return -EFAULT;
+}
+
+int
+acm_get_policy(void *buf, u32 buf_size)
+{ 
+    u8 *policy_buffer;
+    int ret;
+    struct acm_policy_buffer *bin_pol;
+ 
     if (buf_size < sizeof(struct acm_policy_buffer))
-               return -EFAULT;
-
-     if ((policy_buffer = xmalloc_array(u8, buf_size)) == NULL)
-           return -ENOMEM;
-
-     read_lock(&acm_bin_pol_rwlock);
-
-     bin_pol = (struct acm_policy_buffer *)policy_buffer;
-     bin_pol->magic = htonl(ACM_MAGIC);
-     bin_pol->primary_policy_code = htonl(acm_bin_pol.primary_policy_code);
-     bin_pol->secondary_policy_code = htonl(acm_bin_pol.secondary_policy_code);
-
-     bin_pol->len = htonl(sizeof(struct acm_policy_buffer));
-     bin_pol->primary_buffer_offset = htonl(ntohl(bin_pol->len));
-     bin_pol->secondary_buffer_offset = htonl(ntohl(bin_pol->len));
+        return -EFAULT;
+
+    if ((policy_buffer = xmalloc_array(u8, buf_size)) == NULL)
+        return -ENOMEM;
+
+    read_lock(&acm_bin_pol_rwlock);
+
+    bin_pol = (struct acm_policy_buffer *)policy_buffer;
+    bin_pol->magic = htonl(ACM_MAGIC);
+    bin_pol->primary_policy_code = htonl(acm_bin_pol.primary_policy_code);
+    bin_pol->secondary_policy_code = htonl(acm_bin_pol.secondary_policy_code);
+
+    bin_pol->len = htonl(sizeof(struct acm_policy_buffer));
+    bin_pol->primary_buffer_offset = htonl(ntohl(bin_pol->len));
+    bin_pol->secondary_buffer_offset = htonl(ntohl(bin_pol->len));
      
-     ret = acm_primary_ops->dump_binary_policy (policy_buffer + ntohl(bin_pol->primary_buffer_offset),
-                                      buf_size - ntohl(bin_pol->primary_buffer_offset));
-     if (ret < 0)
-         goto error_free_unlock;
-
-     bin_pol->len = htonl(ntohl(bin_pol->len) + ret);
-     bin_pol->secondary_buffer_offset = htonl(ntohl(bin_pol->len));
-
-     ret = acm_secondary_ops->dump_binary_policy(policy_buffer + ntohl(bin_pol->secondary_buffer_offset),
-                                   buf_size - ntohl(bin_pol->secondary_buffer_offset));
-     if (ret < 0)
-         goto error_free_unlock;
-
-     bin_pol->len = htonl(ntohl(bin_pol->len) + ret);
-     if (copy_to_user(buf, policy_buffer, ntohl(bin_pol->len)))
-            goto error_free_unlock;
-
-     read_unlock(&acm_bin_pol_rwlock);
-     xfree(policy_buffer);
-     return ACM_OK;
+    ret = acm_primary_ops->dump_binary_policy (policy_buffer + ntohl(bin_pol->primary_buffer_offset),
+                                               buf_size - ntohl(bin_pol->primary_buffer_offset));
+    if (ret < 0)
+        goto error_free_unlock;
+
+    bin_pol->len = htonl(ntohl(bin_pol->len) + ret);
+    bin_pol->secondary_buffer_offset = htonl(ntohl(bin_pol->len));
+
+    ret = acm_secondary_ops->dump_binary_policy(policy_buffer + ntohl(bin_pol->secondary_buffer_offset),
+                                                buf_size - ntohl(bin_pol->secondary_buffer_offset));
+    if (ret < 0)
+        goto error_free_unlock;
+
+    bin_pol->len = htonl(ntohl(bin_pol->len) + ret);
+    if (copy_to_user(buf, policy_buffer, ntohl(bin_pol->len)))
+        goto error_free_unlock;
+
+    read_unlock(&acm_bin_pol_rwlock);
+    xfree(policy_buffer);
+    return ACM_OK;
 
  error_free_unlock:
-     read_unlock(&acm_bin_pol_rwlock);
-     printk("%s: Error getting policy.\n", __func__);
-     xfree(policy_buffer);
-     return -EFAULT;
+    read_unlock(&acm_bin_pol_rwlock);
+    printk("%s: Error getting policy.\n", __func__);
+    xfree(policy_buffer);
+    return -EFAULT;
 }
 
 int
 acm_dump_statistics(void *buf, u16 buf_size)
-{      
+{ 
     /* send stats to user space */
-     u8 *stats_buffer;
-     int len1, len2;
-     struct acm_stats_buffer acm_stats;
-
-     if ((stats_buffer = xmalloc_array(u8, buf_size)) == NULL)
-           return -ENOMEM;
-
-     read_lock(&acm_bin_pol_rwlock);
+    u8 *stats_buffer;
+    int len1, len2;
+    struct acm_stats_buffer acm_stats;
+
+    if ((stats_buffer = xmalloc_array(u8, buf_size)) == NULL)
+        return -ENOMEM;
+
+    read_lock(&acm_bin_pol_rwlock);
      
-     len1 = acm_primary_ops->dump_statistics(stats_buffer + sizeof(struct acm_stats_buffer),
-                                            buf_size - sizeof(struct acm_stats_buffer));
-     if (len1 < 0)
-            goto error_lock_free;
-            
-     len2 = acm_secondary_ops->dump_statistics(stats_buffer + sizeof(struct acm_stats_buffer) + len1,
-                                              buf_size - sizeof(struct acm_stats_buffer) - len1);
-     if (len2 < 0)
-            goto error_lock_free;
-
-     acm_stats.magic = htonl(ACM_MAGIC);
-     acm_stats.primary_policy_code = htonl(acm_bin_pol.primary_policy_code);
-     acm_stats.secondary_policy_code = htonl(acm_bin_pol.secondary_policy_code);
-     acm_stats.primary_stats_offset = htonl(sizeof(struct acm_stats_buffer));
-     acm_stats.secondary_stats_offset = htonl(sizeof(struct acm_stats_buffer) + len1);
-     acm_stats.len = htonl(sizeof(struct acm_stats_buffer) + len1 + len2);
-     memcpy(stats_buffer, &acm_stats, sizeof(struct acm_stats_buffer));
-
-     if (copy_to_user(buf, stats_buffer, sizeof(struct acm_stats_buffer) + len1 + len2))
-            goto error_lock_free;
-
-     read_unlock(&acm_bin_pol_rwlock);
-     xfree(stats_buffer);
-     return ACM_OK;
+    len1 = acm_primary_ops->dump_statistics(stats_buffer + sizeof(struct acm_stats_buffer),
+                                            buf_size - sizeof(struct acm_stats_buffer));
+    if (len1 < 0)
+        goto error_lock_free;
+      
+    len2 = acm_secondary_ops->dump_statistics(stats_buffer + sizeof(struct acm_stats_buffer) + len1,
+                                              buf_size - sizeof(struct acm_stats_buffer) - len1);
+    if (len2 < 0)
+        goto error_lock_free;
+
+    acm_stats.magic = htonl(ACM_MAGIC);
+    acm_stats.primary_policy_code = htonl(acm_bin_pol.primary_policy_code);
+    acm_stats.secondary_policy_code = htonl(acm_bin_pol.secondary_policy_code);
+    acm_stats.primary_stats_offset = htonl(sizeof(struct acm_stats_buffer));
+    acm_stats.secondary_stats_offset = htonl(sizeof(struct acm_stats_buffer) + len1);
+    acm_stats.len = htonl(sizeof(struct acm_stats_buffer) + len1 + len2);
+
+    memcpy(stats_buffer, &acm_stats, sizeof(struct acm_stats_buffer));
+
+    if (copy_to_user(buf, stats_buffer, sizeof(struct acm_stats_buffer) + len1 + len2))
+        goto error_lock_free;
+
+    read_unlock(&acm_bin_pol_rwlock);
+    xfree(stats_buffer);
+    return ACM_OK;
 
  error_lock_free:
-     read_unlock(&acm_bin_pol_rwlock);
-     xfree(stats_buffer);
-     return -EFAULT;
+    read_unlock(&acm_bin_pol_rwlock);
+    xfree(stats_buffer);
+    return -EFAULT;
 }
 
 
@@ -198,57 +199,88 @@
 acm_get_ssid(ssidref_t ssidref, u8 *buf, u16 buf_size)
 {
     /* send stats to user space */
-     u8 *ssid_buffer;
-     int ret;
-     struct acm_ssid_buffer *acm_ssid;
-     if (buf_size < sizeof(struct acm_ssid_buffer))
-               return -EFAULT;
-
-     if ((ssid_buffer = xmalloc_array(u8, buf_size)) == NULL)
-           return -ENOMEM;
-
-     read_lock(&acm_bin_pol_rwlock);
-
-     acm_ssid = (struct acm_ssid_buffer *)ssid_buffer;
-     acm_ssid->len = sizeof(struct acm_ssid_buffer);
-     acm_ssid->ssidref = ssidref;
-     acm_ssid->primary_policy_code = acm_bin_pol.primary_policy_code;
-     acm_ssid->secondary_policy_code = acm_bin_pol.secondary_policy_code;
-     acm_ssid->primary_types_offset = acm_ssid->len;
-
-     /* ret >= 0 --> ret == max_types */
-     ret = acm_primary_ops->dump_ssid_types(ACM_PRIMARY(ssidref),
-                                            ssid_buffer + 
acm_ssid->primary_types_offset,
-                                            buf_size - 
acm_ssid->primary_types_offset);
-     if (ret < 0)
-         goto error_free_unlock;
-
-     acm_ssid->len += ret;
-     acm_ssid->primary_max_types = ret;
-
-     acm_ssid->secondary_types_offset = acm_ssid->len;
-
-     ret = acm_secondary_ops->dump_ssid_types(ACM_SECONDARY(ssidref),
-                                              ssid_buffer + acm_ssid->secondary_types_offset,
-                                              buf_size - acm_ssid->secondary_types_offset);
-     if (ret < 0)
-         goto error_free_unlock;
-
-     acm_ssid->len += ret;
-     acm_ssid->secondary_max_types = ret;
-
-     if (copy_to_user(buf, ssid_buffer, acm_ssid->len))
-            goto error_free_unlock;
-
-     read_unlock(&acm_bin_pol_rwlock);
-     xfree(ssid_buffer);
-     return ACM_OK;
+    u8 *ssid_buffer;
+    int ret;
+    struct acm_ssid_buffer *acm_ssid;
+    if (buf_size < sizeof(struct acm_ssid_buffer))
+        return -EFAULT;
+
+    if ((ssid_buffer = xmalloc_array(u8, buf_size)) == NULL)
+        return -ENOMEM;
+
+    read_lock(&acm_bin_pol_rwlock);
+
+    acm_ssid = (struct acm_ssid_buffer *)ssid_buffer;
+    acm_ssid->len = sizeof(struct acm_ssid_buffer);
+    acm_ssid->ssidref = ssidref;
+    acm_ssid->primary_policy_code = acm_bin_pol.primary_policy_code;
+    acm_ssid->secondary_policy_code = acm_bin_pol.secondary_policy_code;
+    acm_ssid->primary_types_offset = acm_ssid->len;
+
+    /* ret >= 0 --> ret == max_types */
+    ret = acm_primary_ops->dump_ssid_types(ACM_PRIMARY(ssidref),
+                                           ssid_buffer + acm_ssid->primary_types_offset,
+                                           buf_size - acm_ssid->primary_types_offset);
+    if (ret < 0)
+        goto error_free_unlock;
+
+    acm_ssid->len += ret;
+    acm_ssid->primary_max_types = ret;
+    acm_ssid->secondary_types_offset = acm_ssid->len;
+
+    ret = acm_secondary_ops->dump_ssid_types(ACM_SECONDARY(ssidref),
+                                             ssid_buffer + acm_ssid->secondary_types_offset,
+                                             buf_size - acm_ssid->secondary_types_offset);
+    if (ret < 0)
+        goto error_free_unlock;
+
+    acm_ssid->len += ret;
+    acm_ssid->secondary_max_types = ret;
+
+    if (copy_to_user(buf, ssid_buffer, acm_ssid->len))
+        goto error_free_unlock;
+
+    read_unlock(&acm_bin_pol_rwlock);
+    xfree(ssid_buffer);
+    return ACM_OK;
 
  error_free_unlock:
-     read_unlock(&acm_bin_pol_rwlock);
-     printk("%s: Error getting ssid.\n", __func__);
-     xfree(ssid_buffer);
-     return -ENOMEM;
-}
-
-/*eof*/
+    read_unlock(&acm_bin_pol_rwlock);
+    printk("%s: Error getting ssid.\n", __func__);
+    xfree(ssid_buffer);
+    return -ENOMEM;
+}
+
+int
+acm_get_decision(ssidref_t ssidref1, ssidref_t ssidref2,
+                 enum acm_hook_type hook)
+{
+    int ret = ACM_ACCESS_DENIED;
+    switch (hook) {
+
+    case SHARING:
+        /* SHARING Hook restricts access in STE policy only */
+        ret = acm_sharing(ssidref1, ssidref2);
+        break;
+
+    default:
+        /* deny */
+        break;
+    }
+
+    printkd("%s: ssid1=%x, ssid2=%x, decision=%s.\n",
+            __func__, ssidref1, ssidref2,
+            (ret == ACM_ACCESS_PERMITTED) ? "GRANTED" : "DENIED");
+
+    return ret;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
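
acm_get_policy() above assembles the user-visible policy blob in three steps: write a
fixed-size header, append the primary policy dump, then append the secondary one, keeping
len and the two offsets in network byte order the whole time. The sketch below reproduces
only that assembly pattern with dummy dump helpers; struct pol_hdr, dump_primary() and
dump_secondary() are invented stand-ins for the real acm_policy_buffer and the
->dump_binary_policy hooks, not the actual Xen definitions.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* reduced stand-in for the policy header; every field stays in network
     * byte order while the buffer is being filled */
    struct pol_hdr {
        uint32_t len;
        uint32_t primary_buffer_offset;
        uint32_t secondary_buffer_offset;
    };

    /* dummy per-policy dumpers; like the hooks they return bytes written or < 0 */
    static int dump_primary(uint8_t *buf, uint32_t size)
    {
        static const char blob[] = "CHWALL";
        if (size < sizeof(blob)) return -1;
        memcpy(buf, blob, sizeof(blob));
        return (int)sizeof(blob);
    }

    static int dump_secondary(uint8_t *buf, uint32_t size)
    {
        static const char blob[] = "STE";
        if (size < sizeof(blob)) return -1;
        memcpy(buf, blob, sizeof(blob));
        return (int)sizeof(blob);
    }

    static int assemble_policy(uint8_t *buf, uint32_t size)
    {
        struct pol_hdr *hdr = (struct pol_hdr *)buf;
        int ret;

        if (size < sizeof(*hdr))
            return -1;

        hdr->len = htonl(sizeof(*hdr));
        hdr->primary_buffer_offset = hdr->len;      /* both already network order */

        ret = dump_primary(buf + ntohl(hdr->primary_buffer_offset),
                           size - ntohl(hdr->primary_buffer_offset));
        if (ret < 0)
            return -1;
        hdr->len = htonl(ntohl(hdr->len) + ret);
        hdr->secondary_buffer_offset = hdr->len;

        ret = dump_secondary(buf + ntohl(hdr->secondary_buffer_offset),
                             size - ntohl(hdr->secondary_buffer_offset));
        if (ret < 0)
            return -1;
        hdr->len = htonl(ntohl(hdr->len) + ret);

        return (int)ntohl(hdr->len);   /* total number of bytes for the caller */
    }

    int main(void)
    {
        union { struct pol_hdr align; uint8_t bytes[128]; } buf;

        printf("assembled %d bytes\n", assemble_policy(buf.bytes, sizeof(buf.bytes)));
        return 0;
    }

The detail mirrored here is that len is re-read with ntohl() and re-stored with htonl()
after every append, which is exactly the pattern the hunks above follow.
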
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/acm/acm_simple_type_enforcement_hooks.c
--- a/xen/acm/acm_simple_type_enforcement_hooks.c       Fri Oct 21 19:58:39 2005
+++ b/xen/acm/acm_simple_type_enforcement_hooks.c       Mon Oct 24 15:08:13 2005
@@ -24,6 +24,7 @@
  *     share at least on common type.
  *
  */
+
 #include <xen/lib.h>
 #include <asm/types.h>
 #include <asm/current.h>
@@ -35,34 +36,34 @@
 struct ste_binary_policy ste_bin_pol;
 
 static inline int have_common_type (ssidref_t ref1, ssidref_t ref2) {
-       int i;
-       for(i=0; i< ste_bin_pol.max_types; i++)
-               if ( ste_bin_pol.ssidrefs[ref1*ste_bin_pol.max_types + i] && 
-                    ste_bin_pol.ssidrefs[ref2*ste_bin_pol.max_types + i]) {
-                       printkd("%s: common type #%02x.\n", __func__, i);
-                       return 1;
-               }
-       return 0;
+    int i;
+    for(i=0; i< ste_bin_pol.max_types; i++)
+        if ( ste_bin_pol.ssidrefs[ref1*ste_bin_pol.max_types + i] && 
+             ste_bin_pol.ssidrefs[ref2*ste_bin_pol.max_types + i]) {
+            printkd("%s: common type #%02x.\n", __func__, i);
+            return 1;
+        }
+    return 0;
 }
 
 /* Helper function: return = (subj and obj share a common type) */
 static int share_common_type(struct domain *subj, struct domain *obj)
 {
-       ssidref_t ref_s, ref_o;
-       int ret;
-
-       if ((subj == NULL) || (obj == NULL) || (subj->ssid == NULL) || (obj->ssid == NULL))
-               return 0;
-       read_lock(&acm_bin_pol_rwlock);
-       /* lookup the policy-local ssids */
-       ref_s = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                                   (struct acm_ssid_domain *)subj->ssid)))->ste_ssidref;
-       ref_o = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                                   (struct acm_ssid_domain *)obj->ssid)))->ste_ssidref;
-        /* check whether subj and obj share a common ste type */
-       ret = have_common_type(ref_s, ref_o);
-       read_unlock(&acm_bin_pol_rwlock);
-       return ret;
+    ssidref_t ref_s, ref_o;
+    int ret;
+
+    if ((subj == NULL) || (obj == NULL) || (subj->ssid == NULL) || (obj->ssid == NULL))
+        return 0;
+    read_lock(&acm_bin_pol_rwlock);
+    /* lookup the policy-local ssids */
+    ref_s = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                                           (struct acm_ssid_domain *)subj->ssid)))->ste_ssidref;
+    ref_o = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                                           (struct acm_ssid_domain *)obj->ssid)))->ste_ssidref;
+    /* check whether subj and obj share a common ste type */
+    ret = have_common_type(ref_s, ref_o);
+    read_unlock(&acm_bin_pol_rwlock);
+    return ret;
 }
 
 /*
@@ -71,26 +72,26 @@
  */
 int acm_init_ste_policy(void)
 {
-       /* minimal startup policy; policy write-locked already */
-       ste_bin_pol.max_types = 1;
-       ste_bin_pol.max_ssidrefs = 2;
-       ste_bin_pol.ssidrefs = (domaintype_t *)xmalloc_array(domaintype_t, 2);
-       memset(ste_bin_pol.ssidrefs, 0, 2);
-
-       if (ste_bin_pol.ssidrefs == NULL)
-               return ACM_INIT_SSID_ERROR;
-
-       /* initialize state so that dom0 can start up and communicate with itself */
-       ste_bin_pol.ssidrefs[1] = 1;
-
-       /* init stats */
-       atomic_set(&(ste_bin_pol.ec_eval_count), 0);
-       atomic_set(&(ste_bin_pol.ec_denied_count), 0); 
-       atomic_set(&(ste_bin_pol.ec_cachehit_count), 0);
-       atomic_set(&(ste_bin_pol.gt_eval_count), 0);
-       atomic_set(&(ste_bin_pol.gt_denied_count), 0); 
-       atomic_set(&(ste_bin_pol.gt_cachehit_count), 0);
-       return ACM_OK;
+    /* minimal startup policy; policy write-locked already */
+    ste_bin_pol.max_types = 1;
+    ste_bin_pol.max_ssidrefs = 2;
+    ste_bin_pol.ssidrefs = (domaintype_t *)xmalloc_array(domaintype_t, 2);
+    memset(ste_bin_pol.ssidrefs, 0, 2);
+
+    if (ste_bin_pol.ssidrefs == NULL)
+        return ACM_INIT_SSID_ERROR;
+
+ /* initialize state so that dom0 can start up and communicate with itself */
+    ste_bin_pol.ssidrefs[1] = 1;
+
+    /* init stats */
+    atomic_set(&(ste_bin_pol.ec_eval_count), 0);
+    atomic_set(&(ste_bin_pol.ec_denied_count), 0); 
+    atomic_set(&(ste_bin_pol.ec_cachehit_count), 0);
+    atomic_set(&(ste_bin_pol.gt_eval_count), 0);
+    atomic_set(&(ste_bin_pol.gt_denied_count), 0); 
+    atomic_set(&(ste_bin_pol.gt_cachehit_count), 0);
+    return ACM_OK;
 }
 
 
@@ -98,62 +99,68 @@
 static int
 ste_init_domain_ssid(void **ste_ssid, ssidref_t ssidref)
 {
-       int i;
-       struct ste_ssid *ste_ssidp = xmalloc(struct ste_ssid); 
-       traceprintk("%s.\n", __func__);
-
-       if (ste_ssidp == NULL)
-               return ACM_INIT_SSID_ERROR;
-
-       /* get policy-local ssid reference */
-       ste_ssidp->ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
-       if ((ste_ssidp->ste_ssidref >= ste_bin_pol.max_ssidrefs) ||
-           (ste_ssidp->ste_ssidref == ACM_DEFAULT_LOCAL_SSID)) {
-               printkd("%s: ERROR ste_ssidref (%x) undefined or unset (0).\n",
-                       __func__, ste_ssidp->ste_ssidref);
-               xfree(ste_ssidp);
-               return ACM_INIT_SSID_ERROR;
-       }
-       /* clean ste cache */
-       for (i=0; i<ACM_TE_CACHE_SIZE; i++)
-               ste_ssidp->ste_cache[i].valid = FREE;
-
-       (*ste_ssid) = ste_ssidp;
-       printkd("%s: determined ste_ssidref to %x.\n", 
-              __func__, ste_ssidp->ste_ssidref);
-       return ACM_OK;
+    int i;
+    struct ste_ssid *ste_ssidp = xmalloc(struct ste_ssid); 
+    traceprintk("%s.\n", __func__);
+
+    if (ste_ssidp == NULL)
+        return ACM_INIT_SSID_ERROR;
+
+    /* get policy-local ssid reference */
+    ste_ssidp->ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
+    if ((ste_ssidp->ste_ssidref >= ste_bin_pol.max_ssidrefs) ||
+        (ste_ssidp->ste_ssidref == ACM_DEFAULT_LOCAL_SSID)) {
+        printkd("%s: ERROR ste_ssidref (%x) undefined or unset (0).\n",
+                __func__, ste_ssidp->ste_ssidref);
+        xfree(ste_ssidp);
+        return ACM_INIT_SSID_ERROR;
+    }
+    /* clean ste cache */
+    for (i=0; i<ACM_TE_CACHE_SIZE; i++)
+        ste_ssidp->ste_cache[i].valid = FREE;
+
+    (*ste_ssid) = ste_ssidp;
+    printkd("%s: determined ste_ssidref to %x.\n", 
+            __func__, ste_ssidp->ste_ssidref);
+    return ACM_OK;
 }
 
 
 static void
 ste_free_domain_ssid(void *ste_ssid)
 {
-       traceprintk("%s.\n", __func__);
-       if (ste_ssid != NULL)
-               xfree(ste_ssid);
-       return;
+    traceprintk("%s.\n", __func__);
+    if (ste_ssid != NULL)
+        xfree(ste_ssid);
+    return;
 }
 
 /* dump type enforcement cache; policy read-locked already */
 static int 
-ste_dump_policy(u8 *buf, u16 buf_size) {
-     struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
-     int ret = 0;
-
-     ste_buf->ste_max_types = htonl(ste_bin_pol.max_types);
-     ste_buf->ste_max_ssidrefs = htonl(ste_bin_pol.max_ssidrefs);
-     ste_buf->policy_code = htonl(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY);
-     ste_buf->ste_ssid_offset = htonl(sizeof(struct acm_ste_policy_buffer));
-     ret = ntohl(ste_buf->ste_ssid_offset) +
-            sizeof(domaintype_t)*ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types;
-
-     /* now copy buffer over */
-     arrcpy(buf + ntohl(ste_buf->ste_ssid_offset),
-           ste_bin_pol.ssidrefs,
-           sizeof(domaintype_t),
-             ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types);
-
-     return ret;
+ste_dump_policy(u8 *buf, u32 buf_size) {
+    struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
+    int ret = 0;
+
+    if (buf_size < sizeof(struct acm_ste_policy_buffer))
+        return -EINVAL;
+
+    ste_buf->ste_max_types = htonl(ste_bin_pol.max_types);
+    ste_buf->ste_max_ssidrefs = htonl(ste_bin_pol.max_ssidrefs);
+    ste_buf->policy_code = htonl(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY);
+    ste_buf->ste_ssid_offset = htonl(sizeof(struct acm_ste_policy_buffer));
+    ret = ntohl(ste_buf->ste_ssid_offset) +
+        sizeof(domaintype_t)*ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types;
+
+    if (buf_size < ret)
+        return -EINVAL;
+
+    /* now copy buffer over */
+    arrcpy(buf + ntohl(ste_buf->ste_ssid_offset),
+           ste_bin_pol.ssidrefs,
+           sizeof(domaintype_t),
+           ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types);
+
+    return ret;
 }
 
 /* ste_init_state is called when a policy is changed to detect violations (return != 0).
@@ -176,83 +183,83 @@
     /* go through all domains and adjust policy as if this domain was started now */
     pd = &domain_list;
     for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
-           ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                                (struct acm_ssid_domain *)(*pd)->ssid);
-           ste_ssidref = ste_ssid->ste_ssidref;
-           traceprintk("%s: validating policy for eventch domain %x (ste-Ref=%x).\n",
-                   __func__, (*pd)->domain_id, ste_ssidref);
-           /* a) check for event channel conflicts */
-           for (port=0; port < NR_EVTCHN_BUCKETS; port++) {
-                   spin_lock(&(*pd)->evtchn_lock);
-                   if ((*pd)->evtchn[port] == NULL) {
-                            spin_unlock(&(*pd)->evtchn_lock);
-                           continue;
-                   }
-                   if ((*pd)->evtchn[port]->state == ECS_INTERDOMAIN) {
-                           rdom = (*pd)->evtchn[port]->u.interdomain.remote_dom;
-                           rdomid = rdom->domain_id;
-                           /* rdom now has remote domain */
-                           ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                                                 (struct acm_ssid_domain *)(rdom->ssid));
-                           ste_rssidref = ste_rssid->ste_ssidref;
-                   } else if ((*pd)->evtchn[port]->state == ECS_UNBOUND) {
-                           rdomid = (*pd)->evtchn[port]->u.unbound.remote_domid;
-                           if ((rdom = find_domain_by_id(rdomid)) == NULL) {
-                                   printk("%s: Error finding domain to id %x!\n", __func__, rdomid);
-                                   goto out;
-                           }
-                           /* rdom now has remote domain */
-                           ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                                                 (struct acm_ssid_domain *)(rdom->ssid));
-                           ste_rssidref = ste_rssid->ste_ssidref;
-                           put_domain(rdom);
-                   } else {
-                           spin_unlock(&(*pd)->evtchn_lock);
-                           continue; /* port unused */
-                   }
-                   spin_unlock(&(*pd)->evtchn_lock);
-
-                   /* rdom now has remote domain */
-                   ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                                            (struct acm_ssid_domain *)(rdom->ssid));
-                   ste_rssidref = ste_rssid->ste_ssidref;
-                   traceprintk("%s: eventch: domain %x (ssidref %x) --> domain %x (rssidref %x) used (port %x).\n",
-                           __func__, (*pd)->domain_id, ste_ssidref, rdom->domain_id, ste_rssidref, port);
-                   /* check whether on subj->ssid, obj->ssid share a common type*/
-                   if (!have_common_type(ste_ssidref, ste_rssidref)) {
-                           printkd("%s: Policy violation in event channel domain %x -> domain %x.\n",
-                                   __func__, (*pd)->domain_id, rdomid);
-                           goto out;
-                   }
-           }   
-           /* b) check for grant table conflicts on shared pages */
-           if ((*pd)->grant_table->shared == NULL) {
-                   printkd("%s: Grant ... sharing for domain %x not setup!\n", __func__, (*pd)->domain_id);
-                   continue;
-           }
-           for ( i = 0; i < NR_GRANT_ENTRIES; i++ ) {
-                   sha_copy =  (*pd)->grant_table->shared[i];
-                   if ( sha_copy.flags ) {
-                           printkd("%s: grant dom (%hu) SHARED (%d) flags:(%hx) dom:(%hu) frame:(%lx)\n",
-                                   __func__, (*pd)->domain_id, i, sha_copy.flags, sha_copy.domid,
-                                   (unsigned long)sha_copy.frame);
-                           rdomid = sha_copy.domid;
-                           if ((rdom = find_domain_by_id(rdomid)) == NULL) {
-                                   printkd("%s: domain not found ERROR!\n", __func__);
-                                   goto out;
-                           };
-                           /* rdom now has remote domain */
-                           ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                                                 (struct acm_ssid_domain *)(rdom->ssid));
-                           ste_rssidref = ste_rssid->ste_ssidref;
-                           put_domain(rdom);
-                           if (!have_common_type(ste_ssidref, ste_rssidref)) {
-                                   printkd("%s: Policy violation in grant table sharing domain %x -> domain %x.\n",
-                                           __func__, (*pd)->domain_id, rdomid);
-                                   goto out;
-                           }
-                   }
-           }
+        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                             (struct acm_ssid_domain *)(*pd)->ssid);
+        ste_ssidref = ste_ssid->ste_ssidref;
+        traceprintk("%s: validating policy for eventch domain %x (ste-Ref=%x).\n",
+                    __func__, (*pd)->domain_id, ste_ssidref);
+        /* a) check for event channel conflicts */
+        for (port=0; port < NR_EVTCHN_BUCKETS; port++) {
+            spin_lock(&(*pd)->evtchn_lock);
+            if ((*pd)->evtchn[port] == NULL) {
+                spin_unlock(&(*pd)->evtchn_lock);
+                continue;
+            }
+            if ((*pd)->evtchn[port]->state == ECS_INTERDOMAIN) {
+                rdom = (*pd)->evtchn[port]->u.interdomain.remote_dom;
+                rdomid = rdom->domain_id;
+                /* rdom now has remote domain */
+                ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                                      (struct acm_ssid_domain *)(rdom->ssid));
+                ste_rssidref = ste_rssid->ste_ssidref;
+            } else if ((*pd)->evtchn[port]->state == ECS_UNBOUND) {
+                rdomid = (*pd)->evtchn[port]->u.unbound.remote_domid;
+                if ((rdom = find_domain_by_id(rdomid)) == NULL) {
+                    printk("%s: Error finding domain to id %x!\n", __func__, 
rdomid);
+                    goto out;
+                }
+                /* rdom now has remote domain */
+                ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                                      (struct acm_ssid_domain *)(rdom->ssid));
+                ste_rssidref = ste_rssid->ste_ssidref;
+                put_domain(rdom);
+            } else {
+                spin_unlock(&(*pd)->evtchn_lock);
+                continue; /* port unused */
+            }
+            spin_unlock(&(*pd)->evtchn_lock);
+
+            /* rdom now has remote domain */
+            ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                                  (struct acm_ssid_domain *)(rdom->ssid));
+            ste_rssidref = ste_rssid->ste_ssidref;
+            traceprintk("%s: eventch: domain %x (ssidref %x) --> domain %x 
(rssidref %x) used (port %x).\n", 
+                        __func__, (*pd)->domain_id, ste_ssidref, 
rdom->domain_id, ste_rssidref, port);  
+            /* check whether on subj->ssid, obj->ssid share a common type*/
+            if (!have_common_type(ste_ssidref, ste_rssidref)) {
+                printkd("%s: Policy violation in event channel domain %x -> 
domain %x.\n",
+                        __func__, (*pd)->domain_id, rdomid);
+                goto out;
+            }
+        } 
+        /* b) check for grant table conflicts on shared pages */
+        if ((*pd)->grant_table->shared == NULL) {
+            printkd("%s: Grant ... sharing for domain %x not setup!\n", 
__func__, (*pd)->domain_id);
+            continue;
+        }
+        for ( i = 0; i < NR_GRANT_ENTRIES; i++ ) {
+            sha_copy =  (*pd)->grant_table->shared[i];
+            if ( sha_copy.flags ) {
+                printkd("%s: grant dom (%hu) SHARED (%d) flags:(%hx) dom:(%hu) 
frame:(%lx)\n",
+                        __func__, (*pd)->domain_id, i, sha_copy.flags, 
sha_copy.domid, 
+                        (unsigned long)sha_copy.frame);
+                rdomid = sha_copy.domid;
+                if ((rdom = find_domain_by_id(rdomid)) == NULL) {
+                    printkd("%s: domain not found ERROR!\n", __func__);
+                    goto out;
+                };
+                /* rdom now has remote domain */
+                ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                                      (struct acm_ssid_domain *)(rdom->ssid));
+                ste_rssidref = ste_rssid->ste_ssidref;
+                put_domain(rdom);
+                if (!have_common_type(ste_ssidref, ste_rssidref)) {
+                    printkd("%s: Policy violation in grant table sharing 
domain %x -> domain %x.\n",
+                            __func__, (*pd)->domain_id, rdomid);
+                    goto out;
+                }
+            }
+        }
     }
     violation = 0;
  out:
@@ -267,110 +274,78 @@
 
 /* set new policy; policy write-locked already */
 static int
-ste_set_policy(u8 *buf, u16 buf_size) 
-{
-     struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer 
*)buf;
-     void *ssidrefsbuf;
-     struct ste_ssid *ste_ssid;
-     struct domain **pd;
-     int i;
-
-     /* Convert endianess of policy */
-     ste_buf->policy_code = ntohl(ste_buf->policy_code);
-     ste_buf->policy_version = ntohl(ste_buf->policy_version);
-     ste_buf->ste_max_types = ntohl(ste_buf->ste_max_types);
-     ste_buf->ste_max_ssidrefs = ntohl(ste_buf->ste_max_ssidrefs);
-     ste_buf->ste_ssid_offset = ntohl(ste_buf->ste_ssid_offset);
-
-     /* policy type and version checks */
-     if ((ste_buf->policy_code != ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY) ||
-        (ste_buf->policy_version != ACM_STE_VERSION))
-            return -EINVAL;
-
-     /* 1. create and copy-in new ssidrefs buffer */
-     ssidrefsbuf = xmalloc_array(u8, 
sizeof(domaintype_t)*ste_buf->ste_max_types*ste_buf->ste_max_ssidrefs);
-     if (ssidrefsbuf == NULL) {
-            return -ENOMEM;
-     }
-     if (ste_buf->ste_ssid_offset + sizeof(domaintype_t) * 
ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types > buf_size)
-         goto error_free;
-
-     arrcpy(ssidrefsbuf, 
-            buf + ste_buf->ste_ssid_offset,
-            sizeof(domaintype_t),
-           ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types);
-
-     /* 2. now re-calculate sharing decisions based on running domains; 
-      *    this can fail if new policy is conflicting with sharing of running 
domains 
-      *    now: reject violating new policy; future: adjust sharing through 
revoking sharing */
-     if (ste_init_state(ste_buf, (domaintype_t *)ssidrefsbuf)) {
-            printk("%s: New policy conflicts with running domains. Policy load 
aborted.\n", __func__);
-            goto error_free; /* new policy conflicts with sharing of running 
domains */
-     }
-     /* 3. replace old policy (activate new policy) */
-     ste_bin_pol.max_types = ste_buf->ste_max_types;
-     ste_bin_pol.max_ssidrefs = ste_buf->ste_max_ssidrefs;
-     if (ste_bin_pol.ssidrefs) 
-            xfree(ste_bin_pol.ssidrefs);
-     ste_bin_pol.ssidrefs = (domaintype_t *)ssidrefsbuf;
-
-     /* clear all ste caches */
-     read_lock(&domlist_lock);
-     pd = &domain_list;
-     for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
-        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                        (struct acm_ssid_domain *)(*pd)->ssid);
-        for (i=0; i<ACM_TE_CACHE_SIZE; i++)
-               ste_ssid->ste_cache[i].valid = FREE;
-     }
-     read_unlock(&domlist_lock);
-     return ACM_OK;
-
-error_free:
-       printk("%s: ERROR setting policy.\n", __func__);
-       if (ssidrefsbuf != NULL) xfree(ssidrefsbuf);
-       return -EFAULT;
+ste_set_policy(u8 *buf, u32 buf_size)
+{
+    struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer 
*)buf;
+    void *ssidrefsbuf;
+    struct ste_ssid *ste_ssid;
+    struct domain **pd;
+    int i;
+
+    if (buf_size < sizeof(struct acm_ste_policy_buffer))
+        return -EINVAL;
+
+    /* Convert endianness of policy */
+    ste_buf->policy_code = ntohl(ste_buf->policy_code);
+    ste_buf->policy_version = ntohl(ste_buf->policy_version);
+    ste_buf->ste_max_types = ntohl(ste_buf->ste_max_types);
+    ste_buf->ste_max_ssidrefs = ntohl(ste_buf->ste_max_ssidrefs);
+    ste_buf->ste_ssid_offset = ntohl(ste_buf->ste_ssid_offset);
+
+    /* policy type and version checks */
+    if ((ste_buf->policy_code != ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY) ||
+        (ste_buf->policy_version != ACM_STE_VERSION))
+        return -EINVAL;
+
+    /* 1. create and copy-in new ssidrefs buffer */
+    ssidrefsbuf = xmalloc_array(u8, 
sizeof(domaintype_t)*ste_buf->ste_max_types*ste_buf->ste_max_ssidrefs);
+    if (ssidrefsbuf == NULL) {
+        return -ENOMEM;
+    }
+    if (ste_buf->ste_ssid_offset + sizeof(domaintype_t) * 
ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types > buf_size)
+        goto error_free;
+
+    arrcpy(ssidrefsbuf, 
+           buf + ste_buf->ste_ssid_offset,
+           sizeof(domaintype_t),
+           ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types);
+
+    /* 2. now re-calculate sharing decisions based on running domains; 
+     *    this can fail if new policy is conflicting with sharing of running 
domains 
+     *    now: reject violating new policy; future: adjust sharing through 
revoking sharing */
+    if (ste_init_state(ste_buf, (domaintype_t *)ssidrefsbuf)) {
+        printk("%s: New policy conflicts with running domains. Policy load 
aborted.\n", __func__);
+        goto error_free; /* new policy conflicts with sharing of running 
domains */
+    }
+    /* 3. replace old policy (activate new policy) */
+    ste_bin_pol.max_types = ste_buf->ste_max_types;
+    ste_bin_pol.max_ssidrefs = ste_buf->ste_max_ssidrefs;
+    if (ste_bin_pol.ssidrefs) 
+        xfree(ste_bin_pol.ssidrefs);
+    ste_bin_pol.ssidrefs = (domaintype_t *)ssidrefsbuf;
+
+    /* clear all ste caches */
+    read_lock(&domlist_lock);
+    pd = &domain_list;
+    for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
+        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                             (struct acm_ssid_domain *)(*pd)->ssid);
+        for (i=0; i<ACM_TE_CACHE_SIZE; i++)
+            ste_ssid->ste_cache[i].valid = FREE;
+    }
+    read_unlock(&domlist_lock);
+    return ACM_OK;
+
+ error_free:
+    printk("%s: ERROR setting policy.\n", __func__);
+    if (ssidrefsbuf != NULL) xfree(ssidrefsbuf);
+    return -EFAULT;
 }
 
 static int 
 ste_dump_stats(u8 *buf, u16 buf_len)
 {
     struct acm_ste_stats_buffer stats;
-
-#ifdef ACM_DEBUG
-    int i;
-    struct ste_ssid *ste_ssid;
-    struct domain **pd;
-
-    printk("ste: Decision caches:\n");
-    /* go through all domains and adjust policy as if this domain was started 
now */
-    read_lock(&domlist_lock); /* go by domain? or directly by global? 
event/grant list */
-    pd = &domain_list;
-    for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
-        printk("ste: Cache Domain %02x.\n", (*pd)->domain_id);
-       ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                        (struct acm_ssid_domain *)(*pd)->ssid);
-       for (i=0; i<ACM_TE_CACHE_SIZE; i++)
-               printk("\t\tcache[%02x] = %s, domid=%x.\n", i,
-                      (ste_ssid->ste_cache[i].valid == VALID) ? 
-                      "VALID" : "FREE",
-                      (ste_ssid->ste_cache[i].valid == VALID) ? 
-                      ste_ssid->ste_cache[i].id : 0xffffffff);
-    }
-    read_unlock(&domlist_lock);
-    /* init stats */
-    printk("STE-Policy Security Hook Statistics:\n");
-    printk("ste: event_channel eval_count      = %x\n", 
atomic_read(&(ste_bin_pol.ec_eval_count)));
-    printk("ste: event_channel denied_count    = %x\n", 
atomic_read(&(ste_bin_pol.ec_denied_count))); 
-    printk("ste: event_channel cache_hit_count = %x\n", 
atomic_read(&(ste_bin_pol.ec_cachehit_count)));
-    printk("ste:\n");
-    printk("ste: grant_table   eval_count      = %x\n", 
atomic_read(&(ste_bin_pol.gt_eval_count)));
-    printk("ste: grant_table   denied_count    = %x\n", 
atomic_read(&(ste_bin_pol.gt_denied_count))); 
-    printk("ste: grant_table   cache_hit_count = %x\n", 
atomic_read(&(ste_bin_pol.gt_cachehit_count)));
-#endif
-
-    if (buf_len < sizeof(struct acm_ste_stats_buffer))
-           return -ENOMEM;
 
     /* now send the hook counts to user space */
     stats.ec_eval_count = htonl(atomic_read(&ste_bin_pol.ec_eval_count));
@@ -379,6 +354,10 @@
     stats.gt_denied_count = htonl(atomic_read(&ste_bin_pol.gt_denied_count)); 
     stats.ec_cachehit_count = 
htonl(atomic_read(&ste_bin_pol.ec_cachehit_count));
     stats.gt_cachehit_count = 
htonl(atomic_read(&ste_bin_pol.gt_cachehit_count));
+
+    if (buf_len < sizeof(struct acm_ste_stats_buffer))
+        return -ENOMEM;
+
     memcpy(buf, &stats, sizeof(struct acm_ste_stats_buffer));
     return sizeof(struct acm_ste_stats_buffer);
 }
@@ -392,12 +371,12 @@
     if (ste_bin_pol.max_types > len)
         return -EFAULT;
 
-       if (ssidref >= ste_bin_pol.max_ssidrefs)
-               return -EFAULT;
+    if (ssidref >= ste_bin_pol.max_ssidrefs)
+        return -EFAULT;
 
     /* read types for chwall ssidref */
     for(i=0; i< ste_bin_pol.max_types; i++) {
-               if (ste_bin_pol.ssidrefs[ssidref * ste_bin_pol.max_types + i])
+        if (ste_bin_pol.ssidrefs[ssidref * ste_bin_pol.max_types + i])
             buf[i] = 1;
         else
             buf[i] = 0;
@@ -409,40 +388,40 @@
  * returns 1 == cache hit */
 static int inline
 check_cache(struct domain *dom, domid_t rdom) {
-       struct ste_ssid *ste_ssid;
-       int i;
-
-       printkd("checking cache: %x --> %x.\n", dom->domain_id, rdom);
-       ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                        (struct acm_ssid_domain *)(dom)->ssid);
-
-       for(i=0; i< ACM_TE_CACHE_SIZE; i++) {
-               if ((ste_ssid->ste_cache[i].valid == VALID) &&
-                   (ste_ssid->ste_cache[i].id == rdom)) {
-                       printkd("cache hit (entry %x, id= %x!\n", i, 
ste_ssid->ste_cache[i].id);
-                       return 1;
-               }
-       }
-       return 0;
+    struct ste_ssid *ste_ssid;
+    int i;
+
+    printkd("checking cache: %x --> %x.\n", dom->domain_id, rdom);
+    ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                         (struct acm_ssid_domain *)(dom)->ssid);
+
+    for(i=0; i< ACM_TE_CACHE_SIZE; i++) {
+        if ((ste_ssid->ste_cache[i].valid == VALID) &&
+            (ste_ssid->ste_cache[i].id == rdom)) {
+            printkd("cache hit (entry %x, id= %x!\n", i, 
ste_ssid->ste_cache[i].id);
+            return 1;
+        }
+    }
+    return 0;
 }
 
 
 /* we only get here if there is NO entry yet; no duplication check! */
 static void inline
 cache_result(struct domain *subj, struct domain *obj) {
-       struct ste_ssid *ste_ssid;
-       int i;
-       printkd("caching from doms: %x --> %x.\n", subj->domain_id, 
obj->domain_id);
-       ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                        (struct acm_ssid_domain *)(subj)->ssid);
-       for(i=0; i< ACM_TE_CACHE_SIZE; i++)
-               if (ste_ssid->ste_cache[i].valid == FREE)
-                       break;
-       if (i< ACM_TE_CACHE_SIZE) {
-               ste_ssid->ste_cache[i].valid = VALID;
-               ste_ssid->ste_cache[i].id = obj->domain_id;
-       } else
-               printk ("Cache of dom %x is full!\n", subj->domain_id);
+    struct ste_ssid *ste_ssid;
+    int i;
+    printkd("caching from doms: %x --> %x.\n", subj->domain_id, 
obj->domain_id);
+    ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                         (struct acm_ssid_domain *)(subj)->ssid);
+    for(i=0; i< ACM_TE_CACHE_SIZE; i++)
+        if (ste_ssid->ste_cache[i].valid == FREE)
+            break;
+    if (i< ACM_TE_CACHE_SIZE) {
+        ste_ssid->ste_cache[i].valid = VALID;
+        ste_ssid->ste_cache[i].id = obj->domain_id;
+    } else
+        printk ("Cache of dom %x is full!\n", subj->domain_id);
 }
 
 /* deletes entries for domain 'id' from all caches (re-use) */
@@ -458,12 +437,12 @@
     read_lock(&domlist_lock); /* look through caches of all domains */
     pd = &domain_list;
     for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
-       ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
-                        (struct acm_ssid_domain *)(*pd)->ssid);
-       for (i=0; i<ACM_TE_CACHE_SIZE; i++)
-           if ((ste_ssid->ste_cache[i].valid == VALID) &&
-               (ste_ssid->ste_cache[i].id = id))
-                   ste_ssid->ste_cache[i].valid = FREE;
+        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
+                             (struct acm_ssid_domain *)(*pd)->ssid);
+        for (i=0; i<ACM_TE_CACHE_SIZE; i++)
+            if ((ste_ssid->ste_cache[i].valid == VALID) &&
+                (ste_ssid->ste_cache[i].id == id))
+                ste_ssid->ste_cache[i].valid = FREE;
     }
     read_unlock(&domlist_lock);
 }
@@ -482,15 +461,15 @@
     read_lock(&acm_bin_pol_rwlock);
     ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
     if (ste_ssidref == ACM_DEFAULT_LOCAL_SSID) {
-       printk("%s: ERROR STE SSID is NOT SET but policy enforced.\n", 
__func__);
-       read_unlock(&acm_bin_pol_rwlock);
-       return ACM_ACCESS_DENIED; /* catching and indicating config error */
+        printk("%s: ERROR STE SSID is NOT SET but policy enforced.\n", 
__func__);
+        read_unlock(&acm_bin_pol_rwlock);
+        return ACM_ACCESS_DENIED; /* catching and indicating config error */
     }
     if (ste_ssidref >= ste_bin_pol.max_ssidrefs) {
-       printk("%s: ERROR ste_ssidref > max(%x).\n", 
-              __func__, ste_bin_pol.max_ssidrefs-1);
-       read_unlock(&acm_bin_pol_rwlock);
-       return ACM_ACCESS_DENIED;
+        printk("%s: ERROR ste_ssidref > max(%x).\n", 
+               __func__, ste_bin_pol.max_ssidrefs-1);
+        read_unlock(&acm_bin_pol_rwlock);
+        return ACM_ACCESS_DENIED;
     }
     read_unlock(&acm_bin_pol_rwlock);
     return ACM_ACCESS_PERMITTED;
@@ -506,163 +485,193 @@
 /* -------- EVENTCHANNEL OPERATIONS -----------*/
 static int
 ste_pre_eventchannel_unbound(domid_t id) {
-       struct domain *subj, *obj;
-       int ret;
-       traceprintk("%s: dom%x-->dom%x.\n", 
-                   __func__, current->domain->domain_id, id);
-
-       if (check_cache(current->domain, id)) {
-               atomic_inc(&ste_bin_pol.ec_cachehit_count);
-               return ACM_ACCESS_PERMITTED;
-       }
-       atomic_inc(&ste_bin_pol.ec_eval_count);
-       subj = current->domain;
-       obj = find_domain_by_id(id);
-
-       if (share_common_type(subj, obj)) {
-               cache_result(subj, obj);
-               ret = ACM_ACCESS_PERMITTED;
-       } else {
-               atomic_inc(&ste_bin_pol.ec_denied_count); 
-               ret = ACM_ACCESS_DENIED;        
-       }
-       if (obj != NULL)
-               put_domain(obj);
-       return ret;
+    struct domain *subj, *obj;
+    int ret;
+    traceprintk("%s: dom%x-->dom%x.\n", 
+                __func__, current->domain->domain_id, id);
+
+    if (check_cache(current->domain, id)) {
+        atomic_inc(&ste_bin_pol.ec_cachehit_count);
+        return ACM_ACCESS_PERMITTED;
+    }
+    atomic_inc(&ste_bin_pol.ec_eval_count);
+    subj = current->domain;
+    obj = find_domain_by_id(id);
+
+    if (share_common_type(subj, obj)) {
+        cache_result(subj, obj);
+        ret = ACM_ACCESS_PERMITTED;
+    } else {
+        atomic_inc(&ste_bin_pol.ec_denied_count); 
+        ret = ACM_ACCESS_DENIED; 
+    }
+    if (obj != NULL)
+        put_domain(obj);
+    return ret;
 }
 
 static int
 ste_pre_eventchannel_interdomain(domid_t id1, domid_t id2)
 {
-       struct domain *subj, *obj;
-       int ret;
-       traceprintk("%s: dom%x-->dom%x.\n", __func__,
-                   (id1 == DOMID_SELF) ? current->domain->domain_id : id1,
-                   (id2 == DOMID_SELF) ? current->domain->domain_id : id2);
-
-       /* following is a bit longer but ensures that we
-         * "put" only domains that we where "find"-ing 
-        */
-       if (id1 == DOMID_SELF) id1 = current->domain->domain_id;
-       if (id2 == DOMID_SELF) id2 = current->domain->domain_id;
-
-       subj = find_domain_by_id(id1);
-       obj  = find_domain_by_id(id2);
-       if ((subj == NULL) || (obj == NULL)) {
-               ret = ACM_ACCESS_DENIED;
-               goto out;
-       }
-       /* cache check late, but evtchn is not on performance critical path */
-       if (check_cache(subj, obj->domain_id)) {
-               atomic_inc(&ste_bin_pol.ec_cachehit_count);
-               ret = ACM_ACCESS_PERMITTED;
-               goto out;
-       }
-       atomic_inc(&ste_bin_pol.ec_eval_count);
-
-       if (share_common_type(subj, obj)) {
-               cache_result(subj, obj);
-               ret = ACM_ACCESS_PERMITTED;
-       } else {
-               atomic_inc(&ste_bin_pol.ec_denied_count); 
-               ret = ACM_ACCESS_DENIED;        
-       }
+    struct domain *subj, *obj;
+    int ret;
+    traceprintk("%s: dom%x-->dom%x.\n", __func__,
+                (id1 == DOMID_SELF) ? current->domain->domain_id : id1,
+                (id2 == DOMID_SELF) ? current->domain->domain_id : id2);
+
+    /* following is a bit longer but ensures that we
+     * "put" only domains that we where "find"-ing 
+     */
+    if (id1 == DOMID_SELF) id1 = current->domain->domain_id;
+    if (id2 == DOMID_SELF) id2 = current->domain->domain_id;
+
+    subj = find_domain_by_id(id1);
+    obj  = find_domain_by_id(id2);
+    if ((subj == NULL) || (obj == NULL)) {
+        ret = ACM_ACCESS_DENIED;
+        goto out;
+    }
+    /* cache check late, but evtchn is not on performance critical path */
+    if (check_cache(subj, obj->domain_id)) {
+        atomic_inc(&ste_bin_pol.ec_cachehit_count);
+        ret = ACM_ACCESS_PERMITTED;
+        goto out;
+    }
+    atomic_inc(&ste_bin_pol.ec_eval_count);
+
+    if (share_common_type(subj, obj)) {
+        cache_result(subj, obj);
+        ret = ACM_ACCESS_PERMITTED;
+    } else {
+        atomic_inc(&ste_bin_pol.ec_denied_count); 
+        ret = ACM_ACCESS_DENIED; 
+    }
  out:
-       if (obj != NULL)
-               put_domain(obj);
-       if (subj != NULL)
-               put_domain(subj);
-       return ret;
+    if (obj != NULL)
+        put_domain(obj);
+    if (subj != NULL)
+        put_domain(subj);
+    return ret;
 }
 
 /* -------- SHARED MEMORY OPERATIONS -----------*/
 
 static int
 ste_pre_grant_map_ref (domid_t id) {
-       struct domain *obj, *subj;
-       int ret;
-       traceprintk("%s: dom%x-->dom%x.\n", __func__,
-                   current->domain->domain_id, id);
-
-       if (check_cache(current->domain, id)) {
-               atomic_inc(&ste_bin_pol.gt_cachehit_count);
-               return ACM_ACCESS_PERMITTED;
-       }
-       atomic_inc(&ste_bin_pol.gt_eval_count);
-       subj = current->domain;
-       obj = find_domain_by_id(id);
-
-       if (share_common_type(subj, obj)) {
-               cache_result(subj, obj);
-               ret = ACM_ACCESS_PERMITTED;
-       } else {
-               atomic_inc(&ste_bin_pol.gt_denied_count); 
-               printkd("%s: ACCESS DENIED!\n", __func__);
-               ret = ACM_ACCESS_DENIED;        
-       }
-       if (obj != NULL)
-               put_domain(obj);
-       return ret;
-}
+    struct domain *obj, *subj;
+    int ret;
+    traceprintk("%s: dom%x-->dom%x.\n", __func__,
+                current->domain->domain_id, id);
+
+    if (check_cache(current->domain, id)) {
+        atomic_inc(&ste_bin_pol.gt_cachehit_count);
+        return ACM_ACCESS_PERMITTED;
+    }
+    atomic_inc(&ste_bin_pol.gt_eval_count);
+    subj = current->domain;
+    obj = find_domain_by_id(id);
+
+    if (share_common_type(subj, obj)) {
+        cache_result(subj, obj);
+        ret = ACM_ACCESS_PERMITTED;
+    } else {
+        atomic_inc(&ste_bin_pol.gt_denied_count); 
+        printkd("%s: ACCESS DENIED!\n", __func__);
+        ret = ACM_ACCESS_DENIED; 
+    }
+    if (obj != NULL)
+        put_domain(obj);
+    return ret;
+}
+
 
 /* since setting up grant tables involves some implicit information
    flow from the creating domain to the domain that is setup, we 
    check types in addition to the general authorization */
 static int
 ste_pre_grant_setup (domid_t id) {
-       struct domain *obj, *subj;
-       int ret;
-       traceprintk("%s: dom%x-->dom%x.\n", __func__,
-                   current->domain->domain_id, id);
-
-       if (check_cache(current->domain, id)) {
-               atomic_inc(&ste_bin_pol.gt_cachehit_count);
-               return ACM_ACCESS_PERMITTED;
-       }
-       atomic_inc(&ste_bin_pol.gt_eval_count);
-       /* a) check authorization (eventually use specific capabilities) */
-       if (!IS_PRIV(current->domain)) {
-               printk("%s: Grant table management authorization denied 
ERROR!\n", __func__);
-               return ACM_ACCESS_DENIED;
-       }
-       /* b) check types */
-       subj = current->domain;
-       obj = find_domain_by_id(id);
-
-       if (share_common_type(subj, obj)) {
-               cache_result(subj, obj);
-               ret = ACM_ACCESS_PERMITTED;
-       } else {
-               atomic_inc(&ste_bin_pol.gt_denied_count); 
-               ret = ACM_ACCESS_DENIED;        
-       }
-       if (obj != NULL)
-               put_domain(obj);
-       return ret;
-}
+    struct domain *obj, *subj;
+    int ret;
+    traceprintk("%s: dom%x-->dom%x.\n", __func__,
+                current->domain->domain_id, id);
+
+    if (check_cache(current->domain, id)) {
+        atomic_inc(&ste_bin_pol.gt_cachehit_count);
+        return ACM_ACCESS_PERMITTED;
+    }
+    atomic_inc(&ste_bin_pol.gt_eval_count);
+    /* a) check authorization (eventually use specific capabilities) */
+    if (!IS_PRIV(current->domain)) {
+        printk("%s: Grant table management authorization denied ERROR!\n", 
__func__);
+        return ACM_ACCESS_DENIED;
+    }
+    /* b) check types */
+    subj = current->domain;
+    obj = find_domain_by_id(id);
+
+    if (share_common_type(subj, obj)) {
+        cache_result(subj, obj);
+        ret = ACM_ACCESS_PERMITTED;
+    } else {
+        atomic_inc(&ste_bin_pol.gt_denied_count); 
+        ret = ACM_ACCESS_DENIED; 
+    }
+    if (obj != NULL)
+        put_domain(obj);
+    return ret;
+}
+
+/* -------- DOMAIN-Requested Decision hooks -----------*/
+
+static int
+ste_sharing(ssidref_t ssidref1, ssidref_t ssidref2) {
+    if (have_common_type (
+        GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref1),
+        GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref2)
+        ))
+        return ACM_ACCESS_PERMITTED;
+    else
+        return ACM_ACCESS_DENIED;
+}
+
 
 /* now define the hook structure similarly to LSM */
 struct acm_operations acm_simple_type_enforcement_ops = {
-       /* policy management services */
-       .init_domain_ssid               = ste_init_domain_ssid,
-       .free_domain_ssid               = ste_free_domain_ssid,
-       .dump_binary_policy     = ste_dump_policy,
-       .set_binary_policy      = ste_set_policy,
-       .dump_statistics                = ste_dump_stats,
+
+    /* policy management services */
+    .init_domain_ssid  = ste_init_domain_ssid,
+    .free_domain_ssid  = ste_free_domain_ssid,
+    .dump_binary_policy     = ste_dump_policy,
+    .set_binary_policy      = ste_set_policy,
+    .dump_statistics  = ste_dump_stats,
     .dump_ssid_types        = ste_dump_ssid_types,
-       /* domain management control hooks */
-       .pre_domain_create              = ste_pre_domain_create,
-       .post_domain_create         = NULL,
-       .fail_domain_create     = NULL,
-       .post_domain_destroy    = ste_post_domain_destroy,
-       /* event channel control hooks */
-       .pre_eventchannel_unbound   = ste_pre_eventchannel_unbound,
-       .fail_eventchannel_unbound      = NULL,
-       .pre_eventchannel_interdomain   = ste_pre_eventchannel_interdomain,
-       .fail_eventchannel_interdomain  = NULL,
-       /* grant table control hooks */
-       .pre_grant_map_ref      = ste_pre_grant_map_ref,
-       .fail_grant_map_ref     = NULL,
-       .pre_grant_setup        = ste_pre_grant_setup,
-       .fail_grant_setup       = NULL,
+
+    /* domain management control hooks */
+    .pre_domain_create       = ste_pre_domain_create,
+    .post_domain_create     = NULL,
+    .fail_domain_create     = NULL,
+    .post_domain_destroy    = ste_post_domain_destroy,
+
+    /* event channel control hooks */
+    .pre_eventchannel_unbound   = ste_pre_eventchannel_unbound,
+    .fail_eventchannel_unbound = NULL,
+    .pre_eventchannel_interdomain = ste_pre_eventchannel_interdomain,
+    .fail_eventchannel_interdomain  = NULL,
+
+    /* grant table control hooks */
+    .pre_grant_map_ref      = ste_pre_grant_map_ref,
+    .fail_grant_map_ref     = NULL,
+    .pre_grant_setup        = ste_pre_grant_setup,
+    .fail_grant_setup       = NULL,
+    .sharing                = ste_sharing,
 };
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
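
The rewritten ste_init_state() loop and the new ste_sharing() hook above both come down to the same question: do two ssidrefs carry at least one simple-type-enforcement type in common? The ste_dump_ssid_types() hunk shows the layout this relies on: ste_bin_pol.ssidrefs is a max_ssidrefs x max_types matrix indexed as ssidref * max_types + type. have_common_type() itself is not part of this hunk, so what follows is only a minimal standalone sketch of that test under the assumed layout; the *_sketch names and the local domaintype_t stand-in are illustrative, not hypervisor code.

/*
 * Sketch of the "common type" test that ste_sharing() and ste_init_state()
 * rely on.  Assumes the matrix layout visible in ste_dump_ssid_types():
 * entry [ref * max_types + t] is non-zero when ssidref `ref' carries type t.
 */
#include <stdio.h>

typedef unsigned short domaintype_t;   /* stand-in for the real typedef */

static int have_common_type_sketch(const domaintype_t *ssidrefs,
                                   unsigned int max_types,
                                   unsigned int ref1, unsigned int ref2)
{
    unsigned int t;

    for (t = 0; t < max_types; t++)
        if (ssidrefs[ref1 * max_types + t] &&
            ssidrefs[ref2 * max_types + t])
            return 1;   /* at least one shared type -> sharing permitted */
    return 0;           /* disjoint type sets -> sharing denied */
}

int main(void)
{
    /* 3 ssidrefs x 4 types: refs 0 and 1 share type 2, ref 2 shares nothing. */
    domaintype_t pol[3 * 4] = {
        1, 0, 1, 0,
        0, 1, 1, 0,
        0, 0, 0, 1,
    };

    printf("0<->1: %d\n", have_common_type_sketch(pol, 4, 0, 1));  /* 1 */
    printf("0<->2: %d\n", have_common_type_sketch(pol, 4, 0, 2));  /* 0 */
    return 0;
}

Event-channel bindings and shared grant frames are permitted only when this test succeeds for the two ssidrefs involved, which is exactly what ste_init_state() re-validates against the running domains before a new policy is activated.
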
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/ia64/vmx/vmx_support.c
--- a/xen/arch/ia64/vmx/vmx_support.c   Fri Oct 21 19:58:39 2005
+++ b/xen/arch/ia64/vmx/vmx_support.c   Mon Oct 24 15:08:13 2005
@@ -49,7 +49,7 @@
          */
        if (test_and_clear_bit(port,
                &d->shared_info->evtchn_pending[0])) {
-           clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
+           clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
            clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
            vmx_io_assist(v);
        }
@@ -67,7 +67,7 @@
             * nothing losed. Next loop will check I/O channel to fix this
             * window.
             */
-           clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
+           clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
        }
        else
            break;
@@ -139,8 +139,8 @@
     /* Clear indicator specific to interrupt delivered from DM */
     if (test_and_clear_bit(port,
                &d->shared_info->evtchn_pending[0])) {
-       if (!d->shared_info->evtchn_pending[port >> 5])
-           clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
+       if (!d->shared_info->evtchn_pending[port/BITS_PER_LONG])
+           clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
 
        if (!v->vcpu_info->evtchn_pending_sel)
            clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
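
The vmx_support.c hunks above replace the hard-coded `port >> 5` with `port / BITS_PER_LONG`: evtchn_pending[] and evtchn_pending_sel are made of unsigned long words, which are 64-bit on ia64, so a shift by 5 selects the wrong word there. A self-contained sketch of the word/bit decomposition, only to show where the two forms diverge:

/*
 * Word/bit split for an event-channel port.  With 32-bit longs the two
 * expressions agree; with 64-bit longs only the BITS_PER_LONG form picks
 * the word that actually contains the port's bit.  BITS_PER_LONG is
 * derived locally so the example stands alone.
 */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * (int)sizeof(unsigned long))

int main(void)
{
    unsigned int port = 70;   /* arbitrary event-channel port */

    printf("BITS_PER_LONG     = %d\n", BITS_PER_LONG);
    printf("word via port>>5  = %u\n", port >> 5);
    printf("word via port/BPL = %u\n", port / BITS_PER_LONG);
    printf("bit within word   = %u\n", port % BITS_PER_LONG);
    return 0;
}

On a 64-bit build this prints word 2 for the shift but word 1 for the division; the shift form was silently touching the wrong word of the pending selector.
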
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Fri Oct 21 19:58:39 2005
+++ b/xen/arch/ia64/xen/domain.c        Mon Oct 24 15:08:13 2005
@@ -146,14 +146,24 @@
        continue_cpu_idle_loop();
 }
 
-struct vcpu *arch_alloc_vcpu_struct(void)
-{
-       /* Per-vp stack is used here. So we need keep vcpu
-        * same page as per-vp stack */
-       return alloc_xenheap_pages(KERNEL_STACK_SIZE_ORDER);
-}
-
-void arch_free_vcpu_struct(struct vcpu *v)
+struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
+{
+       struct vcpu *v;
+
+       if ((v = alloc_xenheap_pages(KERNEL_STACK_SIZE_ORDER)) == NULL)
+               return NULL;
+
+       memset(v, 0, sizeof(*v)); 
+        memcpy(&v->arch, &idle0_vcpu.arch, sizeof(v->arch));
+       v->arch.privregs = 
+               alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+       printf("arch_vcpu_info=%p\n", v->arch.privregs);
+       memset(v->arch.privregs, 0, PAGE_SIZE);
+
+       return v;
+}
+
+void free_vcpu_struct(struct vcpu *v)
 {
        free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
 }
@@ -253,13 +263,6 @@
        printf("arch_getdomaininfo_ctxt\n");
        c->regs = *regs;
        c->vcpu.evtchn_vector = v->vcpu_info->arch.evtchn_vector;
-#if 0
-       if (c->vcpu.privregs && copy_to_user(c->vcpu.privregs,
-                       v->vcpu_info->arch.privregs, sizeof(mapped_regs_t))) {
-               printk("Bad ctxt address: 0x%lx\n", c->vcpu.privregs);
-               return -EFAULT;
-       }
-#endif
 
        c->shared = v->domain->shared_info->arch;
 }
@@ -291,12 +294,7 @@
 
            vmx_setup_platform(v, c);
        }
-    else{
-       v->arch.privregs =
-                       alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
-           printf("arch_vcpu_info=%p\n", v->arch.privregs);
-       memset(v->arch.privregs, 0, PAGE_SIZE);
-    }
+
        *regs = c->regs;
        new_thread(v, regs->cr_iip, 0, 0);
 
@@ -314,18 +312,6 @@
        /* Don't redo final setup */
        set_bit(_VCPUF_initialised, &v->vcpu_flags);
        return 0;
-}
-
-void arch_do_boot_vcpu(struct vcpu *v)
-{
-       struct domain *d = v->domain;
-       printf("arch_do_boot_vcpu: not implemented\n");
-
-       d->vcpu[v->vcpu_id]->arch.privregs = 
-                       alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
-       printf("arch_vcpu_info=%p\n", d->vcpu[v->vcpu_id]->arch.privregs);
-       memset(d->vcpu[v->vcpu_id]->arch.privregs, 0, PAGE_SIZE);
-       return;
 }
 
 void domain_relinquish_resources(struct domain *d)
@@ -383,7 +369,7 @@
                }
                VCPU(v, banknum) = 1;
                VCPU(v, metaphysical_mode) = 1;
-               d->shared_info->arch.flags = (d == dom0) ? 
(SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN)
 : 0;
+               d->shared_info->arch.flags = (d == dom0) ? 
(SIF_INITDOMAIN|SIF_PRIVILEGED) : 0;
        }
 }
 
@@ -836,12 +822,12 @@
        unsigned long ret, progress = 0;
 
 //printf("construct_dom0: starting\n");
+
+#ifndef CLONE_DOMAIN0
        /* Sanity! */
-#ifndef CLONE_DOMAIN0
-       if ( d != dom0 ) 
-           BUG();
-       if ( test_bit(_DOMF_constructed, &d->domain_flags) ) 
-           BUG();
+       BUG_ON(d != dom0);
+       BUG_ON(d->vcpu[0] == NULL);
+       BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
 #endif
 
        memset(&dsi, 0, sizeof(struct domain_setup_info));
@@ -1004,14 +990,8 @@
        printk("Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
        if (vmx_dom0)
            vmx_final_setup_domain(dom0);
-    else{
-       d->vcpu[0]->arch.privregs = 
-                       alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
-           printf("arch_vcpu_info=%p\n", d->vcpu[0]->arch.privregs);
-       memset(d->vcpu[0]->arch.privregs, 0, PAGE_SIZE);
-    }
-
-       set_bit(_DOMF_constructed, &d->domain_flags);
+
+       set_bit(_VCPUF_initialised, &v->vcpu_flags);
 
        new_thread(v, pkern_entry, 0, 0);
        physdev_init_dom0(d);
@@ -1043,7 +1023,7 @@
        unsigned long pkern_entry;
 
 #ifndef DOMU_AUTO_RESTART
-       if ( test_bit(_DOMF_constructed, &d->domain_flags) ) BUG();
+       BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
 #endif
 
        printk("*** LOADING DOMAIN %d ***\n",d->domain_id);
@@ -1063,7 +1043,7 @@
        loaddomainelfimage(d,image_start);
        printk("loaddomainelfimage returns\n");
 
-       set_bit(_DOMF_constructed, &d->domain_flags);
+       set_bit(_VCPUF_initialised, &v->vcpu_flags);
 
        printk("calling new_thread, entry=%p\n",pkern_entry);
 #ifdef DOMU_AUTO_RESTART
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/Makefile
--- a/xen/arch/x86/Makefile     Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/Makefile     Mon Oct 24 15:08:13 2005
@@ -4,6 +4,7 @@
 OBJS += $(patsubst %.S,%.o,$(wildcard $(TARGET_SUBARCH)/*.S))
 OBJS += $(patsubst %.c,%.o,$(wildcard $(TARGET_SUBARCH)/*.c))
 OBJS += $(patsubst %.c,%.o,$(wildcard acpi/*.c))
+OBJS += $(patsubst %.c,%.o,$(wildcard dm/*.c))
 OBJS += $(patsubst %.c,%.o,$(wildcard mtrr/*.c))
 OBJS += $(patsubst %.c,%.o,$(wildcard genapic/*.c))
 OBJS += $(patsubst %.c,%.o,$(wildcard cpu/*.c))
@@ -66,6 +67,7 @@
        rm -f x86_64/*.o x86_64/*~ x86_64/core
        rm -f mtrr/*.o mtrr/*~ mtrr/core
        rm -f acpi/*.o acpi/*~ acpi/core
+       rm -f dm/*.o dm/*~ dm/core
        rm -f genapic/*.o genapic/*~ genapic/core
        rm -f cpu/*.o cpu/*~ cpu/core
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/audit.c
--- a/xen/arch/x86/audit.c      Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/audit.c      Mon Oct 24 15:08:13 2005
@@ -516,16 +516,6 @@
                                    d->domain_id, mfn, page->u.inuse.type_info);
                             errors++;
                         }
-
-                        if ( (page->u.inuse.type_info & PGT_pinned) != 
PGT_pinned )
-                        {
-                            if ( !VM_ASSIST(d, 
VMASST_TYPE_writable_pagetables) )
-                            {
-                                printk("Audit %d: L1 mfn=%lx not pinned t=%"
-                                      PRtype_info "\n",
-                                       d->domain_id, mfn, 
page->u.inuse.type_info);
-                            }
-                        }
                     }
                 }
                 
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/bitops.c
--- a/xen/arch/x86/bitops.c     Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/bitops.c     Mon Oct 24 15:08:13 2005
@@ -29,7 +29,7 @@
     const unsigned long *p = addr + (offset / BITS_PER_LONG);
     unsigned int set, bit = offset & (BITS_PER_LONG - 1);
 
-    ASSERT(offset < size);
+    ASSERT(offset <= size);
 
     if ( bit != 0 )
     {
@@ -78,7 +78,7 @@
     const unsigned long *p = addr + (offset / BITS_PER_LONG);
     unsigned int set, bit = offset & (BITS_PER_LONG - 1);
 
-    ASSERT(offset < size);
+    ASSERT(offset <= size);
 
     if ( bit != 0 )
     {
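
The two bitops.c hunks relax the assertion from `offset < size` to `offset <= size`: resuming a scan from "previous hit + 1" legitimately reaches offset == size when the hit was the last bit, and such a call should simply report that nothing more was found. A generic stand-in (not the Xen implementation) for that boundary case:

/*
 * Generic find-next-set-bit used only to illustrate why offset == size
 * must be accepted: the second scan below starts exactly at size.
 */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * (int)sizeof(unsigned long))

static unsigned int find_next_set_bit(const unsigned long *addr,
                                      unsigned int size, unsigned int offset)
{
    unsigned int i;

    if (offset >= size)          /* offset == size: nothing left to scan */
        return size;
    for (i = offset; i < size; i++)
        if (addr[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
            return i;
    return size;
}

int main(void)
{
    unsigned long map[1] = { 0 };
    unsigned int size = BITS_PER_LONG, bit;

    map[0] |= 1UL << (size - 1);            /* only the last bit is set */

    bit = find_next_set_bit(map, size, 0);  /* finds size - 1 */
    printf("first hit: %u\n", bit);

    /* Restarting at bit + 1 == size is legal and just reports "none". */
    bit = find_next_set_bit(map, size, bit + 1);
    printf("second scan returns: %u (== size)\n", bit);
    return 0;
}
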
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/domain.c     Mon Oct 24 15:08:13 2005
@@ -1,6 +1,6 @@
 /******************************************************************************
  * arch/x86/domain.c
- * 
+ *
  * x86-specific domain handling (e.g., register setup and context switching).
  */
 
@@ -39,7 +39,6 @@
 #include <asm/msr.h>
 #include <asm/physdev.h>
 #include <xen/kernel.h>
-#include <public/io/ioreq.h>
 #include <xen/multicall.h>
 
 /* opt_noreboot: If true, machine will need manual reset on error. */
@@ -145,9 +144,7 @@
     smp_send_stop();
     disable_IO_APIC();
 
-#ifdef CONFIG_VMX
     stop_vmx();
-#endif
 
     /* Rebooting needs to touch the page at absolute address 0. */
     *((unsigned short *)__va(0x472)) = reboot_mode;
@@ -205,28 +202,39 @@
                page->u.inuse.type_info);
     }
 
-    
     page = virt_to_page(d->shared_info);
     printk("Shared_info@%p: caf=%08x, taf=%" PRtype_info "\n",
            _p(page_to_phys(page)), page->count_info,
            page->u.inuse.type_info);
 }
 
-struct vcpu *arch_alloc_vcpu_struct(void)
-{
-    return xmalloc(struct vcpu);
-}
-
-/* We assume that vcpu 0 is always the last one to be freed in a
-   domain i.e. if v->vcpu_id == 0, the domain should be
-   single-processor. */
-void arch_free_vcpu_struct(struct vcpu *v)
-{
-    struct vcpu *p;
-    for_each_vcpu(v->domain, p) {
-        if (p->next_in_list == v)
-            p->next_in_list = v->next_in_list;
-    }
+struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
+{
+    struct vcpu *v;
+
+    if ( (v = xmalloc(struct vcpu)) == NULL )
+        return NULL;
+
+    memset(v, 0, sizeof(*v));
+
+    memcpy(&v->arch, &idle0_vcpu.arch, sizeof(v->arch));
+    v->arch.flags = TF_kernel_mode;
+
+    if ( (v->vcpu_id = vcpu_id) != 0 )
+    {
+        v->arch.schedule_tail  = d->vcpu[0]->arch.schedule_tail;
+        v->arch.perdomain_ptes =
+            d->arch.mm_perdomain_pt + (vcpu_id << PDPT_VCPU_SHIFT);
+    }
+
+    return v;
+}
+
+void free_vcpu_struct(struct vcpu *v)
+{
+    BUG_ON(v->next_in_list != NULL);
+    if ( v->vcpu_id != 0 )
+        v->domain->vcpu[v->vcpu_id - 1]->next_in_list = NULL;
     xfree(v);
 }
 
@@ -242,14 +250,14 @@
 void arch_do_createdomain(struct vcpu *v)
 {
     struct domain *d = v->domain;
-
-    v->arch.flags = TF_kernel_mode;
+    l1_pgentry_t gdt_l1e;
+    int vcpuid;
 
     if ( is_idle_task(d) )
         return;
 
     v->arch.schedule_tail = continue_nonidle_task;
-    
+
     d->shared_info = alloc_xenheap_page();
     memset(d->shared_info, 0, PAGE_SIZE);
     v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
@@ -257,14 +265,24 @@
     SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
     set_pfn_from_mfn(virt_to_phys(d->shared_info) >> PAGE_SHIFT,
             INVALID_M2P_ENTRY);
-    
+
     d->arch.mm_perdomain_pt = alloc_xenheap_page();
     memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE);
     set_pfn_from_mfn(virt_to_phys(d->arch.mm_perdomain_pt) >> PAGE_SHIFT,
             INVALID_M2P_ENTRY);
     v->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
-    v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
-        l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
+
+    /*
+     * Map Xen segments into every VCPU's GDT, irrespective of whether every
+     * VCPU will actually be used. This avoids an NMI race during context
+     * switch: if we take an interrupt after switching CR3 but before switching
+     * GDT, and the old VCPU# is invalid in the new domain, we would otherwise
+     * try to load CS from an invalid table.
+     */
+    gdt_l1e = l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
+    for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
+        d->arch.mm_perdomain_pt[
+            (vcpuid << PDPT_VCPU_SHIFT) + FIRST_RESERVED_GDT_PAGE] = gdt_l1e;
 
     v->arch.guest_vtable  = __linear_l2_table;
     v->arch.shadow_vtable = __shadow_linear_l2_table;
@@ -272,37 +290,23 @@
 #ifdef __x86_64__
     v->arch.guest_vl3table = __linear_l3_table;
     v->arch.guest_vl4table = __linear_l4_table;
-    
+
     d->arch.mm_perdomain_l2 = alloc_xenheap_page();
     memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
-    d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] = 
+    d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] =
         l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt),
                         __PAGE_HYPERVISOR);
     d->arch.mm_perdomain_l3 = alloc_xenheap_page();
     memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
-    d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] = 
+    d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
         l3e_from_page(virt_to_page(d->arch.mm_perdomain_l2),
                             __PAGE_HYPERVISOR);
 #endif
-    
+
     (void)ptwr_init(d);
-    
-    shadow_lock_init(d);        
+
+    shadow_lock_init(d);
     INIT_LIST_HEAD(&d->arch.free_shadow_frames);
-}
-
-void arch_do_boot_vcpu(struct vcpu *v)
-{
-    struct domain *d = v->domain;
-
-    v->arch.flags = TF_kernel_mode;
-
-    v->arch.schedule_tail = d->vcpu[0]->arch.schedule_tail;
-
-    v->arch.perdomain_ptes =
-        d->arch.mm_perdomain_pt + (v->vcpu_id << PDPT_VCPU_SHIFT);
-    v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
-        l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
 }
 
 void vcpu_migrate_cpu(struct vcpu *v, int newcpu)
@@ -319,34 +323,6 @@
         v->arch.schedule_tail = arch_vmx_do_relaunch;
     }
 }
-
-#ifdef CONFIG_VMX
-static int vmx_switch_on;
-
-static void vmx_final_setup_guest(struct vcpu *v)
-{
-    v->arch.schedule_tail = arch_vmx_do_launch;
-
-    if (v == v->domain->vcpu[0]) {
-        /*
-         * Required to do this once per domain
-         * XXX todo: add a seperate function to do these.
-         */
-        memset(&v->domain->shared_info->evtchn_mask[0], 0xff,
-               sizeof(v->domain->shared_info->evtchn_mask));
-
-        /* Put the domain in shadow mode even though we're going to be using
-         * the shared 1:1 page table initially. It shouldn't hurt */
-        shadow_mode_enable(v->domain,
-                           SHM_enable|SHM_refcounts|
-                           SHM_translate|SHM_external);
-    }
-
-    if (!vmx_switch_on)
-        vmx_switch_on = 1;
-}
-#endif
-
 
 /* This is called by arch_final_setup_guest and do_boot_vcpu */
 int arch_set_info_guest(
@@ -415,7 +391,7 @@
     }
     else if ( !(c->flags & VGCF_VMX_GUEST) )
     {
-        if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d, 
+        if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d,
                                 PGT_base_page_table) )
             return -EINVAL;
     }
@@ -500,12 +476,6 @@
         : "=r" (__r) : "r" (value), "0" (__r) );\
     __r; })
 
-#if CONFIG_VMX
-#define load_msrs(n)     if (vmx_switch_on) vmx_load_msrs(n)
-#else
-#define load_msrs(n)     ((void)0)
-#endif 
-
 /*
  * save_segments() writes a mask of segments which are dirty (non-zero),
  * allowing load_segments() to avoid some expensive segment loads and
@@ -583,7 +553,7 @@
         struct cpu_user_regs *regs = guest_cpu_user_regs();
         unsigned long   *rsp =
             (n->arch.flags & TF_kernel_mode) ?
-            (unsigned long *)regs->rsp : 
+            (unsigned long *)regs->rsp :
             (unsigned long *)nctxt->kernel_sp;
 
         if ( !(n->arch.flags & TF_kernel_mode) )
@@ -683,9 +653,9 @@
         regs->r11 = stu.r11;
         regs->rcx = stu.rcx;
     }
-    
+
     /* Saved %rax gets written back to regs->rax in entry.S. */
-    return stu.rax; 
+    return stu.rax;
 }
 
 #define switch_kernel_stack(_n,_c) ((void)0)
@@ -693,7 +663,6 @@
 #elif defined(__i386__)
 
 #define load_segments(n) ((void)0)
-#define load_msrs(n)     ((void)0)
 #define save_segments(p) ((void)0)
 
 static inline void switch_kernel_stack(struct vcpu *n, unsigned int cpu)
@@ -718,7 +687,7 @@
     if ( !is_idle_task(p->domain) )
     {
         memcpy(&p->arch.guest_context.user_regs,
-               stack_regs, 
+               stack_regs,
                CTXT_SWITCH_STACK_BYTES);
         unlazy_fpu(p);
         save_segments(p);
@@ -804,7 +773,7 @@
         {
             load_LDT(next);
             load_segments(next);
-            load_msrs(next);
+            vmx_load_msrs(next);
         }
     }
 
@@ -876,7 +845,7 @@
 #if defined(__i386__)
         regs->eax  = op;
         regs->eip -= 2;  /* re-execute 'int 0x82' */
-        
+
         for ( i = 0; i < nr_args; i++ )
         {
             switch ( i )
@@ -892,7 +861,7 @@
 #elif defined(__x86_64__)
         regs->rax  = op;
         regs->rip -= 2;  /* re-execute 'syscall' */
-        
+
         for ( i = 0; i < nr_args; i++ )
         {
             switch ( i )
@@ -913,20 +882,6 @@
     return op;
 }
 
-#ifdef CONFIG_VMX
-static void vmx_relinquish_resources(struct vcpu *v)
-{
-    if ( !VMX_DOMAIN(v) )
-        return;
-
-    destroy_vmcs(&v->arch.arch_vmx);
-    free_monitor_pagetable(v);
-    rem_ac_timer(&v->domain->arch.vmx_platform.vmx_pit.pit_timer);
-}
-#else
-#define vmx_relinquish_resources(_v) ((void)0)
-#endif
-
 static void relinquish_memory(struct domain *d, struct list_head *list)
 {
     struct list_head *ent;
@@ -965,7 +920,7 @@
         for ( ; ; )
         {
             x = y;
-            if ( likely((x & (PGT_type_mask|PGT_validated)) != 
+            if ( likely((x & (PGT_type_mask|PGT_validated)) !=
                         (PGT_base_page_table|PGT_validated)) )
                 break;
 
@@ -1026,7 +981,7 @@
     shadow_mode_disable(d);
 
     /*
-     * Relinquish GDT mappings. No need for explicit unmapping of the LDT as 
+     * Relinquish GDT mappings. No need for explicit unmapping of the LDT as
      * it automatically gets squashed when the guest's mappings go away.
      */
     for_each_vcpu(d, v)
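
The arch_do_createdomain() hunk above pre-populates the reserved GDT entry in every VCPU's slice of mm_perdomain_pt, so a stale VCPU number after the CR3 switch can never leave CS pointing at an unmapped table. A tiny sketch of the slot arithmetic; PDPT_VCPU_SHIFT and FIRST_RESERVED_GDT_PAGE are not defined in this patch, so the values below are placeholders chosen only for readable output.

/*
 * Illustration of (vcpuid << PDPT_VCPU_SHIFT) + FIRST_RESERVED_GDT_PAGE:
 * every possible VCPU gets the same gdt_l1e written into its own slice of
 * the per-domain page table, whether or not that VCPU is ever created.
 */
#include <stdio.h>

#define MAX_VCPUS_SKETCH                4   /* small, for readable output */
#define PDPT_VCPU_SHIFT_SKETCH          5   /* placeholder: 32 slots per VCPU */
#define FIRST_RESERVED_GDT_PAGE_SKETCH 14   /* placeholder offset in a slice */

int main(void)
{
    int vcpuid;

    for (vcpuid = 0; vcpuid < MAX_VCPUS_SKETCH; vcpuid++) {
        int idx = (vcpuid << PDPT_VCPU_SHIFT_SKETCH)
                  + FIRST_RESERVED_GDT_PAGE_SKETCH;
        printf("vcpu %d -> mm_perdomain_pt[%d] = gdt_l1e\n", vcpuid, idx);
    }
    return 0;
}
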
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/domain_build.c       Mon Oct 24 15:08:13 2005
@@ -14,6 +14,7 @@
 #include <xen/event.h>
 #include <xen/elf.h>
 #include <xen/kernel.h>
+#include <xen/domain.h>
 #include <asm/regs.h>
 #include <asm/system.h>
 #include <asm/io.h>
@@ -146,10 +147,9 @@
         struct domain *d, l1_pgentry_t *p2m, unsigned long l2mfn);
 
     /* Sanity! */
-    if ( d->domain_id != 0 ) 
-        BUG();
-    if ( test_bit(_DOMF_constructed, &d->domain_flags) ) 
-        BUG();
+    BUG_ON(d->domain_id != 0);
+    BUG_ON(d->vcpu[0] == NULL);
+    BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
 
     memset(&dsi, 0, sizeof(struct domain_setup_info));
     dsi.image_addr = (unsigned long)image_start;
@@ -557,7 +557,9 @@
     /* Mask all upcalls... */
     for ( i = 0; i < MAX_VIRT_CPUS; i++ )
         d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
-    d->shared_info->n_vcpu = num_online_cpus();
+
+    for ( i = 1; i < num_online_cpus(); i++ )
+        (void)alloc_vcpu(d, i, i);
 
     /* Set up monitor table */
     update_pagetables(v);
@@ -585,7 +587,7 @@
     /* Set up start info area. */
     si = (start_info_t *)vstartinfo_start;
     memset(si, 0, PAGE_SIZE);
-    si->nr_pages     = nr_pages;
+    si->nr_pages = nr_pages;
 
     if ( opt_dom0_translate )
     {
@@ -657,7 +659,7 @@
 
     init_domain_time(d);
 
-    set_bit(_DOMF_constructed, &d->domain_flags);
+    set_bit(_VCPUF_initialised, &v->vcpu_flags);
 
     new_thread(v, dsi.v_kernentry, vstack_end, vstartinfo_start);
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/mm.c Mon Oct 24 15:08:13 2005
@@ -185,7 +185,7 @@
      * Any Xen-heap pages that we will allow to be mapped will have
      * their domain field set to dom_xen.
      */
-    dom_xen = alloc_domain_struct();
+    dom_xen = alloc_domain();
     atomic_set(&dom_xen->refcnt, 1);
     dom_xen->domain_id = DOMID_XEN;
 
@@ -194,7 +194,7 @@
      * This domain owns I/O pages that are within the range of the pfn_info
      * array. Mappings occur at the priv of the caller.
      */
-    dom_io = alloc_domain_struct();
+    dom_io = alloc_domain();
     atomic_set(&dom_io->refcnt, 1);
     dom_io->domain_id = DOMID_IO;
 
@@ -366,9 +366,6 @@
 
     if ( unlikely(!get_page_type(page, type)) )
     {
-        if ( (type & PGT_type_mask) != PGT_l1_page_table )
-            MEM_LOG("Bad page type for pfn %lx (%" PRtype_info ")", 
-                    page_nr, page->u.inuse.type_info);
         put_page(page);
         return 0;
     }
@@ -438,6 +435,7 @@
 {
     unsigned long mfn = l1e_get_pfn(l1e);
     struct pfn_info *page = &frame_table[mfn];
+    int okay;
     extern int domain_iomem_in_pfn(struct domain *d, unsigned long pfn);
 
     if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
@@ -470,9 +468,17 @@
         d = dom_io;
     }
 
-    return ((l1e_get_flags(l1e) & _PAGE_RW) ?
+    okay = ((l1e_get_flags(l1e) & _PAGE_RW) ?
             get_page_and_type(page, d, PGT_writable_page) :
             get_page(page, d));
+    if ( !okay )
+    {
+        MEM_LOG("Error getting mfn %lx (pfn %lx) from L1 entry %" PRIpte
+                " for dom%d",
+                mfn, get_pfn_from_mfn(mfn), l1e_get_intpte(l1e), d->domain_id);
+    }
+
+    return okay;
 }
 
 
@@ -582,6 +588,7 @@
     unsigned long    pfn  = l1e_get_pfn(l1e);
     struct pfn_info *page = &frame_table[pfn];
     struct domain   *e;
+    struct vcpu     *v;
 
     if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !pfn_valid(pfn) )
         return;
@@ -615,10 +622,12 @@
         /* We expect this is rare so we blow the entire shadow LDT. */
         if ( unlikely(((page->u.inuse.type_info & PGT_type_mask) == 
                        PGT_ldt_page)) &&
-             unlikely(((page->u.inuse.type_info & PGT_count_mask) != 0)) )
-
-            // XXX SMP BUG?
-            invalidate_shadow_ldt(e->vcpu[0]);
+             unlikely(((page->u.inuse.type_info & PGT_count_mask) != 0)) &&
+             (d == e) )
+        {
+            for_each_vcpu ( d, v )
+                invalidate_shadow_ldt(v);
+        }
         put_page(page);
     }
 }
@@ -679,6 +688,7 @@
     return 1;
 
  fail:
+    MEM_LOG("Failure in alloc_l1_table: entry %d", i);
     while ( i-- > 0 )
         if ( is_guest_l1_slot(i) )
             put_page_from_l1e(pl1e[i], d);
@@ -838,6 +848,7 @@
     return 1;
 
  fail:
+    MEM_LOG("Failure in alloc_l2_table: entry %d", i);
     while ( i-- > 0 )
         if ( is_guest_l2_slot(type, i) )
             put_page_from_l2e(pl2e[i], pfn);
@@ -1311,7 +1322,7 @@
         }
     }
 
-    switch (type  & PGT_type_mask)
+    switch ( type & PGT_type_mask )
     {
     case PGT_l1_page_table:
         free_l1_table(page);
@@ -1450,7 +1461,7 @@
                     if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
                          ((type & PGT_type_mask) != PGT_l1_page_table) )
                         MEM_LOG("Bad type (saw %" PRtype_info
-                                "!= exp %" PRtype_info ") "
+                                " != exp %" PRtype_info ") "
                                 "for mfn %lx (pfn %lx)",
                                 x, type, page_to_pfn(page),
                                 get_pfn_from_mfn(page_to_pfn(page)));
@@ -1491,11 +1502,10 @@
         /* Try to validate page type; drop the new reference on failure. */
         if ( unlikely(!alloc_page_type(page, type)) )
         {
-            MEM_LOG("Error while validating pfn %lx for type %" PRtype_info "."
-                    " caf=%08x taf=%" PRtype_info,
-                    page_to_pfn(page), type,
-                    page->count_info,
-                    page->u.inuse.type_info);
+            MEM_LOG("Error while validating mfn %lx (pfn %lx) for type %"
+                    PRtype_info ": caf=%08x taf=%" PRtype_info,
+                    page_to_pfn(page), get_pfn_from_mfn(page_to_pfn(page)),
+                    type, page->count_info, page->u.inuse.type_info);
             /* Noone else can get a reference. We hold the only ref. */
             page->u.inuse.type_info = 0;
             return 0;
@@ -1752,7 +1762,7 @@
             goto pin_page;
 
         case MMUEXT_UNPIN_TABLE:
-            if ( unlikely(!(okay = get_page_from_pagenr(mfn, FOREIGNDOM))) )
+            if ( unlikely(!(okay = get_page_from_pagenr(mfn, d))) )
             {
                 MEM_LOG("Mfn %lx bad domain (dom=%p)",
                         mfn, page_get_owner(page));
@@ -2905,6 +2915,7 @@
 {
     l1_pgentry_t ol1e, nl1e;
     int modified = 0, i;
+    struct vcpu *v;
 
     for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
     {
@@ -2937,7 +2948,11 @@
              */
             memcpy(&l1page[i], &snapshot[i],
                    (L1_PAGETABLE_ENTRIES - i) * sizeof(l1_pgentry_t));
-            domain_crash();
+
+            /* Crash the offending domain. */
+            set_bit(_DOMF_ctrl_pause, &d->domain_flags);
+            for_each_vcpu ( d, v )
+                vcpu_sleep_nosync(v);
             break;
         }
         
@@ -3016,8 +3031,8 @@
     modified = revalidate_l1(d, pl1e, d->arch.ptwr[which].page);
     unmap_domain_page(pl1e);
     perfc_incr_histo(wpt_updates, modified, PT_UPDATES);
-    ptwr_eip_stat_update(  d->arch.ptwr[which].eip, d->domain_id, modified);
-    d->arch.ptwr[which].prev_nr_updates  = modified;
+    ptwr_eip_stat_update(d->arch.ptwr[which].eip, d->domain_id, modified);
+    d->arch.ptwr[which].prev_nr_updates = modified;
 
     /*
      * STEP 3. Reattach the L1 p.t. page into the current address space.
@@ -3366,7 +3381,9 @@
 
 void ptwr_destroy(struct domain *d)
 {
+    LOCK_BIGLOCK(d);
     cleanup_writable_pagetable(d);
+    UNLOCK_BIGLOCK(d);
     free_xenheap_page(d->arch.ptwr[PTWR_PT_ACTIVE].page);
     free_xenheap_page(d->arch.ptwr[PTWR_PT_INACTIVE].page);
 }
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/setup.c      Mon Oct 24 15:08:13 2005
@@ -78,7 +78,6 @@
 extern void init_IRQ(void);
 extern void trap_init(void);
 extern void early_time_init(void);
-extern void ac_timer_init(void);
 extern void initialize_keytable(void);
 extern void early_cpu_init(void);
 
@@ -142,6 +141,7 @@
 static void __init start_of_day(void)
 {
     int i;
+    unsigned long vgdt, gdt_pfn;
 
     early_cpu_init();
 
@@ -159,10 +159,17 @@
 
     arch_do_createdomain(current);
     
-    /* Map default GDT into their final position in the idle page table. */
-    map_pages_to_xen(
-        GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE,
-        virt_to_phys(gdt_table) >> PAGE_SHIFT, 1, PAGE_HYPERVISOR);
+    /*
+     * Map default GDT into its final positions in the idle page table. As
+     * noted in arch_do_createdomain(), we must map for every possible VCPU#.
+     */
+    vgdt = GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE;
+    gdt_pfn = virt_to_phys(gdt_table) >> PAGE_SHIFT;
+    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+    {
+        map_pages_to_xen(vgdt, gdt_pfn, 1, PAGE_HYPERVISOR);
+        vgdt += 1 << PDPT_VCPU_VA_SHIFT;
+    }
 
     find_smp_config();
 
@@ -420,6 +427,9 @@
            nr_pages << (PAGE_SHIFT - 10));
     total_pages = nr_pages;
 
+    /* Sanity check for unwanted bloat of dom0_op_t structure. */
+    BUG_ON(sizeof(((dom0_op_t *)0)->u) != sizeof(((dom0_op_t *)0)->u.pad));
+
     init_frametable();
 
     end_boot_allocator();
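
The new sanity check in setup.c pins the dom0_op_t ABI: the argument union must never outgrow its pad[] member. Because sizeof never evaluates its operand, applying it to a member of a (dom0_op_t *)0 is a safe way to compare the two sizes without needing an object. A self-contained sketch of the same idiom; fake_dom0_op_t below is a made-up stand-in, not the real interface structure.

/*
 * sizeof-on-a-null-pointer idiom from the setup.c hunk above, applied to a
 * made-up structure.  The union carries an explicit pad[] that fixes its
 * intended ABI size; the check fires if any member grows past the pad.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct {
    uint32_t cmd;
    union {
        struct { uint32_t a, b; } op1;
        struct { uint64_t x; }    op2;
        uint8_t pad[16];          /* intended ABI size of the union */
    } u;
} fake_dom0_op_t;

int main(void)
{
    /* Mirrors: BUG_ON(sizeof(((dom0_op_t *)0)->u) != sizeof(((dom0_op_t *)0)->u.pad)); */
    if (sizeof(((fake_dom0_op_t *)0)->u) != sizeof(((fake_dom0_op_t *)0)->u.pad))
        printf("union u has outgrown its pad -- ABI bloat!\n");
    else
        printf("union u matches its pad (%zu bytes)\n",
               sizeof(((fake_dom0_op_t *)0)->u.pad));
    return 0;
}
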
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/shadow.c     Mon Oct 24 15:08:13 2005
@@ -616,13 +616,6 @@
         pt_va = ((va >> L1_PAGETABLE_SHIFT) & ~(entries - 1)) << 
L1_PAGETABLE_SHIFT;
         spl1e = (l1_pgentry_t *) __shadow_get_l1e(v, pt_va, &tmp_sl1e);
 
-        /*
-        gpl1e = &(linear_pg_table[l1_linear_offset(va) &
-                              ~(L1_PAGETABLE_ENTRIES-1)]);
-
-        spl1e = &(shadow_linear_pg_table[l1_linear_offset(va) &
-                                     ~(L1_PAGETABLE_ENTRIES-1)]);*/
-
         for ( i = 0; i < GUEST_L1_PAGETABLE_ENTRIES; i++ )
         {
             l1pte_propagate_from_guest(d, gpl1e[i], &sl1e);
@@ -645,7 +638,8 @@
                 min = i;
             if ( likely(i > max) )
                 max = i;
-        }
+            set_guest_back_ptr(d, sl1e, sl1mfn, i);
+          }
 
         frame_table[sl1mfn].tlbflush_timestamp =
             SHADOW_ENCODE_MIN_MAX(min, max);
@@ -716,8 +710,8 @@
         }
     }
 
+    set_guest_back_ptr(d, new_spte, l2e_get_pfn(sl2e), l1_table_offset(va));
     __shadow_set_l1e(v, va, &new_spte);
-
     shadow_update_min_max(l2e_get_pfn(sl2e), l1_table_offset(va));
 }
 
@@ -877,6 +871,7 @@
 
     perfc_incrc(shadow_mark_mfn_out_of_sync_calls);
 
+    entry->v = v;
     entry->gpfn = gpfn;
     entry->gmfn = mfn;
     entry->snapshot_mfn = shadow_make_snapshot(d, gpfn, mfn);
@@ -943,6 +938,7 @@
     entry->writable_pl1e =
         l2e_get_paddr(sl2e) | (sizeof(l1_pgentry_t) * l1_table_offset(va));
     ASSERT( !(entry->writable_pl1e & (sizeof(l1_pgentry_t)-1)) );
+    entry->va = va;
 
     // Increment shadow's page count to represent the reference
     // inherent in entry->writable_pl1e
@@ -1133,6 +1129,24 @@
         delete_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, PGT_writable_pred);
         perfc_decr(writable_pte_predictions);
     }
+}
+
+static int fix_entry(
+    struct domain *d, 
+    l1_pgentry_t *pt, u32 *found, int is_l1_shadow, u32 max_refs_to_find)
+{
+    l1_pgentry_t old = *pt;
+    l1_pgentry_t new = old;
+
+    l1e_remove_flags(new,_PAGE_RW);
+    if ( is_l1_shadow && !shadow_get_page_from_l1e(new, d) )
+        BUG();
+    (*found)++;
+    *pt = new;
+    if ( is_l1_shadow )
+        shadow_put_page_from_l1e(old, d);
+
+    return (*found == max_refs_to_find);
 }
 
 static u32 remove_all_write_access_in_ptpage(
@@ -1156,49 +1170,28 @@
 
     match = l1e_from_pfn(readonly_gmfn, flags);
 
-    // returns true if all refs have been found and fixed.
-    //
-    int fix_entry(int i)
-    {
-        l1_pgentry_t old = pt[i];
-        l1_pgentry_t new = old;
-
-        l1e_remove_flags(new,_PAGE_RW);
-        if ( is_l1_shadow && !shadow_get_page_from_l1e(new, d) )
-            BUG();
-        found++;
-        pt[i] = new;
-        if ( is_l1_shadow )
-            shadow_put_page_from_l1e(old, d);
-
-#if 0
-        printk("removed write access to pfn=%lx mfn=%lx in smfn=%lx entry %x "
-               "is_l1_shadow=%d\n",
-               readonly_gpfn, readonly_gmfn, pt_mfn, i, is_l1_shadow);
-#endif
-
-        return (found == max_refs_to_find);
-    }
-
-    i = readonly_gpfn & (GUEST_L1_PAGETABLE_ENTRIES - 1);
-    if ( !l1e_has_changed(pt[i], match, flags) && fix_entry(i) )
-    {
-        perfc_incrc(remove_write_fast_exit);
-        increase_writable_pte_prediction(d, readonly_gpfn, prediction);
-        unmap_domain_page(pt);
-        return found;
+    if ( shadow_mode_external(d) ) {
+        i = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_va_mask) 
+            >> PGT_va_shift;
+
+        if ( (i >= 0 && i <= L1_PAGETABLE_ENTRIES) &&
+             !l1e_has_changed(pt[i], match, flags) && 
+             fix_entry(d, &pt[i], &found, is_l1_shadow, max_refs_to_find) &&
+             !prediction )
+            goto out;
     }
  
     for (i = 0; i < GUEST_L1_PAGETABLE_ENTRIES; i++)
     {
-        if ( unlikely(!l1e_has_changed(pt[i], match, flags)) && fix_entry(i) )
+        if ( unlikely(!l1e_has_changed(pt[i], match, flags)) && 
+             fix_entry(d, &pt[i], &found, is_l1_shadow, max_refs_to_find) )
             break;
     }
 
+out:
     unmap_domain_page(pt);
 
     return found;
-#undef MATCH_ENTRY
 }
 
 static int remove_all_write_access(
@@ -1206,8 +1199,8 @@
 {
     int i;
     struct shadow_status *a;
-    u32 found = 0, fixups, write_refs;
-    unsigned long prediction, predicted_gpfn, predicted_smfn;
+    u32 found = 0, write_refs;
+    unsigned long predicted_smfn;
 
     ASSERT(shadow_lock_is_acquired(d));
     ASSERT(VALID_MFN(readonly_gmfn));
@@ -1238,26 +1231,18 @@
         return 1;
     }
 
-    // Before searching all the L1 page tables, check the typical culprit first
-    //
-    if ( (prediction = predict_writable_pte_page(d, readonly_gpfn)) )
-    {
-        predicted_gpfn = prediction & PGT_mfn_mask;
-        if ( (predicted_smfn = __shadow_status(d, predicted_gpfn, PGT_l1_shadow)) &&
-             (fixups = remove_all_write_access_in_ptpage(d, predicted_gpfn, predicted_smfn, readonly_gpfn, readonly_gmfn, write_refs, prediction)) )
-        {
-            found += fixups;
-            if ( found == write_refs )
-            {
-                perfc_incrc(remove_write_predicted);
-                return 1;
-            }
-        }
-        else
-        {
-            perfc_incrc(remove_write_bad_prediction);
-            decrease_writable_pte_prediction(d, readonly_gpfn, prediction);
-        }
+    if ( shadow_mode_external(d) ) {
+        if (write_refs-- == 0) 
+            return 0;
+
+         // Use the back pointer to locate the shadow page that can contain
+         // the PTE of interest
+         if ( (predicted_smfn = frame_table[readonly_gmfn].tlbflush_timestamp) ) {
+             found += remove_all_write_access_in_ptpage(
+                 d, predicted_smfn, predicted_smfn, readonly_gpfn, readonly_gmfn, write_refs, 0);
+             if ( found == write_refs )
+                 return 0;
+         }
     }
 
     // Search all the shadow L1 page tables...
@@ -1276,7 +1261,7 @@
             {
                found += remove_all_write_access_in_ptpage(d, a->gpfn_and_flags & PGT_mfn_mask, a->smfn, readonly_gpfn, readonly_gmfn, write_refs - found, a->gpfn_and_flags & PGT_mfn_mask);
                 if ( found == write_refs )
-                    return 1;
+                    return 0;
             }
 
             a = a->next;
@@ -1357,6 +1342,7 @@
             guest_l1_pgentry_t *guest1 = guest;
             l1_pgentry_t *shadow1 = shadow;
             guest_l1_pgentry_t *snapshot1 = snapshot;
+            int unshadow_l1 = 0;
 
             ASSERT(VM_ASSIST(d, VMASST_TYPE_writable_pagetables) ||
                    shadow_mode_write_all(d));
@@ -1375,8 +1361,15 @@
                 if ( (i < min_snapshot) || (i > max_snapshot) ||
                     guest_l1e_has_changed(guest1[i], snapshot1[i], PAGE_FLAG_MASK) )
                 {
-                    need_flush |= validate_pte_change(d, guest1[i], &shadow1[i]);
-
+                    int error; 
+
+                    error = validate_pte_change(d, guest1[i], &shadow1[i]);
+                    if ( error ==  -1 ) 
+                        unshadow_l1 = 1;
+                    else {
+                        need_flush |= error;
+                        set_guest_back_ptr(d, shadow1[i], smfn, i);
+                    }
                     // can't update snapshots of linear page tables -- they
                     // are used multiple times...
                     //
@@ -1388,6 +1381,19 @@
             perfc_incrc(resync_l1);
             perfc_incr_histo(wpt_updates, changed, PT_UPDATES);
            perfc_incr_histo(l1_entries_checked, max_shadow - min_shadow + 1, PT_UPDATES);
+            if (unshadow_l1) {
+                pgentry_64_t l2e;
+
+                __shadow_get_l2e(entry->v, entry->va, &l2e);
+                if (entry_get_flags(l2e) & _PAGE_PRESENT) {
+                    entry_remove_flags(l2e, _PAGE_PRESENT);
+                    __shadow_set_l2e(entry->v, entry->va, &l2e);
+
+                    if (entry->v == current)
+                        need_flush = 1;
+                }
+            }
+
             break;
         }
 #if defined (__i386__)
@@ -1604,6 +1610,8 @@
              !shadow_get_page_from_l1e(npte, d) )
             BUG();
         *ppte = npte;
+        set_guest_back_ptr(d, npte, (entry->writable_pl1e) >> PAGE_SHIFT, 
+                           (entry->writable_pl1e & ~PAGE_MASK)/sizeof(l1_pgentry_t));
         shadow_put_page_from_l1e(opte, d);
 
         unmap_domain_page(ppte);
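
The main structural change in this file replaces the GCC nested function fix_entry() with a file-scope helper that takes the PTE slot and the running count explicitly and reports when max_refs_to_find has been reached. A simplified standalone sketch of that shape, without Xen's shadow refcounting (flag values and masks below are invented for the example):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_RW   0x2u            /* illustrative flag bit */
    #define PFN_MASK  (~0xfffu)

    /* Clear RW on *pt, bump *found, report whether we hit the target count.
     * Mirrors the shape of the new file-scope fix_entry() helper, minus the
     * shadow refcounting (shadow_get/put_page_from_l1e) that Xen does. */
    static int fix_entry(uint32_t *pt, uint32_t *found, uint32_t max_refs)
    {
        *pt &= ~PAGE_RW;
        (*found)++;
        return (*found == max_refs);
    }

    int main(void)
    {
        uint32_t pt[6] = { 0x1003, 0x5007, 0x1003, 0x9001, 0x1003, 0x1001 };
        uint32_t target = 0x1000, found = 0, max_refs = 2;

        for (int i = 0; i < 6; i++)
            if ((pt[i] & PFN_MASK) == target && (pt[i] & PAGE_RW) &&
                fix_entry(&pt[i], &found, max_refs))
                break;                 /* stop once enough refs are fixed */

        printf("downgraded %u writable entries\n", found);  /* prints 2 */
        return 0;
    }
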
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c   Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/shadow32.c   Mon Oct 24 15:08:13 2005
@@ -1032,7 +1032,12 @@
             if ( !get_page_type(page, PGT_writable_page) )
                 BUG();
             put_page_type(page);
-
+            /*
+             * We use tlbflush_timestamp as a back pointer to the smfn, and
+             * need to clean it up here.
+             */
+            if ( shadow_mode_external(d) )
+                page->tlbflush_timestamp = 0;
             list_ent = page->list.next;
         }
     }
@@ -1181,14 +1186,6 @@
 {
     if ( unlikely(!shadow_mode_enabled(d)) )
         return;
-
-    /*
-     * Currently this does not fix up page ref counts, so it is valid to call
-     * only when a domain is being destroyed.
-     */
-    BUG_ON(!test_bit(_DOMF_dying, &d->domain_flags) &&
-           shadow_mode_refcounts(d));
-    d->arch.shadow_tainted_refcnts = shadow_mode_refcounts(d);
 
     free_shadow_pages(d);
     free_writable_pte_predictions(d);
@@ -1390,18 +1387,6 @@
     return rc;
 }
 
-/*
- * XXX KAF: Why is this VMX specific?
- */
-void vmx_shadow_clear_state(struct domain *d)
-{
-    SH_VVLOG("%s:", __func__);
-    shadow_lock(d);
-    free_shadow_pages(d);
-    shadow_unlock(d);
-    update_pagetables(d->vcpu[0]);
-}
-
 unsigned long
 gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
 {
@@ -1462,14 +1447,10 @@
 
     hl2 = map_domain_page(hl2mfn);
 
-#ifdef __i386__
     if ( shadow_mode_external(d) )
         limit = L2_PAGETABLE_ENTRIES;
     else
         limit = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
-#else
-    limit = 0; /* XXX x86/64 XXX */
-#endif
 
     memset(hl2, 0, limit * sizeof(l1_pgentry_t));
 
@@ -1665,6 +1646,7 @@
                 min = i;
             if ( likely(i > max) )
                 max = i;
+            set_guest_back_ptr(d, sl1e, sl1mfn, i);
         }
 
         frame_table[sl1mfn].tlbflush_timestamp =
@@ -1847,6 +1829,7 @@
 
     perfc_incrc(shadow_mark_mfn_out_of_sync_calls);
 
+    entry->v = v;
     entry->gpfn = gpfn;
     entry->gmfn = mfn;
     entry->snapshot_mfn = shadow_make_snapshot(d, gpfn, mfn);
@@ -1893,6 +1876,7 @@
     entry->writable_pl1e =
         l2e_get_paddr(sl2e) | (sizeof(l1_pgentry_t) * l1_table_offset(va));
     ASSERT( !(entry->writable_pl1e & (sizeof(l1_pgentry_t)-1)) );
+    entry->va = va;
 
     // Increment shadow's page count to represent the reference
     // inherent in entry->writable_pl1e
@@ -2070,6 +2054,24 @@
 
         xfree(gpfn_list);
     }
+}
+
+static int fix_entry(
+    struct domain *d, 
+    l1_pgentry_t *pt, u32 *found, int is_l1_shadow, u32 max_refs_to_find)
+{
+    l1_pgentry_t old = *pt;
+    l1_pgentry_t new = old;
+
+    l1e_remove_flags(new,_PAGE_RW);
+    if ( is_l1_shadow && !shadow_get_page_from_l1e(new, d) )
+        BUG();
+    (*found)++;
+    *pt = new;
+    if ( is_l1_shadow )
+        shadow_put_page_from_l1e(old, d);
+
+    return (*found == max_refs_to_find);
 }
 
 static u32 remove_all_write_access_in_ptpage(
@@ -2088,49 +2090,28 @@
 
     match = l1e_from_pfn(readonly_gmfn, flags);
 
-    // returns true if all refs have been found and fixed.
-    //
-    int fix_entry(int i)
-    {
-        l1_pgentry_t old = pt[i];
-        l1_pgentry_t new = old;
-
-        l1e_remove_flags(new,_PAGE_RW);
-        if ( is_l1_shadow && !shadow_get_page_from_l1e(new, d) )
-            BUG();
-        found++;
-        pt[i] = new;
-        if ( is_l1_shadow )
-            shadow_put_page_from_l1e(old, d);
-
-#if 0
-        printk("removed write access to pfn=%lx mfn=%lx in smfn=%lx entry %x "
-               "is_l1_shadow=%d\n",
-               readonly_gpfn, readonly_gmfn, pt_mfn, i, is_l1_shadow);
-#endif
-
-        return (found == max_refs_to_find);
-    }
-
-    i = readonly_gpfn & (L1_PAGETABLE_ENTRIES - 1);
-    if ( !l1e_has_changed(pt[i], match, flags) && fix_entry(i) )
-    {
-        perfc_incrc(remove_write_fast_exit);
-        increase_writable_pte_prediction(d, readonly_gpfn, prediction);
-        unmap_domain_page(pt);
-        return found;
-    }
- 
+    if ( shadow_mode_external(d) ) {
+        i = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_va_mask) 
+            >> PGT_va_shift;
+
+        if ( (i >= 0 && i <= L1_PAGETABLE_ENTRIES) &&
+             !l1e_has_changed(pt[i], match, flags) && 
+             fix_entry(d, &pt[i], &found, is_l1_shadow, max_refs_to_find) &&
+             !prediction )
+            goto out;
+    }
+
     for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
     {
-        if ( unlikely(!l1e_has_changed(pt[i], match, flags)) && fix_entry(i) )
+        if ( unlikely(!l1e_has_changed(pt[i], match, flags)) && 
+             fix_entry(d, &pt[i], &found, is_l1_shadow, max_refs_to_find) )
             break;
     }
 
+out:
     unmap_domain_page(pt);
 
     return found;
-#undef MATCH_ENTRY
 }
 
 int shadow_remove_all_write_access(
@@ -2138,8 +2119,8 @@
 {
     int i;
     struct shadow_status *a;
-    u32 found = 0, fixups, write_refs;
-    unsigned long prediction, predicted_gpfn, predicted_smfn;
+    u32 found = 0, write_refs;
+    unsigned long predicted_smfn;
 
     ASSERT(shadow_lock_is_acquired(d));
     ASSERT(VALID_MFN(readonly_gmfn));
@@ -2169,27 +2150,19 @@
         perfc_incrc(remove_write_no_work);
         return 1;
     }
-
-    // Before searching all the L1 page tables, check the typical culprit first
-    //
-    if ( (prediction = predict_writable_pte_page(d, readonly_gpfn)) )
-    {
-        predicted_gpfn = prediction & PGT_mfn_mask;
-        if ( (predicted_smfn = __shadow_status(d, predicted_gpfn, PGT_l1_shadow)) &&
-             (fixups = remove_all_write_access_in_ptpage(d, predicted_gpfn, predicted_smfn, readonly_gpfn, readonly_gmfn, write_refs, prediction)) )
-        {
-            found += fixups;
-            if ( found == write_refs )
-            {
-                perfc_incrc(remove_write_predicted);
-                return 1;
-            }
-        }
-        else
-        {
-            perfc_incrc(remove_write_bad_prediction);
-            decrease_writable_pte_prediction(d, readonly_gpfn, prediction);
-        }
+    
+    if ( shadow_mode_external(d) ) {
+        if (write_refs-- == 0) 
+            return 0;
+
+         // Use the back pointer to locate the shadow page that can contain
+         // the PTE of interest
+         if ( (predicted_smfn = frame_table[readonly_gmfn].tlbflush_timestamp) ) {
+             found += remove_all_write_access_in_ptpage(
+                 d, predicted_smfn, predicted_smfn, readonly_gpfn, readonly_gmfn, write_refs, 0);
+             if ( found == write_refs )
+                 return 0;
+         }
     }
 
     // Search all the shadow L1 page tables...
@@ -2203,7 +2176,7 @@
             {
                found += remove_all_write_access_in_ptpage(d, a->gpfn_and_flags & PGT_mfn_mask, a->smfn, readonly_gpfn, readonly_gmfn, write_refs - found, a->gpfn_and_flags & PGT_mfn_mask);
                 if ( found == write_refs )
-                    return 1;
+                    return 0;
             }
 
             a = a->next;
@@ -2349,6 +2322,7 @@
             l1_pgentry_t *guest1 = guest;
             l1_pgentry_t *shadow1 = shadow;
             l1_pgentry_t *snapshot1 = snapshot;
+            int unshadow_l1 = 0;
 
             ASSERT(VM_ASSIST(d, VMASST_TYPE_writable_pagetables) ||
                    shadow_mode_write_all(d));
@@ -2375,19 +2349,39 @@
                 if ( (i < min_snapshot) || (i > max_snapshot) ||
                      l1e_has_changed(guest1[i], snapshot1[i], PAGE_FLAG_MASK) )
                 {
-                    need_flush |= validate_pte_change(d, guest1[i], &shadow1[i]);
+                    int error;
+
+                    error = validate_pte_change(d, guest1[i], &shadow1[i]);
+                    if ( error ==  -1 ) 
+                        unshadow_l1 = 1;
+                    else {
+                        need_flush |= error;
+                        set_guest_back_ptr(d, shadow1[i], smfn, i);
+                    }
 
                     // can't update snapshots of linear page tables -- they
                     // are used multiple times...
                     //
                     // snapshot[i] = new_pte;
-
                     changed++;
                 }
             }
             perfc_incrc(resync_l1);
             perfc_incr_histo(wpt_updates, changed, PT_UPDATES);
            perfc_incr_histo(l1_entries_checked, max_shadow - min_shadow + 1, PT_UPDATES);
+            if (unshadow_l1) {
+                l2_pgentry_t l2e;
+
+                __shadow_get_l2e(entry->v, entry->va, &l2e);
+                if (l2e_get_flags(l2e) & _PAGE_PRESENT) {
+                    l2e_remove_flags(l2e, _PAGE_PRESENT);
+                    __shadow_set_l2e(entry->v, entry->va, l2e);
+
+                    if (entry->v == current)
+                        need_flush = 1;
+                }
+            }
+
             break;
         }
         case PGT_l2_shadow:
@@ -2530,6 +2524,8 @@
              !shadow_get_page_from_l1e(npte, d) )
             BUG();
         *ppte = npte;
+        set_guest_back_ptr(d, npte, (entry->writable_pl1e) >> PAGE_SHIFT, 
+                           (entry->writable_pl1e & ~PAGE_MASK)/sizeof(l1_pgentry_t));
         shadow_put_page_from_l1e(opte, d);
 
         unmap_domain_page(ppte);
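
Both shadow variants now consult a back pointer packed into spare bits of the page's type_info word (via PGT_va_mask/PGT_va_shift) to jump straight to the likely PTE slot, instead of the old writable-PTE prediction. A standalone sketch of packing and unpacking such an index, with invented mask and shift values:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative layout only -- Xen's real PGT_va_mask/PGT_va_shift
     * differ; the point is just "an index packed into spare type bits". */
    #define VA_SHIFT  20u
    #define VA_MASK   (0x3ffu << VA_SHIFT)   /* room for indices 0..1023 */

    static uint32_t pack_index(uint32_t type_info, uint32_t idx)
    {
        return (type_info & ~VA_MASK) | ((idx << VA_SHIFT) & VA_MASK);
    }

    static uint32_t unpack_index(uint32_t type_info)
    {
        return (type_info & VA_MASK) >> VA_SHIFT;
    }

    int main(void)
    {
        uint32_t type_info = 0x00000007;           /* unrelated low bits */
        type_info = pack_index(type_info, 311);    /* remember PTE slot 311 */
        printf("stored index = %u\n", unpack_index(type_info)); /* 311 */
        return 0;
    }
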
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c      Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/shadow_public.c      Mon Oct 24 15:08:13 2005
@@ -879,14 +879,6 @@
     if ( unlikely(!shadow_mode_enabled(d)) )
         return;
 
-    /*
-     * Currently this does not fix up page ref counts, so it is valid to call
-     * only when a domain is being destroyed.
-     */
-    BUG_ON(!test_bit(_DOMF_dying, &d->domain_flags) &&
-           shadow_mode_refcounts(d));
-    d->arch.shadow_tainted_refcnts = shadow_mode_refcounts(d);
-
     free_shadow_pages(d);
     free_writable_pte_predictions(d);
 
@@ -1095,7 +1087,12 @@
             if ( !get_page_type(page, PGT_writable_page) )
                 BUG();
             put_page_type(page);
-
+            /*
+             * We use tlbflush_timestamp as a back pointer to the smfn, and
+             * need to clean it up here.
+             */
+            if ( shadow_mode_external(d) )
+                page->tlbflush_timestamp = 0;
             list_ent = page->list.next;
         }
     }
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/time.c       Mon Oct 24 15:08:13 2005
@@ -61,12 +61,23 @@
 
 static struct cpu_time cpu_time[NR_CPUS];
 
-/* Protected by platform_timer_lock. */
+/*
+ * Protected by platform_timer_lock, which must be acquired with interrupts
+ * disabled because pit_overflow() is called from PIT ch0 interrupt context.
+ */
 static s_time_t stime_platform_stamp;
 static u64 platform_timer_stamp;
 static struct time_scale platform_timer_scale;
 static spinlock_t platform_timer_lock = SPIN_LOCK_UNLOCKED;
 static u64 (*read_platform_count)(void);
+
+/*
+ * Folding the 16-bit PIT into the 64-bit software counter is a really
+ * critical operation! We therefore do it directly in the PIT ch0
+ * interrupt handler, based on this flag.
+ */
+static int using_pit;
+static void pit_overflow(void);
 
 /*
  * 32-bit division of integer dividend and integer divisor yielding
@@ -135,14 +146,16 @@
 
 void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
 {
+    ASSERT(local_irq_is_enabled());
+
     if ( timer_ack ) 
     {
         extern spinlock_t i8259A_lock;
-        spin_lock(&i8259A_lock);
+        spin_lock_irq(&i8259A_lock);
         outb(0x0c, 0x20);
         /* Ack the IRQ; AEOI will end it automatically. */
         inb(0x20);
-        spin_unlock(&i8259A_lock);
+        spin_unlock_irq(&i8259A_lock);
     }
     
     /* Update jiffies counter. */
@@ -151,6 +164,9 @@
     /* Rough hack to allow accurate timers to sort-of-work with no APIC. */
     if ( !cpu_has_apic )
         raise_softirq(AC_TIMER_SOFTIRQ);
+
+    if ( using_pit )
+        pit_overflow();
 }
 
 static struct irqaction irq0 = { timer_interrupt, "timer", NULL};
@@ -280,7 +296,6 @@
 /* Protected by platform_timer_lock. */
 static u64 pit_counter64;
 static u16 pit_stamp;
-static struct ac_timer pit_overflow_timer;
 
 static u16 pit_read_counter(void)
 {
@@ -292,17 +307,15 @@
     return count;
 }
 
-static void pit_overflow(void *unused)
+static void pit_overflow(void)
 {
     u16 counter;
 
-    spin_lock(&platform_timer_lock);
+    spin_lock_irq(&platform_timer_lock);
     counter = pit_read_counter();
     pit_counter64 += (u16)(pit_stamp - counter);
     pit_stamp = counter;
-    spin_unlock(&platform_timer_lock);
-
-    set_ac_timer(&pit_overflow_timer, NOW() + MILLISECS(20));
+    spin_unlock_irq(&platform_timer_lock);
 }
 
 static u64 read_pit_count(void)
@@ -314,12 +327,12 @@
 {
     read_platform_count = read_pit_count;
 
-    init_ac_timer(&pit_overflow_timer, pit_overflow, NULL, 0);
-    pit_overflow(NULL);
+    pit_overflow();
     platform_timer_stamp = pit_counter64;
     set_time_scale(&platform_timer_scale, CLOCK_TICK_RATE);
 
     printk("Platform timer is %s PIT\n", freq_string(CLOCK_TICK_RATE));
+    using_pit = 1;
 
     return 1;
 }
@@ -337,11 +350,11 @@
 {
     u32 counter;
 
-    spin_lock(&platform_timer_lock);
+    spin_lock_irq(&platform_timer_lock);
     counter = hpet_read32(HPET_COUNTER);
     hpet_counter64 += (u32)(counter - hpet_stamp);
     hpet_stamp = counter;
-    spin_unlock(&platform_timer_lock);
+    spin_unlock_irq(&platform_timer_lock);
 
     set_ac_timer(&hpet_overflow_timer, NOW() + hpet_overflow_period);
 }
@@ -455,11 +468,11 @@
 {
     u32 counter;
 
-    spin_lock(&platform_timer_lock);
+    spin_lock_irq(&platform_timer_lock);
     counter = *cyclone_timer;
     cyclone_counter64 += (u32)(counter - cyclone_stamp);
     cyclone_stamp = counter;
-    spin_unlock(&platform_timer_lock);
+    spin_unlock_irq(&platform_timer_lock);
 
     set_ac_timer(&cyclone_overflow_timer, NOW() + MILLISECS(20000));
 }
@@ -526,10 +539,10 @@
     u64 counter;
     s_time_t stime;
 
-    spin_lock(&platform_timer_lock);
+    spin_lock_irq(&platform_timer_lock);
     counter = read_platform_count();
     stime   = __read_platform_stime(counter);
-    spin_unlock(&platform_timer_lock);
+    spin_unlock_irq(&platform_timer_lock);
 
     return stime;
 }
@@ -539,12 +552,12 @@
     u64 counter;
     s_time_t stamp;
 
-    spin_lock(&platform_timer_lock);
+    spin_lock_irq(&platform_timer_lock);
     counter = read_platform_count();
     stamp   = __read_platform_stime(counter);
     stime_platform_stamp = stamp;
     platform_timer_stamp = counter;
-    spin_unlock(&platform_timer_lock);
+    spin_unlock_irq(&platform_timer_lock);
 }
 
 static void init_platform_timer(void)
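
The PIT changes above drop the dedicated overflow ac_timer and instead fold the 16-bit hardware counter into the 64-bit software counter from the channel-0 interrupt handler itself. The folding relies on unsigned 16-bit subtraction handling wraparound; a standalone sketch of just that arithmetic, with values chosen for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a wrapping 16-bit down-counter into a 64-bit software count,
     * mirroring the arithmetic pit_overflow() uses: the unsigned 16-bit
     * subtraction (uint16_t)(stamp - counter) yields the ticks elapsed
     * even when the hardware counter has wrapped since the last read. */
    static uint64_t counter64;
    static uint16_t stamp = 0x0010;

    static void fold(uint16_t counter)
    {
        counter64 += (uint16_t)(stamp - counter);
        stamp = counter;
    }

    int main(void)
    {
        fold(0xFFF0);   /* counter ran 0x0010 -> 0x0000 -> wrapped -> 0xFFF0 */
        fold(0xFF00);   /* another 0xF0 ticks, no wrap this time */
        printf("ticks accumulated = %llu\n",
               (unsigned long long)counter64);   /* 0x20 + 0xF0 = 272 */
        return 0;
    }
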
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c        Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/vmx.c        Mon Oct 24 15:08:13 2005
@@ -57,6 +57,47 @@
 #define TRACE_VMEXIT(index,value) ((void)0)
 #endif
 
+static int vmx_switch_on;
+
+void vmx_final_setup_guest(struct vcpu *v)
+{
+    v->arch.schedule_tail = arch_vmx_do_launch;
+
+    if ( v == v->domain->vcpu[0] )
+    {
+        /*
+         * Required to do this once per domain
+         * XXX todo: add a separate function to do these.
+         */
+        memset(&v->domain->shared_info->evtchn_mask[0], 0xff,
+               sizeof(v->domain->shared_info->evtchn_mask));
+
+        /* Put the domain in shadow mode even though we're going to be using
+         * the shared 1:1 page table initially. It shouldn't hurt */
+        shadow_mode_enable(v->domain,
+                           SHM_enable|SHM_refcounts|
+                           SHM_translate|SHM_external);
+    }
+
+    vmx_switch_on = 1;
+}
+
+void vmx_relinquish_resources(struct vcpu *v)
+{
+    if ( !VMX_DOMAIN(v) )
+        return;
+
+    if (v->vcpu_id == 0) {
+        /* unmap IO shared page */
+        struct domain *d = v->domain;
+        unmap_domain_page((void *)d->arch.vmx_platform.shared_page_va);
+    }
+
+    destroy_vmcs(&v->arch.arch_vmx);
+    free_monitor_pagetable(v);
+    rem_ac_timer(&v->domain->arch.vmx_platform.vmx_pit.pit_timer);
+}
+
 #ifdef __x86_64__
 static struct msr_state percpu_msr[NR_CPUS];
 
@@ -76,6 +117,9 @@
 {
     struct msr_state *host_state;
     host_state = &percpu_msr[smp_processor_id()];
+
+    if ( !vmx_switch_on )
+        return;
 
     while (host_state->flags){
         int i;
@@ -471,6 +515,7 @@
 #endif
 
         /* Unsupportable for virtualised CPUs. */
+        clear_bit(X86_FEATURE_VMXE & 31, &ecx);
         clear_bit(X86_FEATURE_MWAIT & 31, &ecx);
     }
 
@@ -645,13 +690,13 @@
     } else
         p->u.data = value;
 
+    if (vmx_portio_intercept(p)) {
+        p->state = STATE_IORESP_READY;
+        vmx_io_assist(v);
+        return;
+    }
+
     p->state = STATE_IOREQ_READY;
-
-    if (vmx_portio_intercept(p)) {
-        /* no blocking & no evtchn notification */
-        clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
-        return;
-    }
 
     evtchn_send(iopacket_port(v->domain));
     vmx_wait_io();
@@ -1673,7 +1718,7 @@
             store_cpu_user_regs(&regs);
             __vm_clear_bit(GUEST_PENDING_DBG_EXCEPTIONS, PENDING_DEBUG_EXC_BS);
 
-            set_bit(_VCPUF_ctrl_pause, &current->vcpu_flags);
+            domain_pause_for_debugger();
             do_sched_op(SCHEDOP_yield);
 
             break;
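
One small but easy-to-miss hunk above hides the VMXE CPUID feature from the guest with clear_bit(X86_FEATURE_VMXE & 31, &ecx): feature numbers encode a word and a bit, and masking with 31 selects the bit within the 32-bit register word. A standalone sketch of the idea (the feature encodings below are illustrative, not copied from Xen's headers):

    #include <stdint.h>
    #include <stdio.h>

    /* Hide a CPUID feature from the guest by clearing its bit in the ECX
     * word returned for leaf 1. Feature numbers are encoded as
     * (word * 32 + bit), so "feature & 31" picks the bit within one word. */
    #define FAKE_FEATURE_VMXE   (4 * 32 + 5)    /* assumed: word 4, bit 5 */
    #define FAKE_FEATURE_MWAIT  (4 * 32 + 3)    /* assumed: word 4, bit 3 */

    static void hide_feature(uint32_t *ecx, unsigned int feature)
    {
        *ecx &= ~(1u << (feature & 31));
    }

    int main(void)
    {
        uint32_t ecx = 0xffffffffu;             /* pretend CPUID.1:ECX */
        hide_feature(&ecx, FAKE_FEATURE_VMXE);
        hide_feature(&ecx, FAKE_FEATURE_MWAIT);
        printf("guest-visible ecx = %#x\n", ecx);  /* bits 5 and 3 cleared */
        return 0;
    }
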
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/vmx_intercept.c
--- a/xen/arch/x86/vmx_intercept.c      Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/vmx_intercept.c      Mon Oct 24 15:08:13 2005
@@ -32,31 +32,36 @@
 
 #ifdef CONFIG_VMX
 
-/* Check if the request is handled inside xen
-   return value: 0 --not handled; 1 --handled */
+/*
+ * Check if the request is handled inside xen
+ * return value: 0 --not handled; 1 --handled
+ */
 int vmx_io_intercept(ioreq_t *p, int type)
 {
-    struct vcpu *d = current;
-    struct vmx_handler_t *handler = &(d->domain->arch.vmx_platform.vmx_handler);
+    struct vcpu *v = current;
+    struct vmx_io_handler *handler =
+                           &(v->domain->arch.vmx_platform.vmx_io_handler);
     int i;
-    unsigned long addr, offset;
+    unsigned long addr, size;
+
     for (i = 0; i < handler->num_slot; i++) {
         if( type != handler->hdl_list[i].type)
             continue;
-        addr   = handler->hdl_list[i].addr;
-        offset = handler->hdl_list[i].offset;
+        addr = handler->hdl_list[i].addr;
+        size = handler->hdl_list[i].size;
         if (p->addr >= addr &&
-            p->addr <  addr + offset)
+            p->addr <  addr + size)
             return handler->hdl_list[i].action(p);
     }
     return 0;
 }
 
-int register_io_handler(unsigned long addr, unsigned long offset, 
+int register_io_handler(unsigned long addr, unsigned long size,
                         intercept_action_t action, int type)
 {
-    struct vcpu *d = current;
-    struct vmx_handler_t *handler = &(d->domain->arch.vmx_platform.vmx_handler);
+    struct vcpu *v = current;
+    struct vmx_io_handler *handler =
+                             &(v->domain->arch.vmx_platform.vmx_io_handler);
     int num = handler->num_slot;
 
     if (num >= MAX_IO_HANDLER) {
@@ -65,15 +70,15 @@
     }
 
     handler->hdl_list[num].addr = addr;
-    handler->hdl_list[num].offset = offset;
+    handler->hdl_list[num].size = size;
     handler->hdl_list[num].action = action;
     handler->hdl_list[num].type = type;
     handler->num_slot++;
+
     return 1;
-
-}
-
-static void pit_cal_count(struct vmx_virpit_t *vpit)
+}
+
+static void pit_cal_count(struct vmx_virpit *vpit)
 {
     u64 nsec_delta = (unsigned int)((NOW() - vpit->inject_point));
     if (nsec_delta > vpit->period)
@@ -81,7 +86,7 @@
     vpit->count = vpit->init_val - ((nsec_delta * PIT_FREQ / 1000000000ULL) % vpit->init_val );
 }
 
-static void pit_latch_io(struct vmx_virpit_t *vpit)
+static void pit_latch_io(struct vmx_virpit *vpit)
 {
     pit_cal_count(vpit);
 
@@ -103,11 +108,11 @@
         vpit->count_MSB_latched=1;
         break;
     default:
-        BUG();
-    }
-}
-
-static int pit_read_io(struct vmx_virpit_t *vpit)
+        domain_crash_synchronous();
+    }
+}
+
+static int pit_read_io(struct vmx_virpit *vpit)
 {
     if(vpit->count_LSB_latched) {
         /* Read Least Significant Byte */
@@ -168,8 +173,8 @@
 /* the intercept action for PIT DM retval:0--not handled; 1--handled */
 int intercept_pit_io(ioreq_t *p)
 {
-    struct vcpu *d = current;
-    struct vmx_virpit_t *vpit = &(d->domain->arch.vmx_platform.vmx_pit);
+    struct vcpu *v = current;
+    struct vmx_virpit *vpit = &(v->domain->arch.vmx_platform.vmx_pit);
 
     if (p->size != 1 ||
         p->pdata_valid ||
@@ -197,15 +202,14 @@
 /* hooks function for the PIT initialization response iopacket */
 static void pit_timer_fn(void *data)
 {
-    struct vmx_virpit_t *vpit = data;
+    struct vmx_virpit *vpit = data;
     s_time_t   next;
     int        missed_ticks;
 
     missed_ticks = (NOW() - vpit->scheduled)/(s_time_t) vpit->period;
 
     /* Set the pending intr bit, and send evtchn notification to myself. */
-    if (test_and_set_bit(vpit->vector, vpit->intr_bitmap))
-        vpit->pending_intr_nr++; /* already set, then count the pending intr */
+    vpit->pending_intr_nr++; /* count the pending intr */
     evtchn_set_pending(vpit->v, iopacket_port(vpit->v->domain));
 
     /* pick up missed timer tick */
@@ -221,44 +225,37 @@
 /* Only some PIT operations such as load init counter need a hypervisor hook.
  * leave all other operations in user space DM
  */
-void vmx_hooks_assist(struct vcpu *d)
-{
-    vcpu_iodata_t * vio = get_vio(d->domain, d->vcpu_id);
+void vmx_hooks_assist(struct vcpu *v)
+{
+    vcpu_iodata_t *vio = get_vio(v->domain, v->vcpu_id);
     ioreq_t *p = &vio->vp_ioreq;
-    shared_iopage_t *sp = get_sp(d->domain);
-    u64 *intr = &(sp->sp_global.pic_intr[0]);
-    struct vmx_virpit_t *vpit = &(d->domain->arch.vmx_platform.vmx_pit);
+    struct vmx_virpit *vpit = &(v->domain->arch.vmx_platform.vmx_pit);
     int rw_mode, reinit = 0;
-    int oldvec = 0;
 
     /* load init count*/
-    if (p->state == STATE_IORESP_HOOK) { 
+    if (p->state == STATE_IORESP_HOOK) {
         /* set up actimer, handle re-init */
         if ( active_ac_timer(&(vpit->pit_timer)) ) {
            VMX_DBG_LOG(DBG_LEVEL_1, "VMX_PIT: guest reset PIT with channel %lx!\n", (unsigned long) ((p->u.data >> 24) & 0x3) );
             rem_ac_timer(&(vpit->pit_timer));
             reinit = 1;
-            oldvec = vpit->vector;
+ 
         }
         else
-            init_ac_timer(&vpit->pit_timer, pit_timer_fn, vpit, d->processor);
+            init_ac_timer(&vpit->pit_timer, pit_timer_fn, vpit, v->processor);
 
         /* init count for this channel */
-        vpit->init_val = (p->u.data & 0xFFFF) ; 
+        vpit->init_val = (p->u.data & 0xFFFF) ;
         /* frequency(ns) of pit */
-        vpit->period = DIV_ROUND(((vpit->init_val) * 1000000000ULL), PIT_FREQ); 
+        vpit->period = DIV_ROUND(((vpit->init_val) * 1000000000ULL), PIT_FREQ);
         VMX_DBG_LOG(DBG_LEVEL_1,"VMX_PIT: guest set init pit freq:%u ns, initval:0x%x\n", vpit->period, vpit->init_val);
         if (vpit->period < 900000) { /* < 0.9 ms */
             printk("VMX_PIT: guest programmed too small an init_val: %x\n",
                    vpit->init_val);
             vpit->period = 1000000;
         }
-        vpit->vector = ((p->u.data >> 16) & 0xFF);
-
-        if( reinit && oldvec != vpit->vector){
-            clear_bit(oldvec, intr);
-            vpit->pending_intr_nr = 0;
-        }
+         vpit->period_cycles = (u64)vpit->period * cpu_khz / 1000000L;
+         printk("VMX_PIT: guest freq in cycles=%lld\n",(long long)vpit->period_cycles);
 
         vpit->channel = ((p->u.data >> 24) & 0x3);
         vpit->first_injected = 0;
@@ -282,8 +279,7 @@
             break;
         }
 
-        vpit->intr_bitmap = intr;
-        vpit->v = d;
+        vpit->v = v;
 
         vpit->scheduled = NOW() + vpit->period;
         set_ac_timer(&vpit->pit_timer, vpit->scheduled);
@@ -292,8 +288,9 @@
         p->state = STATE_IORESP_READY;
 
         /* register handler to intercept the PIT io when vm_exit */
-        if (!reinit)
+        if (!reinit) {
             register_portio_handler(0x40, 4, intercept_pit_io); 
+        }
     }
 }
 #endif /* CONFIG_VMX */
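
The virtual PIT now derives both a period in nanoseconds and a period in TSC cycles from the guest's init count, and the timer callback uses the period to count missed ticks. A standalone sketch of that arithmetic under assumed values (the init count, CPU frequency and rounding below are examples, not taken from a real guest):

    #include <stdint.h>
    #include <stdio.h>

    #define PIT_FREQ  1193181ULL      /* PIT input clock, Hz */

    int main(void)
    {
        uint64_t init_val = 11932;    /* assumed: guest programmed ~100 Hz tick */
        uint64_t cpu_khz  = 2000000;  /* assumed: 2 GHz host TSC */

        /* Period in ns (rounded), then in TSC cycles -- the latter feeds the
         * TSC_OFFSET drift compensation added in vmx_io.c above. */
        uint64_t period_ns     = (init_val * 1000000000ULL + PIT_FREQ / 2) / PIT_FREQ;
        uint64_t period_cycles = period_ns * cpu_khz / 1000000ULL;

        /* How many ticks were missed if the timer fired 35 ms late? */
        uint64_t late_ns      = 35000000ULL;
        uint64_t missed_ticks = late_ns / period_ns;

        printf("period ~= %llu ns, %llu cycles, missed %llu ticks\n",
               (unsigned long long)period_ns,
               (unsigned long long)period_cycles,
               (unsigned long long)missed_ticks);
        return 0;
    }
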
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/vmx_io.c
--- a/xen/arch/x86/vmx_io.c     Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/vmx_io.c     Mon Oct 24 15:08:13 2005
@@ -37,6 +37,7 @@
 #include <asm/shadow.h>
 
 #include <public/io/ioreq.h>
+#include <public/io/vmx_vpic.h>
 #include <public/io/vmx_vlapic.h>
 
 #ifdef CONFIG_VMX
@@ -624,6 +625,17 @@
         set_eflags_SF(size, diff, regs);
         set_eflags_PF(size, diff, regs);
         break;
+
+    case INSTR_BT:
+        index = operand_index(src);
+        value = get_reg_value(size, index, 0, regs);
+
+        if (p->u.data & (1 << (value & ((1 << 5) - 1))))
+            regs->eflags |= X86_EFLAGS_CF;
+        else
+            regs->eflags &= ~X86_EFLAGS_CF;
+
+        break;
     }
 
     load_cpu_user_regs(regs);
@@ -673,15 +685,15 @@
     struct domain *d = v->domain;
     int port = iopacket_port(d);
 
-    /* evtchn_pending is shared by other event channels in 0-31 range */
-    if (!d->shared_info->evtchn_pending[port>>5])
-        clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
-
-    /* Note: VMX domains may need upcalls as well */
+    /* evtchn_pending_sel bit is shared by other event channels. */
+    if (!d->shared_info->evtchn_pending[port/BITS_PER_LONG])
+        clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
+
+    /* Note: VMX domains may need upcalls as well. */
     if (!v->vcpu_info->evtchn_pending_sel)
         clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
 
-    /* clear the pending bit for port */
+    /* Clear the pending bit for port. */
     return test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]);
 }
 
@@ -715,7 +727,7 @@
             break;
         /* Events other than IOPACKET_PORT might have woken us up. In that
            case, safely go back to sleep. */
-        clear_bit(port>>5, &current->vcpu_info->evtchn_pending_sel);
+        clear_bit(port/BITS_PER_LONG, &current->vcpu_info->evtchn_pending_sel);
         clear_bit(0, &current->vcpu_info->evtchn_upcall_pending);
     } while(1);
 }
@@ -783,75 +795,33 @@
 }
 
 #define BSP_CPU(v)    (!(v->vcpu_id))
-static inline void clear_extint(struct vcpu *v)
-{
-    global_iodata_t *spg;
-    int i;
-    spg = &get_sp(v->domain)->sp_global;
-
-    for(i = 0; i < INTR_LEN; i++)
-        spg->pic_intr[i] = 0;
-}
-
-static inline void clear_highest_bit(struct vcpu *v, int vector)
-{
-    global_iodata_t *spg;
-
-    spg = &get_sp(v->domain)->sp_global;
-
-    clear_bit(vector, &spg->pic_intr[0]);
-}
-
-static inline int find_highest_pic_irq(struct vcpu *v)
-{
-    u64 intr[INTR_LEN];
-    global_iodata_t *spg;
-    int i;
-
-    if(!BSP_CPU(v))
-        return -1;
-
-    spg = &get_sp(v->domain)->sp_global;
-
-    for(i = 0; i < INTR_LEN; i++){
-        intr[i] = spg->pic_intr[i] & ~spg->pic_mask[i];
-    }
-
-    return find_highest_irq((u32 *)&intr[0]);
-}
-
-/*
- * Return 0-255 for pending irq.
- *        -1 when no pending.
- */
-static inline int find_highest_pending_irq(struct vcpu *v, int *type)
-{
-    int result = -1;
-    if ((result = find_highest_pic_irq(v)) != -1){
-        *type = VLAPIC_DELIV_MODE_EXT;
-        return result;
-    }
-    return result;
-}
-
 static inline void
 interrupt_post_injection(struct vcpu * v, int vector, int type)
 {
-    struct vmx_virpit_t *vpit = &(v->domain->arch.vmx_platform.vmx_pit);
+    struct vmx_virpit *vpit = &(v->domain->arch.vmx_platform.vmx_pit);
+    u64    drift;
+
     switch(type)
     {
     case VLAPIC_DELIV_MODE_EXT:
-        if (vpit->pending_intr_nr && vector == vpit->vector)
-            vpit->pending_intr_nr--;
-        else
-            clear_highest_bit(v, vector);
-
-        if (vector == vpit->vector && !vpit->first_injected){
-            vpit->first_injected = 1;
-            vpit->pending_intr_nr = 0;
-        }
-        if (vector == vpit->vector)
+        if ( is_pit_irq(v, vector) ) {
+            if ( !vpit->first_injected ) {
+                vpit->first_injected = 1;
+                vpit->pending_intr_nr = 0;
+            }
+            else {
+                vpit->pending_intr_nr--;
+            }
             vpit->inject_point = NOW();
+            drift = vpit->period_cycles * vpit->pending_intr_nr;
+            drift = v->arch.arch_vmx.tsc_offset - drift;
+            __vmwrite(TSC_OFFSET, drift);
+
+#if defined (__i386__)
+            __vmwrite(TSC_OFFSET_HIGH, (drift >> 32));
+#endif
+ 
+        }
         break;
 
     default:
@@ -881,6 +851,38 @@
 static inline int irq_masked(unsigned long eflags)
 {
     return ((eflags & X86_EFLAGS_IF) == 0);
+}
+
+void pic_irq_request(int *interrupt_request, int level)
+{
+    if (level)
+        *interrupt_request = 1;
+    else
+        *interrupt_request = 0;
+}
+
+void vmx_pic_assist(struct vcpu *v)
+{
+    global_iodata_t *spg;
+    u16   *virq_line, irqs;
+    struct vmx_virpic *pic = &v->domain->arch.vmx_platform.vmx_pic;
+    
+    spg = &get_sp(v->domain)->sp_global;
+    virq_line  = &spg->pic_clear_irr;
+    if ( *virq_line ) {
+        do {
+            irqs = *(volatile u16*)virq_line;
+        } while ( (u16)cmpxchg(virq_line,irqs, 0) != irqs );
+        do_pic_irqs_clear(pic, irqs);
+    }
+    virq_line  = &spg->pic_irr;
+    if ( *virq_line ) {
+        do {
+            irqs = *(volatile u16*)virq_line;
+        } while ( (u16)cmpxchg(virq_line,irqs, 0) != irqs );
+        do_pic_irqs(pic, irqs);
+    }
+
 }
 
 asmlinkage void vmx_intr_assist(void)
@@ -889,11 +891,18 @@
     int highest_vector;
     unsigned long intr_fields, eflags, interruptibility, cpu_exec_control;
     struct vcpu *v = current;
-
-    highest_vector = find_highest_pending_irq(v, &intr_type);
+    struct vmx_platform *plat=&v->domain->arch.vmx_platform;
+    struct vmx_virpit *vpit = &plat->vmx_pit;
+    struct vmx_virpic *pic= &plat->vmx_pic;
+
+    vmx_pic_assist(v);
     __vmread_vcpu(v, CPU_BASED_VM_EXEC_CONTROL, &cpu_exec_control);
-
-    if (highest_vector == -1) {
+    if ( vpit->pending_intr_nr ) {
+        pic_set_irq(pic, 0, 0);
+        pic_set_irq(pic, 0, 1);
+    }
+
+    if ( !plat->interrupt_request ) {
         disable_irq_window(cpu_exec_control);
         return;
     }
@@ -910,22 +919,20 @@
 
     if (interruptibility) {
         enable_irq_window(cpu_exec_control);
-        VMX_DBG_LOG(DBG_LEVEL_1, "guesting pending: %x, interruptibility: %lx",
-                    highest_vector, interruptibility);
+        VMX_DBG_LOG(DBG_LEVEL_1, "interruptibility: %lx",interruptibility);
         return;
     }
 
     __vmread(GUEST_RFLAGS, &eflags);
+    if (irq_masked(eflags)) {
+        enable_irq_window(cpu_exec_control);
+        return;
+    }
+    plat->interrupt_request = 0;
+    highest_vector = cpu_get_pic_interrupt(v, &intr_type); 
 
     switch (intr_type) {
     case VLAPIC_DELIV_MODE_EXT:
-        if (irq_masked(eflags)) {
-            enable_irq_window(cpu_exec_control);
-            VMX_DBG_LOG(DBG_LEVEL_1, "guesting pending: %x, eflags: %lx",
-                        highest_vector, eflags);
-            return;
-        }
-
         vmx_inject_extint(v, highest_vector, VMX_INVALID_ERROR_CODE);
         TRACE_3D(TRC_VMX_INT, v->domain->domain_id, highest_vector, 0);
         break;
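
Several hunks above replace hard-coded ">> 5" event-channel arithmetic with BITS_PER_LONG so the selector-word index stays correct on 64-bit builds. A standalone sketch of the port-to-word/bit split:

    #include <stdio.h>
    #include <limits.h>

    /* Split an event-channel port number into the index of the long word
     * holding its pending bit and the bit within that word. */
    #define BITS_PER_LONG  (CHAR_BIT * (int)sizeof(long))

    int main(void)
    {
        int port = 67;                                    /* example port */
        int word = port / BITS_PER_LONG;
        int bit  = port & (BITS_PER_LONG - 1);

        printf("BITS_PER_LONG=%d: port %d -> evtchn_pending[%d], bit %d\n",
               BITS_PER_LONG, port, word, bit);
        /* On a 32-bit build this is word 2 bit 3; on 64-bit, word 1 bit 3. */
        return 0;
    }
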
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/vmx_platform.c
--- a/xen/arch/x86/vmx_platform.c       Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/vmx_platform.c       Mon Oct 24 15:08:13 2005
@@ -371,7 +371,7 @@
     unsigned long eflags;
     int index, vm86 = 0;
     unsigned char rex = 0;
-    unsigned char tmp_size = 0;
+    unsigned char size_reg = 0;
 
     init_instruction(instr);
 
@@ -428,33 +428,47 @@
 
     case 0x80:
     case 0x81:
-        if (((opcode[1] >> 3) & 7) == 7) { /* cmp $imm, m32/16 */
-            instr->instr = INSTR_CMP;
-
-            if (opcode[0] == 0x80)
-                GET_OP_SIZE_FOR_BYTE(instr->op_size);
-            else
+        {
+            unsigned char ins_subtype = (opcode[1] >> 3) & 7;
+
+            if (opcode[0] == 0x80) {
+                GET_OP_SIZE_FOR_BYTE(size_reg);
+                instr->op_size = BYTE;
+            } else {
                 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
-
-            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
-            instr->immediate = get_immediate(vm86, opcode+1, BYTE);
-            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
-
-            return DECODE_success;
-        } else
-            return DECODE_failure;
+                size_reg = instr->op_size;
+            }
+
+            instr->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
+            instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
+            instr->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);
+
+            switch (ins_subtype) {
+                case 7: /* cmp $imm, m32/16 */
+                    instr->instr = INSTR_CMP;
+                    return DECODE_success;
+
+                case 1: /* or $imm, m32/16 */
+                    instr->instr = INSTR_OR;
+                    return DECODE_success;
+
+                default:
+                    printf("%x, This opcode isn't handled yet!\n", *opcode);
+                    return DECODE_failure;
+            }
+        }
 
     case 0x84:  /* test m8, r8 */
         instr->instr = INSTR_TEST;
         instr->op_size = BYTE;
-        GET_OP_SIZE_FOR_BYTE(tmp_size);
-        return mem_reg(tmp_size, opcode, instr, rex);
+        GET_OP_SIZE_FOR_BYTE(size_reg);
+        return mem_reg(size_reg, opcode, instr, rex);
 
     case 0x88: /* mov r8, m8 */
         instr->instr = INSTR_MOV;
         instr->op_size = BYTE;
-        GET_OP_SIZE_FOR_BYTE(tmp_size);
-        return reg_mem(tmp_size, opcode, instr, rex);
+        GET_OP_SIZE_FOR_BYTE(size_reg);
+        return reg_mem(size_reg, opcode, instr, rex);
 
     case 0x89: /* mov r32/16, m32/16 */
         instr->instr = INSTR_MOV;
@@ -464,8 +478,8 @@
     case 0x8A: /* mov m8, r8 */
         instr->instr = INSTR_MOV;
         instr->op_size = BYTE;
-        GET_OP_SIZE_FOR_BYTE(tmp_size);
-        return mem_reg(tmp_size, opcode, instr, rex);
+        GET_OP_SIZE_FOR_BYTE(size_reg);
+        return mem_reg(size_reg, opcode, instr, rex);
 
     case 0x8B: /* mov m32/16, r32/16 */
         instr->instr = INSTR_MOV;
@@ -475,8 +489,8 @@
     case 0xA0: /* mov <addr>, al */
         instr->instr = INSTR_MOV;
         instr->op_size = BYTE;
-        GET_OP_SIZE_FOR_BYTE(tmp_size);
-        return mem_acc(tmp_size, instr);
+        GET_OP_SIZE_FOR_BYTE(size_reg);
+        return mem_acc(size_reg, instr);
 
     case 0xA1: /* mov <addr>, ax/eax */
         instr->instr = INSTR_MOV;
@@ -486,8 +500,8 @@
     case 0xA2: /* mov al, <addr> */
         instr->instr = INSTR_MOV;
         instr->op_size = BYTE;
-        GET_OP_SIZE_FOR_BYTE(tmp_size);
-        return acc_mem(tmp_size, instr);
+        GET_OP_SIZE_FOR_BYTE(size_reg);
+        return acc_mem(size_reg, instr);
 
     case 0xA3: /* mov ax/eax, <addr> */
         instr->instr = INSTR_MOV;
@@ -541,13 +555,21 @@
             return DECODE_failure;
 
     case 0xF6:
-        if (((opcode[1] >> 3) & 7) == 0) { /* testb $imm8, m8 */
+    case 0xF7:
+        if (((opcode[1] >> 3) & 7) == 0) { /* test $imm8/16/32, m8/16/32 */
             instr->instr = INSTR_TEST;
-            instr->op_size = BYTE;
-
-            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
+
+            if (opcode[0] == 0xF6) {
+                GET_OP_SIZE_FOR_BYTE(size_reg);
+                instr->op_size = BYTE;
+            } else {
+                GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+                size_reg = instr->op_size;
+            }
+
+            instr->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
             instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
-            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
+            instr->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);
 
             return DECODE_success;
         } else
@@ -583,6 +605,14 @@
         instr->operand[0] = mk_operand(instr->op_size, 0, 0, MEMORY);
         return DECODE_success;
 
+    case 0xA3: /* bt r32, m32 */
+        instr->instr = INSTR_BT;
+        index = get_index(opcode + 1, rex);
+        instr->op_size = LONG;
+        instr->operand[0] = mk_operand(instr->op_size, index, 0, REGISTER);
+        instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
+        return DECODE_success;
+
     default:
         printf("0f %x, This opcode isn't handled yet\n", *opcode);
         return DECODE_failure;
@@ -643,13 +673,13 @@
     } else
         p->u.data = value;
 
-    p->state = STATE_IOREQ_READY;
-
     if (vmx_mmio_intercept(p)){
         p->state = STATE_IORESP_READY;
         vmx_io_assist(v);
         return;
     }
+
+    p->state = STATE_IOREQ_READY;
 
     evtchn_send(iopacket_port(v->domain));
     vmx_wait_io();
@@ -843,8 +873,27 @@
         mmio_opp->immediate = mmio_inst.immediate;
 
         /* send the request and wait for the value */
-        send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, mmio_inst.op_size, 0, IOREQ_READ, 0);
-        break;
+        send_mmio_req(IOREQ_TYPE_COPY, gpa, 1,
+                      mmio_inst.op_size, 0, IOREQ_READ, 0);
+        break;
+
+    case INSTR_BT:
+        {
+            unsigned long value = 0;
+            int index, size;
+
+            mmio_opp->instr = mmio_inst.instr;
+            mmio_opp->operand[0] = mmio_inst.operand[0]; /* bit offset */
+            mmio_opp->operand[1] = mmio_inst.operand[1]; /* bit base */
+
+            index = operand_index(mmio_inst.operand[0]);
+            size = operand_size(mmio_inst.operand[0]);
+            value = get_reg_value(size, index, 0, regs);
+
+            send_mmio_req(IOREQ_TYPE_COPY, gpa + (value >> 5), 1,
+                          mmio_inst.op_size, 0, IOREQ_READ, 0);
+            break;
+        }
 
     default:
         printf("Unhandled MMIO instruction\n");
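
For the newly decoded BT r32, m32 instruction, the bit-offset register is split into a 32-bit word index (value >> 5) and a bit position within that word (value & 31), and the fetched word then drives the CF flag. A standalone sketch of that emulation step over a fake in-memory word array:

    #include <stdint.h>
    #include <stdio.h>

    #define X86_EFLAGS_CF  0x1u

    /* Test the selected bit of the word that was read back and update CF,
     * the same way the INSTR_BT handling above does after the MMIO read. */
    static uint32_t emulate_bt(uint32_t word_read, uint32_t bit_offset,
                               uint32_t eflags)
    {
        if (word_read & (1u << (bit_offset & 31)))
            eflags |= X86_EFLAGS_CF;
        else
            eflags &= ~X86_EFLAGS_CF;
        return eflags;
    }

    int main(void)
    {
        uint32_t memory[4] = { 0x0, 0x0, 0x00000100, 0x0 };  /* fake MMIO */
        uint32_t bit_offset = 72;            /* bit 8 of the third word */

        uint32_t word = memory[bit_offset >> 5];
        uint32_t eflags = emulate_bt(word, bit_offset, 0);

        printf("word index %u, bit %u, CF=%u\n",
               bit_offset >> 5, bit_offset & 31, eflags & X86_EFLAGS_CF);
        return 0;
    }
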
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/vmx_vmcs.c
--- a/xen/arch/x86/vmx_vmcs.c   Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/vmx_vmcs.c   Mon Oct 24 15:08:13 2005
@@ -142,7 +142,7 @@
 #endif
 };
 
-static void vmx_setup_platform(struct vcpu *v)
+static void vmx_map_io_shared_page(struct domain *d)
 {
     int i;
     unsigned char e820_map_nr;
@@ -197,12 +197,61 @@
         printk("Can not map io request shared page for VMX domain.\n");
         domain_crash();
     }
-    v->domain->arch.vmx_platform.shared_page_va = (unsigned long)p;
-
-    VMX_DBG_LOG(DBG_LEVEL_1, "eport: %x\n", iopacket_port(v->domain));
-
-    clear_bit(iopacket_port(v->domain),
-              &v->domain->shared_info->evtchn_mask[0]);
+    d->arch.vmx_platform.shared_page_va = (unsigned long)p;
+
+    VMX_DBG_LOG(DBG_LEVEL_1, "eport: %x\n", iopacket_port(d));
+
+    clear_bit(iopacket_port(d),
+              &d->shared_info->evtchn_mask[0]);
+}
+
+#define VCPU_NR_PAGE        0x0009F000
+#define VCPU_NR_OFFSET      0x00000800
+#define VCPU_MAGIC          0x76637075  /* "vcpu" */
+
+static void vmx_set_vcpu_nr(struct domain *d)
+{
+    unsigned char *p;
+    unsigned long mpfn;
+    unsigned int *vcpus;
+
+    mpfn = get_mfn_from_pfn(VCPU_NR_PAGE >> PAGE_SHIFT);
+    if (mpfn == INVALID_MFN) {
+        printk("Can not get vcpu number page mfn for VMX domain.\n");
+        domain_crash_synchronous();
+    }
+
+    p = map_domain_page(mpfn);
+    if (p == NULL) {
+        printk("Can not map vcpu number page for VMX domain.\n");
+        domain_crash_synchronous();
+    }
+
+    vcpus = (unsigned int *)(p + VCPU_NR_OFFSET);
+    if (vcpus[0] != VCPU_MAGIC) {
+        printk("Bad vcpus magic, set vcpu number to 1 by default.\n");
+        d->arch.vmx_platform.nr_vcpu = 1;
+    }
+
+    d->arch.vmx_platform.nr_vcpu = vcpus[1];
+
+    unmap_domain_page(p);
+}
+
+static void vmx_setup_platform(struct domain* d)
+{
+    struct vmx_platform *platform;
+
+    if (!(VMX_DOMAIN(current) && (current->vcpu_id == 0)))
+        return;
+
+    vmx_map_io_shared_page(d);
+    vmx_set_vcpu_nr(d);
+
+    platform = &d->arch.vmx_platform;
+    pic_init(&platform->vmx_pic,  pic_irq_request, 
+             &platform->interrupt_request);
+    register_pic_io_hook();
 }
 
 static void vmx_set_host_env(struct vcpu *v)
@@ -234,9 +283,10 @@
 {
 /* Update CR3, GDT, LDT, TR */
     unsigned int  error = 0;
-    unsigned long pfn = 0;
     unsigned long cr0, cr4;
-    struct pfn_info *page;
+
+    if (v->vcpu_id == 0)
+        vmx_setup_platform(v->domain);
 
     __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );
 
@@ -262,11 +312,6 @@
     error |= __vmwrite(CR4_READ_SHADOW, cr4);
 
     vmx_stts();
-
-    page = (struct pfn_info *) alloc_domheap_page(NULL);
-    pfn = (unsigned long) (page - frame_table);
-
-    vmx_setup_platform(v);
 
     vmx_set_host_env(v);
 
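
vmx_set_vcpu_nr() above reads a magic word plus a VCPU count that the domain builder is assumed to leave at a fixed offset (VCPU_NR_OFFSET) inside a well-known guest page. A standalone sketch of that parsing over a local buffer, rather than a frame mapped with map_domain_page(); the fall-back-to-1 behaviour here follows the apparent intent rather than copying the hunk verbatim:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define VCPU_NR_OFFSET  0x800
    #define VCPU_MAGIC      0x76637075u   /* "vcpu" */

    /* Parse the vcpu-count block at its fixed offset in a page-sized buffer:
     * a magic word followed by the number of vcpus. */
    static unsigned int read_vcpu_nr(const uint8_t *page)
    {
        uint32_t magic, nr;

        memcpy(&magic, page + VCPU_NR_OFFSET, sizeof(magic));
        memcpy(&nr,    page + VCPU_NR_OFFSET + 4, sizeof(nr));

        if (magic != VCPU_MAGIC) {
            printf("bad magic %#x, defaulting to 1 vcpu\n", (unsigned)magic);
            return 1;
        }
        return nr;
    }

    int main(void)
    {
        static uint8_t page[4096];
        uint32_t block[2] = { VCPU_MAGIC, 4 };        /* 4 vcpus */

        memcpy(page + VCPU_NR_OFFSET, block, sizeof(block));
        printf("guest has %u vcpus\n", read_vcpu_nr(page));
        return 0;
    }
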
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/common/acm_ops.c
--- a/xen/common/acm_ops.c      Fri Oct 21 19:58:39 2005
+++ b/xen/common/acm_ops.c      Mon Oct 24 15:08:13 2005
@@ -31,22 +31,23 @@
 
 #if (ACM_USE_SECURITY_POLICY == ACM_NULL_POLICY)
 
-long do_acm_op(acm_op_t * u_acm_op)
+long do_acm_op(struct acm_op * u_acm_op)
 {
     return -ENOSYS;
 }
 
 #else
 
-typedef enum acm_operation {
+enum acm_operation {
     POLICY,                     /* access to policy interface (early drop) */
     GETPOLICY,                  /* dump policy cache */
     SETPOLICY,                  /* set policy cache (controls security) */
     DUMPSTATS,                  /* dump policy statistics */
-    GETSSID                     /* retrieve ssidref for domain id */
-} acm_operation_t;
-
-int acm_authorize_acm_ops(struct domain *d, acm_operation_t pops)
+    GETSSID,                    /* retrieve ssidref for domain id (decide inside authorized domains) */
+    GETDECISION                 /* retrieve ACM decision from authorized domains */
+};
+
+int acm_authorize_acm_ops(struct domain *d, enum acm_operation pops)
 {
     /* all policy management functions are restricted to privileged domains,
      * soon we will introduce finer-grained privileges for policy operations
@@ -59,10 +60,10 @@
     return ACM_ACCESS_PERMITTED;
 }
 
-long do_acm_op(acm_op_t * u_acm_op)
+long do_acm_op(struct acm_op * u_acm_op)
 {
     long ret = 0;
-    acm_op_t curop, *op = &curop;
+    struct acm_op curop, *op = &curop;
 
     /* check here policy decision for policy commands */
     /* for now allow DOM0 only, later indepedently    */
@@ -78,81 +79,148 @@
     switch (op->cmd)
     {
     case ACM_SETPOLICY:
-        {
-            if (acm_authorize_acm_ops(current->domain, SETPOLICY))
-                return -EACCES;
-            printkd("%s: setting policy.\n", __func__);
-            ret = acm_set_policy(op->u.setpolicy.pushcache,
-                                 op->u.setpolicy.pushcache_size, 1);
-            if (ret == ACM_OK)
-                ret = 0;
-            else
-                ret = -ESRCH;
-        }
-        break;
+    {
+        if (acm_authorize_acm_ops(current->domain, SETPOLICY))
+            return -EACCES;
+        printkd("%s: setting policy.\n", __func__);
+        ret = acm_set_policy(op->u.setpolicy.pushcache,
+                             op->u.setpolicy.pushcache_size, 1);
+        if (ret == ACM_OK)
+            ret = 0;
+        else
+            ret = -ESRCH;
+    }
+    break;
 
     case ACM_GETPOLICY:
-        {
-            if (acm_authorize_acm_ops(current->domain, GETPOLICY))
-                return -EACCES;
-            printkd("%s: getting policy.\n", __func__);
-            ret = acm_get_policy(op->u.getpolicy.pullcache,
-                                 op->u.getpolicy.pullcache_size);
-            if (ret == ACM_OK)
-                ret = 0;
-            else
-                ret = -ESRCH;
-        }
-        break;
+    {
+        if (acm_authorize_acm_ops(current->domain, GETPOLICY))
+            return -EACCES;
+        printkd("%s: getting policy.\n", __func__);
+        ret = acm_get_policy(op->u.getpolicy.pullcache,
+                             op->u.getpolicy.pullcache_size);
+        if (ret == ACM_OK)
+            ret = 0;
+        else
+            ret = -ESRCH;
+    }
+    break;
 
     case ACM_DUMPSTATS:
-        {
-            if (acm_authorize_acm_ops(current->domain, DUMPSTATS))
-                return -EACCES;
-            printkd("%s: dumping statistics.\n", __func__);
-            ret = acm_dump_statistics(op->u.dumpstats.pullcache,
-                                      op->u.dumpstats.pullcache_size);
-            if (ret == ACM_OK)
-                ret = 0;
-            else
-                ret = -ESRCH;
-        }
-        break;
+    {
+        if (acm_authorize_acm_ops(current->domain, DUMPSTATS))
+            return -EACCES;
+        printkd("%s: dumping statistics.\n", __func__);
+        ret = acm_dump_statistics(op->u.dumpstats.pullcache,
+                                  op->u.dumpstats.pullcache_size);
+        if (ret == ACM_OK)
+            ret = 0;
+        else
+            ret = -ESRCH;
+    }
+    break;
 
     case ACM_GETSSID:
-        {
-                       ssidref_t ssidref;
-
-            if (acm_authorize_acm_ops(current->domain, GETSSID))
-                return -EACCES;
-
-                       if (op->u.getssid.get_ssid_by == SSIDREF)
-                               ssidref = op->u.getssid.id.ssidref;
-                       else if (op->u.getssid.get_ssid_by == DOMAINID) {
-                               struct domain *subj = find_domain_by_id(op->u.getssid.id.domainid);
-                               if (!subj)
-                                       return -ESRCH; /* domain not found */
-
-                               ssidref = ((struct acm_ssid_domain *)(subj->ssid))->ssidref;
-                               put_domain(subj);
-                       } else
-                               return -ESRCH;
-
-            ret = acm_get_ssid(ssidref,
-                               op->u.getssid.ssidbuf,
-                               op->u.getssid.ssidbuf_size);
-            if (ret == ACM_OK)
-                ret = 0;
-            else
-                ret = -ESRCH;
-        }
-        break;
+    {
+        ssidref_t ssidref;
+
+        if (acm_authorize_acm_ops(current->domain, GETSSID))
+            return -EACCES;
+        printkd("%s: getting SSID.\n", __func__);
+        if (op->u.getssid.get_ssid_by == SSIDREF)
+            ssidref = op->u.getssid.id.ssidref;
+        else if (op->u.getssid.get_ssid_by == DOMAINID) {
+            struct domain *subj = find_domain_by_id(op->u.getssid.id.domainid);
+            if (!subj)
+                return -ESRCH; /* domain not found */
+
+            ssidref = ((struct acm_ssid_domain *)(subj->ssid))->ssidref;
+            put_domain(subj);
+        } else
+            return -ESRCH;
+
+        ret = acm_get_ssid(ssidref,
+                           op->u.getssid.ssidbuf,
+                           op->u.getssid.ssidbuf_size);
+        if (ret == ACM_OK)
+            ret = 0;
+        else
+            ret = -ESRCH;
+    }
+    break;
+
+    case ACM_GETDECISION:
+    {
+        ssidref_t ssidref1, ssidref2;
+
+        if (acm_authorize_acm_ops(current->domain, GETDECISION)) {
+            ret = -EACCES;
+            goto out;
+        }
+        printkd("%s: getting access control decision.\n", __func__);
+        if (op->u.getdecision.get_decision_by1 == SSIDREF) {
+            ssidref1 = op->u.getdecision.id1.ssidref;
+        }
+        else if (op->u.getdecision.get_decision_by1 == DOMAINID) {
+            struct domain *subj = find_domain_by_id(op->u.getdecision.id1.domainid);
+            if (!subj) {
+                ret = -ESRCH; /* domain not found */
+                goto out;
+            }
+            ssidref1 = ((struct acm_ssid_domain *)(subj->ssid))->ssidref;
+            put_domain(subj);
+        } else {
+            ret = -ESRCH;
+            goto out;
+        }
+        if (op->u.getdecision.get_decision_by2 == SSIDREF) {
+            ssidref2 = op->u.getdecision.id2.ssidref;
+        }
+        else if (op->u.getdecision.get_decision_by2 == DOMAINID) {
+            struct domain *subj = find_domain_by_id(op->u.getdecision.id2.domainid);
+            if (!subj) {
+                ret = -ESRCH; /* domain not found */
+                goto out;
+            }
+            ssidref2 = ((struct acm_ssid_domain *)(subj->ssid))->ssidref;
+            put_domain(subj);
+        } else {
+            ret = -ESRCH;
+            goto out;
+        }
+        ret = acm_get_decision(ssidref1, ssidref2, op->u.getdecision.hook);
+    }
+    break;
 
     default:
         ret = -ESRCH;
-
-    }
+    }
+
+ out:
+    if (ret == ACM_ACCESS_PERMITTED) {
+        op->u.getdecision.acm_decision = ACM_ACCESS_PERMITTED;
+        ret = 0;
+    } else if  (ret == ACM_ACCESS_DENIED) {
+        op->u.getdecision.acm_decision = ACM_ACCESS_DENIED;
+        ret = 0;
+    } else {
+        op->u.getdecision.acm_decision = ACM_ACCESS_DENIED;
+        if (ret > 0)
+            ret = -ret;
+    }
+    /* copy decision back to user space */
+    copy_to_user(u_acm_op, op, sizeof(*op));
     return ret;
 }
 
 #endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/common/dom0_ops.c
--- a/xen/common/dom0_ops.c     Fri Oct 21 19:58:39 2005
+++ b/xen/common/dom0_ops.c     Mon Oct 24 15:08:13 2005
@@ -43,43 +43,34 @@
 {
     struct vcpu   *v;
     u64 cpu_time = 0;
-    int vcpu_count = 0;
-    int flags = DOMFLAGS_PAUSED | DOMFLAGS_BLOCKED;
+    int flags = DOMFLAGS_BLOCKED;
     
     info->domain = d->domain_id;
-    
-    memset(&info->vcpu_to_cpu, -1, sizeof(info->vcpu_to_cpu));
-    memset(&info->cpumap, 0, sizeof(info->cpumap));
+    info->nr_online_vcpus = 0;
     
     /* 
-     * - domain is marked as paused or blocked only if all its vcpus 
-     *   are paused or blocked 
+     * - domain is marked as blocked only if all its vcpus are blocked
      * - domain is marked as running if any of its vcpus is running
-     * - only map vcpus that aren't down.  Note, at some point we may
-     *   wish to demux the -1 value to indicate down vs. not-ever-booted
-     *   
      */
     for_each_vcpu ( d, v ) {
-        /* only map vcpus that are up */
-        if ( !(test_bit(_VCPUF_down, &v->vcpu_flags)) )
-            info->vcpu_to_cpu[v->vcpu_id] = v->processor;
-        info->cpumap[v->vcpu_id] = v->cpumap;
-        if ( !(v->vcpu_flags & VCPUF_ctrl_pause) )
-            flags &= ~DOMFLAGS_PAUSED;
-        if ( !(v->vcpu_flags & VCPUF_blocked) )
-            flags &= ~DOMFLAGS_BLOCKED;
-        if ( v->vcpu_flags & VCPUF_running )
-            flags |= DOMFLAGS_RUNNING;
         cpu_time += v->cpu_time;
-        vcpu_count++;
+        info->max_vcpu_id = v->vcpu_id;
+        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
+        {
+            if ( !(v->vcpu_flags & VCPUF_blocked) )
+                flags &= ~DOMFLAGS_BLOCKED;
+            if ( v->vcpu_flags & VCPUF_running )
+                flags |= DOMFLAGS_RUNNING;
+            info->nr_online_vcpus++;
+        }
     }
     
     info->cpu_time = cpu_time;
-    info->n_vcpu = vcpu_count;
     
     info->flags = flags |
-        ((d->domain_flags & DOMF_dying)    ? DOMFLAGS_DYING    : 0) |
-        ((d->domain_flags & DOMF_shutdown) ? DOMFLAGS_SHUTDOWN : 0) |
+        ((d->domain_flags & DOMF_dying)      ? DOMFLAGS_DYING    : 0) |
+        ((d->domain_flags & DOMF_shutdown)   ? DOMFLAGS_SHUTDOWN : 0) |
+        ((d->domain_flags & DOMF_ctrl_pause) ? DOMFLAGS_PAUSED   : 0) |
         d->shutdown_code << DOMFLAGS_SHUTDOWNSHIFT;
 
     if (d->ssid != NULL)
@@ -90,6 +81,8 @@
     info->tot_pages         = d->tot_pages;
     info->max_pages         = d->max_pages;
     info->shared_info_frame = __pa(d->shared_info) >> PAGE_SHIFT;
+
+    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
 }
 
 long do_dom0_op(dom0_op_t *u_dom0_op)
@@ -97,6 +90,7 @@
     long ret = 0;
     dom0_op_t curop, *op = &curop;
     void *ssid = NULL; /* save security ptr between pre and post/fail hooks */
+    static spinlock_t dom0_lock = SPIN_LOCK_UNLOCKED;
 
     if ( !IS_PRIV(current->domain) )
         return -EPERM;
@@ -109,6 +103,8 @@
 
     if ( acm_pre_dom0_op(op, &ssid) )
         return -EACCES;
+
+    spin_lock(&dom0_lock);
 
     switch ( op->cmd )
     {
@@ -150,7 +146,7 @@
         {
             ret = -EINVAL;
             if ( (d != current->domain) && 
-                 test_bit(_DOMF_constructed, &d->domain_flags) )
+                 test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
             {
                 domain_unpause_by_systemcontroller(d);
                 ret = 0;
@@ -167,17 +163,14 @@
         domid_t        dom;
         struct vcpu   *v;
         unsigned int   i, cnt[NR_CPUS] = { 0 };
-        static spinlock_t alloc_lock = SPIN_LOCK_UNLOCKED;
         static domid_t rover = 0;
-
-        spin_lock(&alloc_lock);
 
         dom = op->u.createdomain.domain;
         if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
         {
             ret = -EINVAL;
             if ( !is_free_domid(dom) )
-                goto alloc_out;
+                break;
         }
         else
         {
@@ -191,7 +184,7 @@
 
             ret = -ENOMEM;
             if ( dom == rover )
-                goto alloc_out;
+                break;
 
             rover = dom;
         }
@@ -206,7 +199,7 @@
         /*
          * If we're on a HT system, we only use the first HT for dom0, other 
          * domains will all share the second HT of each CPU. Since dom0 is on 
-            * CPU 0, we favour high numbered CPUs in the event of a tie.
+         * CPU 0, we favour high numbered CPUs in the event of a tie.
          */
         pro = smp_num_siblings - 1;
         for ( i = pro; i < num_online_cpus(); i += smp_num_siblings )
@@ -215,15 +208,60 @@
 
         ret = -ENOMEM;
         if ( (d = do_createdomain(dom, pro)) == NULL )
-            goto alloc_out;
+            break;
+
+        memcpy(d->handle, op->u.createdomain.handle,
+               sizeof(xen_domain_handle_t));
 
         ret = 0;
 
         op->u.createdomain.domain = d->domain_id;
         copy_to_user(u_dom0_op, op, sizeof(*op));
-
-    alloc_out:
-        spin_unlock(&alloc_lock);
+    }
+    break;
+
+    case DOM0_MAX_VCPUS:
+    {
+        struct domain *d;
+        unsigned int i, max = op->u.max_vcpus.max, cpu;
+
+        ret = -EINVAL;
+        if ( max > MAX_VIRT_CPUS )
+            break;
+
+        ret = -ESRCH;
+        if ( (d = find_domain_by_id(op->u.max_vcpus.domain)) == NULL )
+            break;
+
+        /*
+         * Can only create new VCPUs while the domain is not fully constructed
+         * (and hence not runnable). Xen needs auditing for races before
+         * removing this check.
+         */
+        ret = -EINVAL;
+        if ( test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
+            goto maxvcpu_out;
+
+        /* We cannot reduce maximum VCPUs. */
+        ret = -EINVAL;
+        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
+            goto maxvcpu_out;
+
+        ret = -ENOMEM;
+        for ( i = 0; i < max; i++ )
+        {
+            if ( d->vcpu[i] == NULL )
+            {
+                cpu = (d->vcpu[i-1]->processor + 1) % num_online_cpus();
+                if ( alloc_vcpu(d, i, cpu) == NULL )
+                    goto maxvcpu_out;
+            }
+        }
+
+        ret = 0;
+
+    maxvcpu_out:
+        put_domain(d);
     }
     break;
 
@@ -249,8 +287,6 @@
         domid_t dom = op->u.pincpudomain.domain;
         struct domain *d = find_domain_by_id(dom);
         struct vcpu *v;
-        cpumap_t cpumap;
-
 
         if ( d == NULL )
         {
@@ -281,26 +317,17 @@
             break;
         }
 
-        if ( copy_from_user(&cpumap, op->u.pincpudomain.cpumap,
-                            sizeof(cpumap)) )
-        {
-            ret = -EFAULT;
-            put_domain(d);
-            break;
-        }
-
-        /* update cpumap for this vcpu */
-        v->cpumap = cpumap;
-
-        if ( cpumap == CPUMAP_RUNANYWHERE )
+        v->cpumap = op->u.pincpudomain.cpumap;
+
+        if ( v->cpumap == CPUMAP_RUNANYWHERE )
         {
             clear_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
         }
         else
         {
             /* pick a new cpu from the usable map */
-            int new_cpu = (int)find_first_set_bit(cpumap) % num_online_cpus();
-
+            int new_cpu;
+            new_cpu = (int)find_first_set_bit(v->cpumap) % num_online_cpus();
             vcpu_pause(v);
             vcpu_migrate_cpu(v, new_cpu);
             set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
@@ -354,6 +381,8 @@
         put_domain(d);
     }
     break;
+
+
 
     case DOM0_GETDOMAININFOLIST:
     { 
@@ -407,66 +436,74 @@
         struct vcpu_guest_context *c;
         struct domain             *d;
         struct vcpu               *v;
-        int i;
-
-        d = find_domain_by_id(op->u.getvcpucontext.domain);
-        if ( d == NULL )
-        {
-            ret = -ESRCH;
-            break;
-        }
-
+
+        ret = -ESRCH;
+        if ( (d = find_domain_by_id(op->u.getvcpucontext.domain)) == NULL )
+            break;
+
+        ret = -EINVAL;
         if ( op->u.getvcpucontext.vcpu >= MAX_VIRT_CPUS )
-        {
-            ret = -EINVAL;
-            put_domain(d);
-            break;
-        }
-
-        /* find first valid vcpu starting from request. */
-        v = NULL;
-        for ( i = op->u.getvcpucontext.vcpu; i < MAX_VIRT_CPUS; i++ )
-        {
-            v = d->vcpu[i];
-            if ( v != NULL && !(test_bit(_VCPUF_down, &v->vcpu_flags)) )
-                break;
-        }
-        
-        if ( v == NULL )
-        {
-            ret = -ESRCH;
-            put_domain(d);
-            break;
-        }
-
-        op->u.getvcpucontext.cpu_time = v->cpu_time;
-
-        if ( op->u.getvcpucontext.ctxt != NULL )
-        {
-            if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
-            {
-                ret = -ENOMEM;
-                put_domain(d);
-                break;
-            }
-
-            if ( v != current )
-                vcpu_pause(v);
-
-            arch_getdomaininfo_ctxt(v,c);
-
-            if ( v != current )
-                vcpu_unpause(v);
-
-            if ( copy_to_user(op->u.getvcpucontext.ctxt, c, sizeof(*c)) )
-                ret = -EINVAL;
-
-            xfree(c);
-        }
+            goto getvcpucontext_out;
+
+        ret = -ESRCH;
+        if ( (v = d->vcpu[op->u.getvcpucontext.vcpu]) == NULL )
+            goto getvcpucontext_out;
+
+        ret = -ENOMEM;
+        if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
+            goto getvcpucontext_out;
+
+        if ( v != current )
+            vcpu_pause(v);
+
+        arch_getdomaininfo_ctxt(v,c);
+        ret = 0;
+
+        if ( v != current )
+            vcpu_unpause(v);
+
+        if ( copy_to_user(op->u.getvcpucontext.ctxt, c, sizeof(*c)) )
+            ret = -EFAULT;
+
+        xfree(c);
 
         if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )     
-            ret = -EINVAL;
-
+            ret = -EFAULT;
+
+    getvcpucontext_out:
+        put_domain(d);
+    }
+    break;
+
+    case DOM0_GETVCPUINFO:
+    { 
+        struct domain *d;
+        struct vcpu   *v;
+
+        ret = -ESRCH;
+        if ( (d = find_domain_by_id(op->u.getvcpuinfo.domain)) == NULL )
+            break;
+
+        ret = -EINVAL;
+        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
+            goto getvcpuinfo_out;
+
+        ret = -ESRCH;
+        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
+            goto getvcpuinfo_out;
+
+        op->u.getvcpuinfo.online   = !test_bit(_VCPUF_down, &v->vcpu_flags);
+        op->u.getvcpuinfo.blocked  = test_bit(_VCPUF_blocked, &v->vcpu_flags);
+        op->u.getvcpuinfo.running  = test_bit(_VCPUF_running, &v->vcpu_flags);
+        op->u.getvcpuinfo.cpu_time = v->cpu_time;
+        op->u.getvcpuinfo.cpu      = v->processor;
+        op->u.getvcpuinfo.cpumap   = v->cpumap;
+        ret = 0;
+
+        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )     
+            ret = -EFAULT;
+
+    getvcpuinfo_out:
         put_domain(d);
     }
     break;
@@ -521,6 +558,21 @@
     }
     break;
 
+    case DOM0_SETDOMAINHANDLE:
+    {
+        struct domain *d; 
+        ret = -ESRCH;
+        d = find_domain_by_id(op->u.setdomainhandle.domain);
+        if ( d != NULL )
+        {
+            memcpy(d->handle, op->u.setdomainhandle.handle,
+                   sizeof(xen_domain_handle_t));
+            put_domain(d);
+            ret = 0;
+        }
+    }
+    break;
+
 #ifdef PERF_COUNTERS
     case DOM0_PERFCCONTROL:
     {
@@ -535,10 +587,14 @@
         ret = arch_do_dom0_op(op,u_dom0_op);
 
     }
+
+    spin_unlock(&dom0_lock);
+
     if (!ret)
         acm_post_dom0_op(op, ssid);
     else
         acm_fail_dom0_op(op, ssid);
+
     return ret;
 }
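
A note for readers following the getdomaininfo() rework near the top of this file: the new loop reports DOMFLAGS_BLOCKED only if every online VCPU is blocked, sets DOMFLAGS_RUNNING if any VCPU is running, and ignores VCPUs that are down. The self-contained sketch below models just that aggregation rule; the structures and flag values are simplified stand-ins rather than the real struct vcpu and DOMFLAGS_* definitions.

/* Editorial sketch, not part of the changeset: the flag aggregation rule
 * used by the reworked getdomaininfo().  Types and flag values below are
 * simplified stand-ins. */
#include <stdio.h>

#define DOMFLAGS_BLOCKED 0x1   /* stand-in value, not the real definition */
#define DOMFLAGS_RUNNING 0x2   /* stand-in value, not the real definition */

struct vcpu_state {
    int down;      /* VCPU is offline */
    int blocked;
    int running;
};

static int domain_flags(const struct vcpu_state *v, int nr_vcpus)
{
    int flags = DOMFLAGS_BLOCKED;   /* blocked until an online VCPU says otherwise */

    for (int i = 0; i < nr_vcpus; i++) {
        if (v[i].down)
            continue;               /* offline VCPUs do not affect the flags */
        if (!v[i].blocked)
            flags &= ~DOMFLAGS_BLOCKED;
        if (v[i].running)
            flags |= DOMFLAGS_RUNNING;
    }
    return flags;
}

int main(void)
{
    struct vcpu_state v[] = {
        { .down = 0, .blocked = 1, .running = 0 },
        { .down = 0, .blocked = 0, .running = 1 },
        { .down = 1, .blocked = 0, .running = 0 },   /* ignored: offline */
    };
    printf("flags = 0x%x\n", (unsigned)domain_flags(v, 3));  /* 0x2: running, not blocked */
    return 0;
}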
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/common/domain.c
--- a/xen/common/domain.c       Fri Oct 21 19:58:39 2005
+++ b/xen/common/domain.c       Mon Oct 24 15:08:13 2005
@@ -33,38 +33,41 @@
     struct domain *d, **pd;
     struct vcpu *v;
 
-    if ( (d = alloc_domain_struct()) == NULL )
+    if ( (d = alloc_domain()) == NULL )
         return NULL;
 
-    v = d->vcpu[0];
+    d->domain_id = dom_id;
 
     atomic_set(&d->refcnt, 1);
-    atomic_set(&v->pausecnt, 0);
-
-    d->domain_id = dom_id;
-    v->processor = cpu;
 
     spin_lock_init(&d->big_lock);
-
     spin_lock_init(&d->page_alloc_lock);
     INIT_LIST_HEAD(&d->page_list);
     INIT_LIST_HEAD(&d->xenpage_list);
 
     if ( d->domain_id == IDLE_DOMAIN_ID )
         set_bit(_DOMF_idle_domain, &d->domain_flags);
+    else
+        set_bit(_DOMF_ctrl_pause, &d->domain_flags);
 
     if ( !is_idle_task(d) &&
          ((evtchn_init(d) != 0) || (grant_table_create(d) != 0)) )
     {
         evtchn_destroy(d);
-        free_domain_struct(d);
+        free_domain(d);
         return NULL;
     }
     
+    if ( (v = alloc_vcpu(d, 0, cpu)) == NULL )
+    {
+        grant_table_destroy(d);
+        evtchn_destroy(d);
+        free_domain(d);
+        return NULL;
+    }
+
     arch_do_createdomain(v);
     
-    sched_add_domain(v);
-
     if ( !is_idle_task(d) )
     {
         write_lock(&domlist_lock);
@@ -224,11 +227,9 @@
      * must issue a PAUSEDOMAIN command to ensure that all execution
      * has ceased and guest state is committed to memory.
      */
+    set_bit(_DOMF_ctrl_pause, &d->domain_flags);
     for_each_vcpu ( d, v )
-    {
-        set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);
         vcpu_sleep_nosync(v);
-    }
 
     send_guest_virq(dom0->vcpu[0], VIRQ_DEBUGGER);
 }
@@ -267,7 +268,7 @@
     free_perdomain_pt(d);
     free_xenheap_page(d->shared_info);
 
-    free_domain_struct(d);
+    free_domain(d);
 
     send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
 }
@@ -289,6 +290,8 @@
         atomic_inc(&v->pausecnt);
         vcpu_sleep_sync(v);
     }
+
+    sync_pagetable_state(d);
 }
 
 void vcpu_unpause(struct vcpu *v)
@@ -310,21 +313,24 @@
 {
     struct vcpu *v;
 
-    for_each_vcpu ( d, v )
-    {
-        BUG_ON(v == current);
-        if ( !test_and_set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
+    BUG_ON(current->domain == d);
+
+    if ( !test_and_set_bit(_DOMF_ctrl_pause, &d->domain_flags) )
+    {
+        for_each_vcpu ( d, v )
             vcpu_sleep_sync(v);
     }
+
+    sync_pagetable_state(d);
 }
 
 void domain_unpause_by_systemcontroller(struct domain *d)
 {
     struct vcpu *v;
 
-    for_each_vcpu ( d, v )
-    {
-        if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
+    if ( test_and_clear_bit(_DOMF_ctrl_pause, &d->domain_flags) )
+    {
+        for_each_vcpu ( d, v )
             vcpu_wake(v);
     }
 }
@@ -345,61 +351,30 @@
     if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
         return -EINVAL;
     
-    if (test_bit(_DOMF_constructed, &d->domain_flags) && 
-        !test_bit(_VCPUF_ctrl_pause, &v->vcpu_flags))
+    if ( !test_bit(_DOMF_ctrl_pause, &d->domain_flags) )
         return -EINVAL;
 
     if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
         return -ENOMEM;
 
-    if ( copy_from_user(c, setdomaininfo->ctxt, sizeof(*c)) )
-    {
-        rc = -EFAULT;
-        goto out;
-    }
-    
-    if ( (rc = arch_set_info_guest(v, c)) != 0 )
-        goto out;
-
-    set_bit(_DOMF_constructed, &d->domain_flags);
-
- out:    
+    rc = -EFAULT;
+    if ( copy_from_user(c, setdomaininfo->ctxt, sizeof(*c)) == 0 )
+        rc = arch_set_info_guest(v, c);
+
     xfree(c);
     return rc;
 }
 
 int boot_vcpu(struct domain *d, int vcpuid, struct vcpu_guest_context *ctxt) 
 {
-    struct vcpu *v;
+    struct vcpu *v = d->vcpu[vcpuid];
     int rc;
 
-    ASSERT(d->vcpu[vcpuid] == NULL);
-
-    if ( alloc_vcpu_struct(d, vcpuid) == NULL )
-        return -ENOMEM;
-
-    v = d->vcpu[vcpuid];
-
-    atomic_set(&v->pausecnt, 0);
-    v->cpumap = CPUMAP_RUNANYWHERE;
-
-    memcpy(&v->arch, &idle0_vcpu.arch, sizeof(v->arch));
-
-    arch_do_boot_vcpu(v);
+    BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
 
     if ( (rc = arch_set_info_guest(v, ctxt)) != 0 )
-        goto out;
-
-    sched_add_domain(v);
-
-    set_bit(_VCPUF_down, &v->vcpu_flags);
-    clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);
-
-    return 0;
-
- out:
-    arch_free_vcpu_struct(d->vcpu[vcpuid]);
-    d->vcpu[vcpuid] = NULL;
+        return rc;
+
     return rc;
 }
 
@@ -413,7 +388,7 @@
     if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
         return -EINVAL;
 
-    if ( ((v = d->vcpu[vcpuid]) == NULL) && (cmd != VCPUOP_initialise) )
+    if ( (v = d->vcpu[vcpuid]) == NULL )
         return -ENOENT;
 
     switch ( cmd )
@@ -433,7 +408,9 @@
         }
 
         LOCK_BIGLOCK(d);
-        rc = (d->vcpu[vcpuid] == NULL) ? boot_vcpu(d, vcpuid, ctxt) : -EEXIST;
+        rc = -EEXIST;
+        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+            rc = boot_vcpu(d, vcpuid, ctxt);
         UNLOCK_BIGLOCK(d);
 
         xfree(ctxt);
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/common/event_channel.c
--- a/xen/common/event_channel.c        Fri Oct 21 19:58:39 2005
+++ b/xen/common/event_channel.c        Mon Oct 24 15:08:13 2005
@@ -89,7 +89,8 @@
     chn = evtchn_from_port(d, port);
 
     chn->state = ECS_UNBOUND;
-    chn->u.unbound.remote_domid = alloc->remote_dom;
+    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
+        chn->u.unbound.remote_domid = current->domain->domain_id;
 
     alloc->port = port;
 
@@ -107,9 +108,13 @@
     struct evtchn *lchn, *rchn;
     struct domain *ld = current->domain, *rd;
     int            lport, rport = bind->remote_port;
+    domid_t        rdom = bind->remote_dom;
     long           rc = 0;
 
-    if ( (rd = find_domain_by_id(bind->remote_dom)) == NULL )
+    if ( rdom == DOMID_SELF )
+        rdom = current->domain->domain_id;
+
+    if ( (rd = find_domain_by_id(rdom)) == NULL )
         return -ESRCH;
 
     /* Avoid deadlock by first acquiring lock of domain with smaller id. */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/common/keyhandler.c
--- a/xen/common/keyhandler.c   Fri Oct 21 19:58:39 2005
+++ b/xen/common/keyhandler.c   Mon Oct 24 15:08:13 2005
@@ -99,7 +99,7 @@
 static void do_task_queues(unsigned char key)
 {
     struct domain *d;
-    struct vcpu *v;
+    struct vcpu   *v;
     s_time_t       now = NOW();
 
     printk("'%c' pressed -> dumping task queues (now=0x%X:%08X)\n", key,
@@ -112,6 +112,12 @@
         printk("Xen: DOM %u, flags=%lx refcnt=%d nr_pages=%d "
                "xenheap_pages=%d\n", d->domain_id, d->domain_flags,
                atomic_read(&d->refcnt), d->tot_pages, d->xenheap_pages);
+        printk("     handle=%02x%02x%02x%02x-%02x%02x%02x%02x-"
+               "%02x%02x%02x%02x-%02x%02x%02x%02x\n",
+               d->handle[ 0], d->handle[ 1], d->handle[ 2], d->handle[ 3],
+               d->handle[ 4], d->handle[ 5], d->handle[ 6], d->handle[ 7],
+               d->handle[ 8], d->handle[ 9], d->handle[10], d->handle[11],
+               d->handle[12], d->handle[13], d->handle[14], d->handle[15]);
 
         dump_pageframe_info(d);
                
@@ -130,7 +136,7 @@
                             &d->shared_info->evtchn_pending[0]),
                    test_bit(v->virq_to_evtchn[VIRQ_DEBUG], 
                             &d->shared_info->evtchn_mask[0]),
-                   test_bit(v->virq_to_evtchn[VIRQ_DEBUG]>>5, 
+                   test_bit(v->virq_to_evtchn[VIRQ_DEBUG]/BITS_PER_LONG, 
                             &v->vcpu_info->evtchn_pending_sel));
             send_guest_virq(v, VIRQ_DEBUG);
         }
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Fri Oct 21 19:58:39 2005
+++ b/xen/common/page_alloc.c   Mon Oct 24 15:08:13 2005
@@ -610,8 +610,7 @@
         for ( i = 0; i < (1 << order); i++ )
         {
             shadow_drop_references(d, &pg[i]);
-            ASSERT(((pg[i].u.inuse.type_info & PGT_count_mask) == 0) ||
-                   shadow_tainted_refcnts(d));
+            ASSERT((pg[i].u.inuse.type_info & PGT_count_mask) == 0);
             pg[i].tlbflush_timestamp  = tlbflush_current_time();
             pg[i].u.free.cpumask      = d->cpumask;
             list_del(&pg[i].list);
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c   Fri Oct 21 19:58:39 2005
+++ b/xen/common/sched_sedf.c   Mon Oct 24 15:08:13 2005
@@ -1122,10 +1122,10 @@
 void sedf_wake(struct vcpu *d) {
     s_time_t              now = NOW();
     struct sedf_vcpu_info* inf = EDOM_INFO(d);
- 
+
     PRINT(3, "sedf_wake was called, domain-id %i.%i\n",d->domain->domain_id,
           d->vcpu_id);
- 
+
     if (unlikely(is_idle_task(d->domain)))
         return;
    
@@ -1150,7 +1150,7 @@
     inf->block_tot++;
 #endif
     if (unlikely(now < PERIOD_BEGIN(inf))) {
-       PRINT(4,"extratime unblock\n");
+        PRINT(4,"extratime unblock\n");
         /* unblocking in extra-time! */
 #if (EXTRA == EXTRA_BLOCK_WEIGHT)
         if (inf->status & EXTRA_WANT_PEN_Q) {
@@ -1459,3 +1459,13 @@
     .wake           = sedf_wake,
     .adjdom         = sedf_adjdom,
 };
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/common/schedule.c
--- a/xen/common/schedule.c     Fri Oct 21 19:58:39 2005
+++ b/xen/common/schedule.c     Mon Oct 24 15:08:13 2005
@@ -80,85 +80,64 @@
 /* Per-CPU periodic timer sends an event to the currently-executing domain. */
 static struct ac_timer t_timer[NR_CPUS]; 
 
-void free_domain_struct(struct domain *d)
+void free_domain(struct domain *d)
 {
     int i;
 
     SCHED_OP(free_task, d);
-    /* vcpu 0 has to be the last one destructed. */
-    for (i = MAX_VIRT_CPUS-1; i >= 0; i--)
-        if ( d->vcpu[i] )
-            arch_free_vcpu_struct(d->vcpu[i]);
+
+    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
+        if ( d->vcpu[i] != NULL )
+            free_vcpu_struct(d->vcpu[i]);
 
     xfree(d);
 }
 
-struct vcpu *alloc_vcpu_struct(
-    struct domain *d, unsigned long vcpu)
-{
-    struct vcpu *v, *vc;
-
-    ASSERT( d->vcpu[vcpu] == NULL );
-
-    if ( (v = arch_alloc_vcpu_struct()) == NULL )
+struct vcpu *alloc_vcpu(
+    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
+{
+    struct vcpu *v;
+
+    BUG_ON(d->vcpu[vcpu_id] != NULL);
+
+    if ( (v = alloc_vcpu_struct(d, vcpu_id)) == NULL )
         return NULL;
 
-    memset(v, 0, sizeof(*v));
-
-    d->vcpu[vcpu] = v;
     v->domain = d;
-    v->vcpu_id = vcpu;
+    v->vcpu_id = vcpu_id;
+    v->processor = cpu_id;
+    atomic_set(&v->pausecnt, 0);
+    v->cpumap = CPUMAP_RUNANYWHERE;
+
+    d->vcpu[vcpu_id] = v;
 
     if ( SCHED_OP(alloc_task, v) < 0 )
-        goto out;
-
-    if ( vcpu != 0 )
-    {
-        v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
-
-        for_each_vcpu( d, vc )
-        {
-            if ( (vc->next_in_list == NULL) ||
-                 (vc->next_in_list->vcpu_id > vcpu) )
-                break;
-        }
-        v->next_in_list  = vc->next_in_list;
-        vc->next_in_list = v;
-
-        if (test_bit(_VCPUF_cpu_pinned, &vc->vcpu_flags)) {
-            v->processor = (vc->processor + 1) % num_online_cpus();
-            set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
-        } else {
-            v->processor = (vc->processor + 1) % num_online_cpus();
-        }
+    {
+        d->vcpu[vcpu_id] = NULL;
+        free_vcpu_struct(v);
+        return NULL;
+    }
+
+    sched_add_domain(v);
+
+    if ( vcpu_id != 0 )
+    {
+        v->vcpu_info = &d->shared_info->vcpu_data[vcpu_id];
+        d->vcpu[v->vcpu_id-1]->next_in_list = v;
+        set_bit(_VCPUF_down, &v->vcpu_flags);
     }
 
     return v;
-
- out:
-    d->vcpu[vcpu] = NULL;
-    arch_free_vcpu_struct(v);
-
-    return NULL;
-}
-
-struct domain *alloc_domain_struct(void)
+}
+
+struct domain *alloc_domain(void)
 {
     struct domain *d;
 
-    if ( (d = xmalloc(struct domain)) == NULL )
-        return NULL;
-    
-    memset(d, 0, sizeof(*d));
-
-    if ( alloc_vcpu_struct(d, 0) == NULL )
-        goto out;
+    if ( (d = xmalloc(struct domain)) != NULL )
+        memset(d, 0, sizeof(*d));
 
     return d;
-
- out:
-    xfree(d);
-    return NULL;
 }
 
 /*
@@ -176,11 +155,6 @@
         schedule_data[v->processor].curr = v;
         schedule_data[v->processor].idle = v;
         set_bit(_VCPUF_running, &v->vcpu_flags);
-    }
-    else
-    {
-        /* Must be unpaused by control software to start execution. */
-        set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);
     }
 
     SCHED_OP(add_task, v);
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/acm/acm_core.h
--- a/xen/include/acm/acm_core.h        Fri Oct 21 19:58:39 2005
+++ b/xen/include/acm/acm_core.h        Mon Oct 24 15:08:13 2005
@@ -15,6 +15,7 @@
  *    for the access control module and relevant policies
  *
  */
+
 #ifndef _ACM_CORE_H
 #define _ACM_CORE_H
 
@@ -25,30 +26,30 @@
 
 /* Xen-internal representation of the binary policy */
 struct acm_binary_policy {
-       u16 primary_policy_code;
-       u16 secondary_policy_code;
-       void *primary_binary_policy;                                 
-       void *secondary_binary_policy;
-       
+    u16 primary_policy_code;
+    u16 secondary_policy_code;
+    void *primary_binary_policy;                                 
+    void *secondary_binary_policy;
+ 
 };
 
 struct chwall_binary_policy {
-       u16 max_types;
-       u16 max_ssidrefs;
-       u16 max_conflictsets;
-       domaintype_t *ssidrefs;                 /* [max_ssidrefs][max_types]    */
-       domaintype_t *conflict_aggregate_set;   /* [max_types]                  */
-       domaintype_t *running_types;            /* [max_types]                  */
-       domaintype_t *conflict_sets;            /* [max_conflictsets][max_types]*/
+    u32 max_types;
+    u32 max_ssidrefs;
+    u32 max_conflictsets;
+    domaintype_t *ssidrefs;     /* [max_ssidrefs][max_types]  */
+    domaintype_t *conflict_aggregate_set;  /* [max_types]      */
+    domaintype_t *running_types;    /* [max_types]      */
+    domaintype_t *conflict_sets;   /* [max_conflictsets][max_types]*/
 };
 
 struct ste_binary_policy {
-       u16 max_types;
-       u16 max_ssidrefs;
-       domaintype_t *ssidrefs;                 /* [max_ssidrefs][max_types]    */
-       atomic_t ec_eval_count, gt_eval_count;
-       atomic_t ec_denied_count, gt_denied_count; 
-       atomic_t ec_cachehit_count, gt_cachehit_count;
+    u32 max_types;
+    u32 max_ssidrefs;
+    domaintype_t *ssidrefs;     /* [max_ssidrefs][max_types]  */
+    atomic_t ec_eval_count, gt_eval_count;
+    atomic_t ec_denied_count, gt_denied_count; 
+    atomic_t ec_cachehit_count, gt_cachehit_count;
 };
 
 /* global acm policy */
@@ -63,7 +64,7 @@
 
 /* defines number of access decisions to other domains can be cached
  * one entry per domain, TE does not distinguish evtchn or grant_table */
-#define ACM_TE_CACHE_SIZE      8
+#define ACM_TE_CACHE_SIZE 8
 enum acm_ste_flag { VALID, FREE };
 
 /* cache line:
@@ -72,57 +73,67 @@
  *                 on domain cache_line.id
  */
 struct acm_ste_cache_line {
-       enum acm_ste_flag valid;
-       domid_t id;
+    enum acm_ste_flag valid;
+    domid_t id;
 };
 
 /* general definition of a subject security id */
 struct acm_ssid_domain {
-       enum acm_datatype datatype;             /* type of subject (e.g., partition) */
-       ssidref_t         ssidref;              /* combined security reference */
-       void              *primary_ssid;        /* primary policy ssid part (e.g. chinese wall) */
-       void              *secondary_ssid;      /* secondary policy ssid part (e.g. type enforcement) */
-       struct domain     *subject;             /* backpointer to subject structure */
-       domid_t           domainid;             /* replicate id */
+    enum acm_datatype datatype; /* type of subject (e.g., partition) */
+    ssidref_t ssidref;   /* combined security reference */
+    void *primary_ssid;   /* primary policy ssid part (e.g. chinese wall) */
+    void *secondary_ssid;    /* secondary policy ssid part (e.g. type enforcement) */
+    struct domain *subject;     /* backpointer to subject structure */
+    domid_t domainid;   /* replicate id */
 };
 
 /* chinese wall ssid type */
 struct chwall_ssid {
-       ssidref_t chwall_ssidref;
+    ssidref_t chwall_ssidref;
 };
 
 /* simple type enforcement ssid type */
 struct ste_ssid {
-       ssidref_t ste_ssidref;
-       struct acm_ste_cache_line ste_cache[ACM_TE_CACHE_SIZE]; /* decision cache */
+    ssidref_t ste_ssidref;
+    struct acm_ste_cache_line ste_cache[ACM_TE_CACHE_SIZE]; /* decision cache */
 };
 
 /* macros to access ssidref for primary / secondary policy 
- *     primary ssidref   = lower 16 bit
- *      secondary ssidref = higher 16 bit
+ * primary ssidref   = lower 16 bit
+ *  secondary ssidref = higher 16 bit
  */
 #define ACM_PRIMARY(ssidref) \
-       ((ssidref) & 0xffff)
+ ((ssidref) & 0xffff)
 
 #define ACM_SECONDARY(ssidref) \
-       ((ssidref) >> 16)
+ ((ssidref) >> 16)
 
 #define GET_SSIDREF(POLICY, ssidref) \
-       ((POLICY) == acm_bin_pol.primary_policy_code) ? \
-       ACM_PRIMARY(ssidref) : ACM_SECONDARY(ssidref)
+ ((POLICY) == acm_bin_pol.primary_policy_code) ? \
+ ACM_PRIMARY(ssidref) : ACM_SECONDARY(ssidref)
 
 /* macros to access ssid pointer for primary / secondary policy */
 #define GET_SSIDP(POLICY, ssid) \
-       ((POLICY) == acm_bin_pol.primary_policy_code) ? \
-       ((ssid)->primary_ssid) : ((ssid)->secondary_ssid)
+ ((POLICY) == acm_bin_pol.primary_policy_code) ? \
+ ((ssid)->primary_ssid) : ((ssid)->secondary_ssid)
 
 /* protos */
 int acm_init_domain_ssid(domid_t id, ssidref_t ssidref);
-int acm_free_domain_ssid(struct acm_ssid_domain *ssid);
-int acm_set_policy(void *buf, u16 buf_size, int isuserbuffer);
-int acm_get_policy(void *buf, u16 buf_size);
+void acm_free_domain_ssid(struct acm_ssid_domain *ssid);
+int acm_set_policy(void *buf, u32 buf_size, int isuserbuffer);
+int acm_get_policy(void *buf, u32 buf_size);
 int acm_dump_statistics(void *buf, u16 buf_size);
 int acm_get_ssid(ssidref_t ssidref, u8 *buf, u16 buf_size);
+int acm_get_decision(ssidref_t ssidref1, ssidref_t ssidref2, enum acm_hook_type hook);
 
 #endif
 
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/acm/acm_endian.h
--- a/xen/include/acm/acm_endian.h      Fri Oct 21 19:58:39 2005
+++ b/xen/include/acm/acm_endian.h      Mon Oct 24 15:08:13 2005
@@ -18,6 +18,7 @@
  * big-endian policy interface
  *
  */
+
 #ifndef _ACM_ENDIAN_H
 #define _ACM_ENDIAN_H
 
@@ -30,10 +31,10 @@
 {
     if (little_endian)
         return 
-           ( (((x) >> 24) & 0xff      )| 
-             (((x) >>  8) & 0xff00    )| 
-             (((x) <<  8) & 0xff0000  )|
-             (((x) << 24) & 0xff000000) );
+            ( (((x) >> 24) & 0xff      )| 
+              (((x) >>  8) & 0xff00    )| 
+              (((x) <<  8) & 0xff0000  )|
+              (((x) << 24) & 0xff000000) );
     else
         return x;
 }
@@ -42,10 +43,10 @@
 {
     if (little_endian)
         return 
-           ( (((x) >> 8) & 0xff   )|
-             (((x) << 8) & 0xff00 ) );
+            ( (((x) >> 8) & 0xff   )|
+              (((x) << 8) & 0xff00 ) );
     else
-       return x;
+        return x;
 }
 
 #define htonl(x) ntohl(x)
@@ -55,8 +56,8 @@
 {
     unsigned int i = 0;
     while (i < n) {
-               dest[i] = htons(src[i]);
-               i++;
+        dest[i] = htons(src[i]);
+        i++;
     }
 }
 
@@ -64,8 +65,8 @@
 {
     unsigned int i = 0;
     while (i < n) {
-       dest[i] = htonl(src[i]);
-       i++;
+        dest[i] = htonl(src[i]);
+        i++;
     }
 }
 
@@ -86,3 +87,13 @@
 }
 
 #endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/acm/acm_hooks.h
--- a/xen/include/acm/acm_hooks.h       Fri Oct 21 19:58:39 2005
+++ b/xen/include/acm/acm_hooks.h       Mon Oct 24 15:08:13 2005
@@ -15,6 +15,7 @@
  *      sHype hooks that are called throughout Xen.
  * 
  */
+
 #ifndef _ACM_HOOKS_H
 #define _ACM_HOOKS_H
 
@@ -89,8 +90,8 @@
     /* policy management functions (must always be defined!) */
     int  (*init_domain_ssid)           (void **ssid, ssidref_t ssidref);
     void (*free_domain_ssid)           (void *ssid);
-    int  (*dump_binary_policy)         (u8 *buffer, u16 buf_size);
-    int  (*set_binary_policy)          (u8 *buffer, u16 buf_size);
+    int  (*dump_binary_policy)         (u8 *buffer, u32 buf_size);
+    int  (*set_binary_policy)          (u8 *buffer, u32 buf_size);
     int  (*dump_statistics)            (u8 *buffer, u16 buf_size);
    int  (*dump_ssid_types)            (ssidref_t ssidref, u8 *buffer, u16 buf_size);
     /* domain management control hooks (can be NULL) */
@@ -108,6 +109,8 @@
     void (*fail_grant_map_ref)         (domid_t id);
     int  (*pre_grant_setup)            (domid_t id);
     void (*fail_grant_setup)           (domid_t id);
+    /* generic domain-requested decision hooks (can be NULL) */
+    int (*sharing)                     (ssidref_t ssidref1, ssidref_t ssidref2);
 };
 
 /* global variables */
@@ -144,6 +147,8 @@
 { return 0; }
 static inline void acm_post_domain0_create(domid_t domid) 
 { return; }
+static inline int acm_sharing(ssidref_t ssidref1, ssidref_t ssidref2)
+{ return 0; }
 
 #else
 
@@ -281,7 +286,8 @@
         break;
     case EVTCHNOP_bind_interdomain:
         ret = acm_pre_eventchannel_interdomain(
-            op->u.bind_interdomain.dom1, op->u.bind_interdomain.dom2);
+            current->domain->domain_id,
+            op->u.bind_interdomain.remote_dom);
         break;
     default:
         ret = 0; /* ok */
@@ -341,6 +347,18 @@
     acm_post_domain_create(domid, ACM_DOM0_SSIDREF);
 }
 
+static inline int acm_sharing(ssidref_t ssidref1, ssidref_t ssidref2)
+{
+    if ((acm_primary_ops->sharing != NULL) &&
+        acm_primary_ops->sharing(ssidref1, ssidref2))
+        return ACM_ACCESS_DENIED;
+    else if ((acm_secondary_ops->sharing != NULL) &&
+             acm_secondary_ops->sharing(ssidref1, ssidref2)) {
+        return ACM_ACCESS_DENIED;
+    } else
+        return ACM_ACCESS_PERMITTED;
+}
+
 extern int acm_init(unsigned int *initrdidx,
                     const multiboot_info_t *mbi,
                     unsigned long start);
@@ -348,3 +366,13 @@
 #endif
 
 #endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
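
The acm_sharing() inline added above combines the two policy layers: a request is denied as soon as either the primary or the secondary sharing() callback objects, and is permitted only when neither does (a NULL callback abstains). The self-contained sketch below models that combination rule; the ops structures, the toy type-enforcement check and the constants are illustrative stand-ins, not the real acm_operations plumbing.

/* Editorial sketch, not part of the changeset: decision combination as in
 * acm_sharing().  All types below are simplified stand-ins. */
#include <stdio.h>

#define ACM_ACCESS_PERMITTED    0
#define ACM_ACCESS_DENIED    -111

typedef unsigned int ssidref_t;

struct policy_ops {
    /* returns nonzero to object to the sharing request; may be NULL */
    int (*sharing)(ssidref_t ssidref1, ssidref_t ssidref2);
};

static int ste_sharing(ssidref_t s1, ssidref_t s2)
{
    return s1 != s2;            /* toy rule: only identical types may share */
}

static struct policy_ops primary_ops   = { .sharing = NULL };        /* abstains */
static struct policy_ops secondary_ops = { .sharing = ste_sharing };

static int acm_sharing(ssidref_t ssidref1, ssidref_t ssidref2)
{
    if ((primary_ops.sharing != NULL) &&
        primary_ops.sharing(ssidref1, ssidref2))
        return ACM_ACCESS_DENIED;
    if ((secondary_ops.sharing != NULL) &&
        secondary_ops.sharing(ssidref1, ssidref2))
        return ACM_ACCESS_DENIED;
    return ACM_ACCESS_PERMITTED;
}

int main(void)
{
    printf("%d %d\n", acm_sharing(1, 1), acm_sharing(1, 2));  /* 0 (permitted), -111 (denied) */
    return 0;
}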
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Fri Oct 21 19:58:39 2005
+++ b/xen/include/asm-x86/domain.h      Mon Oct 24 15:08:13 2005
@@ -32,8 +32,6 @@
     struct shadow_ops *ops;
     unsigned int shadow_mode;  /* flags to control shadow table operation */
     unsigned int shadow_nest;  /* Recursive depth of shadow_lock() nesting */
-    /* Shadow mode has tainted page reference counts? */
-    unsigned int shadow_tainted_refcnts;
 
     /* shadow hashtable */
     struct shadow_status *shadow_ht;
@@ -63,8 +61,8 @@
 
     struct list_head free_shadow_frames;
 
-    pagetable_t  phys_table;               /* guest 1:1 pagetable */
-    struct virtual_platform_def vmx_platform;
+    pagetable_t         phys_table;         /* guest 1:1 pagetable */
+    struct vmx_platform vmx_platform;
 } __cacheline_aligned;
 
 struct arch_vcpu
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Fri Oct 21 19:58:39 2005
+++ b/xen/include/asm-x86/mm.h  Mon Oct 24 15:08:13 2005
@@ -336,7 +336,12 @@
 int  revalidate_l1(struct domain *, l1_pgentry_t *, l1_pgentry_t *);
 
 void cleanup_writable_pagetable(struct domain *d);
-#define sync_pagetable_state(d) cleanup_writable_pagetable(d)
+#define sync_pagetable_state(d)                 \
+    do {                                        \
+        LOCK_BIGLOCK(d);                        \
+        cleanup_writable_pagetable(d);          \
+        UNLOCK_BIGLOCK(d);                      \
+    } while ( 0 )
 
 int audit_adjust_pgtables(struct domain *d, int dir, int noisy);
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Fri Oct 21 19:58:39 2005
+++ b/xen/include/asm-x86/shadow.h      Mon Oct 24 15:08:13 2005
@@ -54,8 +54,6 @@
 #define shadow_mode_log_dirty(_d) ((_d)->arch.shadow_mode & SHM_log_dirty)
 #define shadow_mode_translate(_d) ((_d)->arch.shadow_mode & SHM_translate)
 #define shadow_mode_external(_d)  ((_d)->arch.shadow_mode & SHM_external)
-
-#define shadow_tainted_refcnts(_d) ((_d)->arch.shadow_tainted_refcnts)
 
 #define shadow_linear_pg_table ((l1_pgentry_t *)SH_LINEAR_PT_VIRT_START)
 #define __shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \
@@ -304,10 +302,12 @@
 
 struct out_of_sync_entry {
     struct out_of_sync_entry *next;
+    struct vcpu   *v;
     unsigned long gpfn;    /* why is this here? */
     unsigned long gmfn;
     unsigned long snapshot_mfn;
     unsigned long writable_pl1e; /* NB: this is a machine address */
+    unsigned long va;
 };
 
 #define out_of_sync_extra_size 127
@@ -386,6 +386,10 @@
 
     nl1e = l1e;
     l1e_remove_flags(nl1e, _PAGE_GLOBAL);
+
+    if ( unlikely(l1e_get_flags(l1e) & L1_DISALLOW_MASK) )
+        return 0;
+
     res = get_page_from_l1e(nl1e, d);
 
     if ( unlikely(!res) && IS_PRIV(d) && !shadow_mode_translate(d) &&
@@ -718,6 +722,23 @@
     put_shadow_ref(smfn);
 }
 
+/*
+ * SMP issue. The following code assumes the shadow lock is held. Re-visit
+ * when working on finer-gained locks for shadow.
+ */
+static inline void set_guest_back_ptr(
+    struct domain *d, l1_pgentry_t spte, unsigned long smfn, unsigned int index)
+{
+    if ( shadow_mode_external(d) ) {
+        unsigned long gmfn;
+
+        ASSERT(shadow_lock_is_acquired(d));
+        gmfn = l1e_get_pfn(spte);
+        frame_table[gmfn].tlbflush_timestamp = smfn;
+        frame_table[gmfn].u.inuse.type_info &= ~PGT_va_mask;
+        frame_table[gmfn].u.inuse.type_info |= (unsigned long) index << PGT_va_shift;
+    }
+}
 
 /************************************************************************/
 #if CONFIG_PAGING_LEVELS <= 2
@@ -944,13 +965,15 @@
             //
             perfc_incrc(validate_pte_changes3);
 
-            if ( (l1e_get_flags(new_spte) & _PAGE_PRESENT) &&
-                 !shadow_get_page_from_l1e(new_spte, d) )
-                new_spte = l1e_empty();
             if ( l1e_get_flags(old_spte) & _PAGE_PRESENT )
             {
                 shadow_put_page_from_l1e(old_spte, d);
                 need_flush = 1;
+            }
+            if ( (l1e_get_flags(new_spte) & _PAGE_PRESENT) &&
+                 !shadow_get_page_from_l1e(new_spte, d) ) {
+                new_spte = l1e_empty();
+                need_flush = -1; /* need to unshadow the page */
             }
         }
         else
@@ -1611,10 +1634,11 @@
             if ( l1e_get_flags(old_spte) & _PAGE_PRESENT )
                 shadow_put_page_from_l1e(old_spte, d);
         }
-    }
-
+
+    }
+
+    set_guest_back_ptr(d, new_spte, l2e_get_pfn(sl2e), l1_table_offset(va));
     shadow_linear_pg_table[l1_linear_offset(va)] = new_spte;
-
     shadow_update_min_max(l2e_get_pfn(sl2e), l1_table_offset(va));
 }
 #endif
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/asm-x86/vmx.h
--- a/xen/include/asm-x86/vmx.h Fri Oct 21 19:58:39 2005
+++ b/xen/include/asm-x86/vmx.h Mon Oct 24 15:08:13 2005
@@ -34,6 +34,7 @@
 extern void vmx_asm_do_resume(void);
 extern void vmx_asm_do_launch(void);
 extern void vmx_intr_assist(void);
+extern void pic_irq_request(int *interrupt_request, int level);
 
 extern void arch_vmx_do_launch(struct vcpu *);
 extern void arch_vmx_do_resume(struct vcpu *);
@@ -64,6 +65,7 @@
     CPU_BASED_MWAIT_EXITING | \
     CPU_BASED_MOV_DR_EXITING | \
     CPU_BASED_ACTIVATE_IO_BITMAP | \
+    CPU_BASED_USE_TSC_OFFSETING  | \
     CPU_BASED_UNCOND_IO_EXITING \
     )
 
@@ -502,6 +504,11 @@
     return 0;
 }
 
+static inline unsigned int vmx_get_vcpu_nr(struct domain *d)
+{
+    return d->arch.vmx_platform.nr_vcpu;
+}
+
 static inline shared_iopage_t *get_sp(struct domain *d)
 {
     return (shared_iopage_t *) d->arch.vmx_platform.shared_page_va;
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/asm-x86/vmx_intercept.h
--- a/xen/include/asm-x86/vmx_intercept.h       Fri Oct 21 19:58:39 2005
+++ b/xen/include/asm-x86/vmx_intercept.h       Mon Oct 24 15:08:13 2005
@@ -8,49 +8,52 @@
 #include <xen/errno.h>
 #include <public/io/ioreq.h>
 
-#define MAX_IO_HANDLER 10
+#define MAX_IO_HANDLER              8
 
-typedef int (*intercept_action_t)(ioreq_t*);
+#define VMX_PORTIO                  0
+#define VMX_MMIO                    1
 
-enum {PORTIO, MMIO};
+typedef int (*intercept_action_t)(ioreq_t *);
 
-struct vmx_handler_t {
-    int num_slot;
-    struct {
-        unsigned long       addr;
-        int type;
-        unsigned long       offset;
-        intercept_action_t  action;
-    } hdl_list[MAX_IO_HANDLER];
+struct io_handler {
+    int                 type;
+    unsigned long       addr;
+    unsigned long       size;
+    intercept_action_t  action;
+};
+
+struct vmx_io_handler {
+    int     num_slot;
+    struct  io_handler hdl_list[MAX_IO_HANDLER];
 };
 
 /* global io interception point in HV */
 extern int vmx_io_intercept(ioreq_t *p, int type);
-extern int register_io_handler(unsigned long addr, unsigned long offset, 
+extern int register_io_handler(unsigned long addr, unsigned long size,
                                intercept_action_t action, int type);
 
 static inline int vmx_portio_intercept(ioreq_t *p)
 {
-    return vmx_io_intercept(p, PORTIO);
+    return vmx_io_intercept(p, VMX_PORTIO);
 }
 
 static inline int vmx_mmio_intercept(ioreq_t *p)
 {
-    return vmx_io_intercept(p, MMIO);
+    return vmx_io_intercept(p, VMX_MMIO);
 }
 
-static inline int register_portio_handler(unsigned long addr, 
-                                          unsigned long offset, 
+static inline int register_portio_handler(unsigned long addr,
+                                          unsigned long size,
                                           intercept_action_t action)
 {
-    return register_io_handler(addr, offset, action, PORTIO);
+    return register_io_handler(addr, size, action, VMX_PORTIO);
 }
 
-static inline int register_mmio_handler(unsigned long addr, 
-                                        unsigned long offset, 
+static inline int register_mmio_handler(unsigned long addr,
+                                        unsigned long size,
                                         intercept_action_t action)
 {
-    return register_io_handler(addr, offset, action, MMIO);
+    return register_io_handler(addr, size, action, VMX_MMIO);
 }
 
 #endif /* _VMX_INTERCEPT_H */
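
The reworked vmx_intercept.h replaces the old (addr, offset) handler slots with explicit (type, addr, size) ranges and caps the table at eight entries. The sketch below is a self-contained model of that table: the struct layout mirrors the patch, but the registration and range-matching logic is only a plausible reading of how vmx_io_intercept() might consume it, and the simplified handler signature drops the real ioreq_t argument.

/* Editorial sketch, not part of the changeset: a standalone model of the
 * (type, addr, size) handler table.  The lookup logic and the simplified
 * action signature are assumptions, not the real vmx_io_intercept(). */
#include <stdio.h>

#define MAX_IO_HANDLER 8
#define VMX_PORTIO     0
#define VMX_MMIO       1

typedef int (*intercept_action_t)(unsigned long addr);  /* real code takes ioreq_t * */

struct io_handler {
    int                 type;
    unsigned long       addr;
    unsigned long       size;
    intercept_action_t  action;
};

struct vmx_io_handler {
    int                num_slot;
    struct io_handler  hdl_list[MAX_IO_HANDLER];
};

static struct vmx_io_handler handlers;

static int register_io_handler(unsigned long addr, unsigned long size,
                               intercept_action_t action, int type)
{
    if (handlers.num_slot >= MAX_IO_HANDLER)
        return 0;                                   /* table full */
    handlers.hdl_list[handlers.num_slot++] =
        (struct io_handler){ type, addr, size, action };
    return 1;
}

/* dispatch an access to the first handler whose [addr, addr+size) covers it */
static int io_intercept(unsigned long addr, int type)
{
    for (int i = 0; i < handlers.num_slot; i++) {
        struct io_handler *h = &handlers.hdl_list[i];
        if (h->type == type && addr >= h->addr && addr < h->addr + h->size)
            return h->action(addr);
    }
    return 0;                                       /* not intercepted */
}

static int pit_action(unsigned long addr)
{
    printf("intercepted PIT port access at 0x%lx\n", addr);
    return 1;
}

int main(void)
{
    register_io_handler(0x40, 4, pit_action, VMX_PORTIO);  /* i8254 ports 0x40-0x43 */
    return io_intercept(0x43, VMX_PORTIO) ? 0 : 1;
}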
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/asm-x86/vmx_platform.h
--- a/xen/include/asm-x86/vmx_platform.h        Fri Oct 21 19:58:39 2005
+++ b/xen/include/asm-x86/vmx_platform.h        Mon Oct 24 15:08:13 2005
@@ -24,11 +24,12 @@
 #include <asm/e820.h>
 #include <asm/vmx_virpit.h>
 #include <asm/vmx_intercept.h>
+#include <public/io/vmx_vpic.h>
 
 #define MAX_OPERAND_NUM 2
 
-#define mk_operand(size, index, seg, flag) \
-    (((size) << 24) | ((index) << 16) | ((seg) << 8) | (flag))
+#define mk_operand(size_reg, index, seg, flag) \
+    (((size_reg) << 24) | ((index) << 16) | ((seg) << 8) | (flag))
 
 #define operand_size(operand)   \
       ((operand >> 24) & 0xFF)
@@ -63,6 +64,7 @@
 #define INSTR_MOVZ 8
 #define INSTR_STOS 9
 #define INSTR_TEST 10
+#define INSTR_BT 11
 
 struct instruction {
     __s8    instr; /* instruction type */
@@ -75,11 +77,14 @@
 
 #define MAX_INST_LEN      32
 
-struct virtual_platform_def {
-    unsigned long          *real_mode_data; /* E820, etc. */
+struct vmx_platform {
     unsigned long          shared_page_va;
-    struct vmx_virpit_t    vmx_pit;
-    struct vmx_handler_t   vmx_handler;
+    unsigned int           nr_vcpu;
+
+    struct vmx_virpit      vmx_pit;
+    struct vmx_io_handler  vmx_io_handler;
+    struct vmx_virpic      vmx_pic;
+    int                    interrupt_request;
 };
 
 extern void handle_mmio(unsigned long, unsigned long);
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/asm-x86/vmx_virpit.h
--- a/xen/include/asm-x86/vmx_virpit.h  Fri Oct 21 19:58:39 2005
+++ b/xen/include/asm-x86/vmx_virpit.h  Mon Oct 24 15:08:13 2005
@@ -16,15 +16,14 @@
 #define LSByte_multiple 2
 #define MSByte_multiple 3
 
-struct vmx_virpit_t {
+struct vmx_virpit {
     /* for simulation of counter 0 in mode 2*/
-    int vector;    /* the pit irq vector */
-    unsigned int period;  /* the frequency. e.g. 10ms*/
+    u32 period;                /* pit frequency in ns */
+    u64 period_cycles;                 /* pit frequency in cpu cycles */
     s_time_t scheduled;                 /* scheduled timer interrupt */
     unsigned int channel;  /* the pit channel, counter 0~2 */
-    u64  *intr_bitmap;
     unsigned int pending_intr_nr; /* the couner for pending timer interrupts */
-    unsigned long long inject_point; /* the time inject virt intr */
+    u64 inject_point; /* the time inject virt intr */
     struct ac_timer pit_timer;  /* periodic timer for mode 2*/
     int first_injected;                 /* flag to prevent shadow window */
 
@@ -39,6 +38,6 @@
 };
 
 /* to hook the ioreq packet to get the PIT initializaiton info */
-extern void vmx_hooks_assist(struct vcpu *d);
+extern void vmx_hooks_assist(struct vcpu *v);
 
 #endif /* _VMX_VIRPIT_H_ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/asm-x86/vmx_vmcs.h
--- a/xen/include/asm-x86/vmx_vmcs.h    Fri Oct 21 19:58:39 2005
+++ b/xen/include/asm-x86/vmx_vmcs.h    Mon Oct 24 15:08:13 2005
@@ -29,11 +29,14 @@
 
 #if defined (__x86_64__)
 extern void vmx_load_msrs(struct vcpu *n);
-void vmx_restore_msrs(struct vcpu *d);
+void vmx_restore_msrs(struct vcpu *v);
 #else
 #define vmx_load_msrs(_n)          ((void)0)
 #define vmx_restore_msrs(_v)       ((void)0)
 #endif
+
+void vmx_final_setup_guest(struct vcpu *v);
+void vmx_relinquish_resources(struct vcpu *v);
 
 void vmx_enter_scheduler(void);
 
@@ -55,7 +58,7 @@
 
 extern int vmcs_size;
 
-enum { 
+enum {
     VMX_INDEX_MSR_LSTAR = 0,
     VMX_INDEX_MSR_STAR,
     VMX_INDEX_MSR_CSTAR,
@@ -79,7 +82,7 @@
     struct cpu_user_regs   *inst_decoder_regs; /* current context */
 };
 
-#define PC_DEBUG_PORT   0x80 
+#define PC_DEBUG_PORT   0x80
 
 struct arch_vmx_struct {
     struct vmcs_struct      *vmcs;  /* VMCS pointer in virtual */
@@ -93,12 +96,13 @@
     struct msr_state        msr_content;
     struct mmio_op          mmio_op;  /* MMIO */
     void                    *io_bitmap_a, *io_bitmap_b;
+    u64                     tsc_offset;
 };
 
 #define vmx_schedule_tail(next)         \
     (next)->thread.arch_vmx.arch_vmx_schedule_tail((next))
 
-#define VMX_DOMAIN(ed)   ((ed)->arch.arch_vmx.flags)
+#define VMX_DOMAIN(v)   ((v)->arch.arch_vmx.flags)
 
 #define ARCH_VMX_VMCS_LOADED    0       /* VMCS has been loaded and active */
 #define ARCH_VMX_VMCS_LAUNCH    1       /* Needs VMCS launch */
@@ -278,7 +282,8 @@
 extern unsigned int opt_vmx_debug_level;
 #define VMX_DBG_LOG(level, _f, _a...)           \
     if ((level) & opt_vmx_debug_level)          \
-        printk("[VMX]" _f "\n", ## _a )
+        printk("[VMX:%d.%d] " _f "\n",          \
+                current->domain->domain_id, current->vcpu_id, ## _a)
 #else
 #define VMX_DBG_LOG(level, _f, _a...)
 #endif
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/acm.h
--- a/xen/include/public/acm.h  Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/acm.h  Mon Oct 24 15:08:13 2005
@@ -8,7 +8,7 @@
  *
  * Contributors:
  * Stefan Berger <stefanb@xxxxxxxxxxxxxx> 
- *     added network byte order support for binary policies
+ * added network byte order support for binary policies
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -41,24 +41,24 @@
 #endif
 
 /* default ssid reference value if not supplied */
-#define ACM_DEFAULT_SSID       0x0
+#define ACM_DEFAULT_SSID  0x0
 #define ACM_DEFAULT_LOCAL_SSID  0x0
 
 /* Internal ACM ERROR types */
-#define ACM_OK                          0
-#define ACM_UNDEF                      -1
-#define ACM_INIT_SSID_ERROR            -2
-#define ACM_INIT_SOID_ERROR            -3
-#define ACM_ERROR                      -4
+#define ACM_OK     0
+#define ACM_UNDEF   -1
+#define ACM_INIT_SSID_ERROR  -2
+#define ACM_INIT_SOID_ERROR  -3
+#define ACM_ERROR          -4
 
 /* External ACCESS DECISIONS */
-#define ACM_ACCESS_PERMITTED           0
-#define ACM_ACCESS_DENIED              -111
-#define ACM_NULL_POINTER_ERROR         -200
+#define ACM_ACCESS_PERMITTED        0
+#define ACM_ACCESS_DENIED           -111
+#define ACM_NULL_POINTER_ERROR      -200
 
 /* primary policy in lower 4 bits */
-#define ACM_NULL_POLICY        0
-#define ACM_CHINESE_WALL_POLICY        1
+#define ACM_NULL_POLICY 0
+#define ACM_CHINESE_WALL_POLICY 1
 #define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2
 
 /* combinations have secondary policy component in higher 4bit */
@@ -67,7 +67,7 @@
 
 /* policy: */
 #define ACM_POLICY_NAME(X) \
-       ((X) == (ACM_NULL_POLICY)) ? "NULL policy" :                        \
+ ((X) == (ACM_NULL_POLICY)) ? "NULL policy" :                        \
     ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL policy" :        \
    ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT policy" : \
    ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT policy" : \
@@ -77,17 +77,20 @@
  * whenever the interpretation of the related
  * policy's data structure changes
  */
-#define ACM_POLICY_VERSION     1
-#define ACM_CHWALL_VERSION     1
-#define ACM_STE_VERSION                1
+#define ACM_POLICY_VERSION 1
+#define ACM_CHWALL_VERSION 1
+#define ACM_STE_VERSION  1
 
 /* defines a ssid reference used by xen */
-typedef u32 ssidref_t;
+typedef uint32_t ssidref_t;
+
+/* hooks that are known to domains */
+enum acm_hook_type {NONE=0, SHARING};
 
 /* -------security policy relevant type definitions-------- */
 
 /* type identifier; compares to "equal" or "not equal" */
-typedef u16 domaintype_t;
+typedef uint16_t domaintype_t;
 
 /* CHINESE WALL POLICY DATA STRUCTURES
  *
@@ -109,7 +112,7 @@
  *    with type i and is "1" otherwise.
  */
 /* high-16 = version, low-16 = check magic */
-#define ACM_MAGIC              0x0001debc
+#define ACM_MAGIC  0x0001debc
 
 /* each offset in bytes from start of the struct they
  * are part of */
@@ -123,62 +126,72 @@
  * tools that assume packed representations (e.g. the java tool)
  */
 struct acm_policy_buffer {
-       u32 policy_version; /* ACM_POLICY_VERSION */
-    u32 magic;
-       u32 len;
-       u32 primary_policy_code;
-       u32 primary_buffer_offset;
-       u32 secondary_policy_code;
-       u32 secondary_buffer_offset;
+    uint32_t policy_version; /* ACM_POLICY_VERSION */
+    uint32_t magic;
+    uint32_t len;
+    uint32_t primary_policy_code;
+    uint32_t primary_buffer_offset;
+    uint32_t secondary_policy_code;
+    uint32_t secondary_buffer_offset;
 };
 
 struct acm_chwall_policy_buffer {
-       u32 policy_version; /* ACM_CHWALL_VERSION */
-       u32 policy_code;
-       u32 chwall_max_types;
-       u32 chwall_max_ssidrefs;
-       u32 chwall_max_conflictsets;
-       u32 chwall_ssid_offset;
-       u32 chwall_conflict_sets_offset;
-       u32 chwall_running_types_offset;
-       u32 chwall_conflict_aggregate_offset;
+    uint32_t policy_version; /* ACM_CHWALL_VERSION */
+    uint32_t policy_code;
+    uint32_t chwall_max_types;
+    uint32_t chwall_max_ssidrefs;
+    uint32_t chwall_max_conflictsets;
+    uint32_t chwall_ssid_offset;
+    uint32_t chwall_conflict_sets_offset;
+    uint32_t chwall_running_types_offset;
+    uint32_t chwall_conflict_aggregate_offset;
 };
 
 struct acm_ste_policy_buffer {
-       u32 policy_version; /* ACM_STE_VERSION */
-       u32 policy_code;
-       u32 ste_max_types;
-       u32 ste_max_ssidrefs;
-       u32 ste_ssid_offset;
+    uint32_t policy_version; /* ACM_STE_VERSION */
+    uint32_t policy_code;
+    uint32_t ste_max_types;
+    uint32_t ste_max_ssidrefs;
+    uint32_t ste_ssid_offset;
 };
 
 struct acm_stats_buffer {
-    u32 magic;
-       u32 len;
-       u32 primary_policy_code;
-       u32 primary_stats_offset;
-       u32 secondary_policy_code;
-       u32 secondary_stats_offset;
+    uint32_t magic;
+    uint32_t len;
+    uint32_t primary_policy_code;
+    uint32_t primary_stats_offset;
+    uint32_t secondary_policy_code;
+    uint32_t secondary_stats_offset;
 };
 
 struct acm_ste_stats_buffer {
-       u32 ec_eval_count;
-       u32 gt_eval_count;
-       u32 ec_denied_count;
-       u32 gt_denied_count; 
-       u32 ec_cachehit_count;
-       u32 gt_cachehit_count;
+    uint32_t ec_eval_count;
+    uint32_t gt_eval_count;
+    uint32_t ec_denied_count;
+    uint32_t gt_denied_count; 
+    uint32_t ec_cachehit_count;
+    uint32_t gt_cachehit_count;
 };
 
 struct acm_ssid_buffer {
-       u32 len;
+    uint32_t len;
     ssidref_t ssidref;
-       u32 primary_policy_code;
-       u32 primary_max_types;
-    u32 primary_types_offset;
-       u32 secondary_policy_code;
-    u32 secondary_max_types;
-       u32 secondary_types_offset;
+    uint32_t primary_policy_code;
+    uint32_t primary_max_types;
+    uint32_t primary_types_offset;
+    uint32_t secondary_policy_code;
+    uint32_t secondary_max_types;
+    uint32_t secondary_types_offset;
 };
 
 #endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
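
The buffer headers above describe a policy blob whose per-policy payloads are located by byte offsets from the start of the blob. As a reading aid, the self-contained sketch below mirrors struct acm_policy_buffer from this patch and shows one way a consumer might validate the magic and version fields before following primary_buffer_offset; the bounds checks are illustrative and not taken from the ACM code.

/* Editorial sketch, not part of the changeset: locating the primary policy
 * payload inside a policy blob.  The struct mirrors this patch; the checks
 * are an assumption about reasonable validation. */
#include <stdint.h>
#include <stddef.h>

#define ACM_MAGIC          0x0001debc
#define ACM_POLICY_VERSION 1

struct acm_policy_buffer {
    uint32_t policy_version;
    uint32_t magic;
    uint32_t len;
    uint32_t primary_policy_code;
    uint32_t primary_buffer_offset;
    uint32_t secondary_policy_code;
    uint32_t secondary_buffer_offset;
};

static const void *acm_primary_policy(const void *blob, size_t blob_len)
{
    const struct acm_policy_buffer *hdr = blob;

    if (blob_len < sizeof(*hdr) ||
        hdr->magic != ACM_MAGIC ||
        hdr->policy_version != ACM_POLICY_VERSION ||
        hdr->len > blob_len ||
        hdr->primary_buffer_offset >= hdr->len)
        return NULL;                    /* malformed or truncated blob */

    return (const unsigned char *)blob + hdr->primary_buffer_offset;
}

int main(void)
{
    struct {
        struct acm_policy_buffer hdr;
        unsigned char payload[16];
    } blob = { { 0 }, { 0 } };

    blob.hdr.policy_version        = ACM_POLICY_VERSION;
    blob.hdr.magic                 = ACM_MAGIC;
    blob.hdr.len                   = sizeof(blob);
    blob.hdr.primary_buffer_offset = sizeof(blob.hdr);

    return acm_primary_policy(&blob, sizeof(blob)) != NULL ? 0 : 1;
}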
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/acm_ops.h
--- a/xen/include/public/acm_ops.h      Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/acm_ops.h      Mon Oct 24 15:08:13 2005
@@ -1,4 +1,3 @@
-
 /******************************************************************************
  * acm_ops.h
  *
@@ -28,54 +27,81 @@
  * This makes sure that old versions of acm tools will stop working in a
  * well-defined way (rather than crashing the machine, for instance).
  */
-#define ACM_INTERFACE_VERSION   0xAAAA0004
+#define ACM_INTERFACE_VERSION   0xAAAA0005
 
 /************************************************************************/
 
-#define ACM_SETPOLICY          4
-typedef struct acm_setpolicy {
+#define ACM_SETPOLICY         4
+struct acm_setpolicy {
     /* OUT variables */
     void *pushcache;
-    u16 pushcache_size;
-} acm_setpolicy_t;
+    uint32_t pushcache_size;
+};
 
 
-#define ACM_GETPOLICY          5
-typedef struct acm_getpolicy {
+#define ACM_GETPOLICY         5
+struct acm_getpolicy {
     /* OUT variables */
     void *pullcache;
-    u16 pullcache_size;
-} acm_getpolicy_t;
+    uint32_t pullcache_size;
+};
 
 
-#define ACM_DUMPSTATS          6
-typedef struct acm_dumpstats {
+#define ACM_DUMPSTATS         6
+struct acm_dumpstats {
     void *pullcache;
-    u16 pullcache_size;
-} acm_dumpstats_t;
+    uint32_t pullcache_size;
+};
 
 
-#define ACM_GETSSID            7
-enum get_type {UNSET, SSIDREF, DOMAINID};
-typedef struct acm_getssid {
-       enum get_type get_ssid_by;
-       union {
-               domaintype_t domainid;
-               ssidref_t    ssidref;
-       } id;
+#define ACM_GETSSID           7
+enum get_type {UNSET=0, SSIDREF, DOMAINID};
+struct acm_getssid {
+    enum get_type get_ssid_by;
+    union {
+        domaintype_t domainid;
+        ssidref_t    ssidref;
+    } id;
     void *ssidbuf;
-    u16 ssidbuf_size;
-} acm_getssid_t;
+    uint16_t ssidbuf_size;
+};
 
-typedef struct acm_op {
-    u32 cmd;
-    u32 interface_version;      /* ACM_INTERFACE_VERSION */
+#define ACM_GETDECISION        8
+struct acm_getdecision {
+    enum get_type get_decision_by1; /* in */
+    enum get_type get_decision_by2;
     union {
-        acm_setpolicy_t setpolicy;
-        acm_getpolicy_t getpolicy;
-        acm_dumpstats_t dumpstats;
-        acm_getssid_t getssid;
+        domaintype_t domainid;
+        ssidref_t    ssidref;
+    } id1;
+    union {
+        domaintype_t domainid;
+        ssidref_t    ssidref;
+    } id2;
+    enum acm_hook_type hook;
+    int acm_decision;           /* out */
+};
+
+struct acm_op {
+    uint32_t cmd;
+    uint32_t interface_version;      /* ACM_INTERFACE_VERSION */
+    union {
+        struct acm_setpolicy setpolicy;
+        struct acm_getpolicy getpolicy;
+        struct acm_dumpstats dumpstats;
+        struct acm_getssid getssid;
+        struct acm_getdecision getdecision;
     } u;
-} acm_op_t;
+};
 
 #endif                          /* __XEN_PUBLIC_ACM_OPS_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
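
For reference, a minimal caller-side sketch of the new ACM_GETDECISION operation defined above. Only the population of struct acm_op is shown; the hypercall plumbing, the include paths, and the acm_hook_type value are assumptions taken from elsewhere in the ACM tree, not from this patch.

    #include <xen/acm.h>      /* ssidref_t, enum acm_hook_type (assumed path) */
    #include <xen/acm_ops.h>

    /* Ask whether two ssidrefs are allowed to interact under 'hook'. */
    static void prepare_getdecision(struct acm_op *op,
                                    ssidref_t s1, ssidref_t s2,
                                    enum acm_hook_type hook)
    {
        op->cmd = ACM_GETDECISION;
        op->interface_version = ACM_INTERFACE_VERSION;   /* 0xAAAA0005 */
        op->u.getdecision.get_decision_by1 = SSIDREF;
        op->u.getdecision.get_decision_by2 = SSIDREF;
        op->u.getdecision.id1.ssidref = s1;
        op->u.getdecision.id2.ssidref = s2;
        op->u.getdecision.hook = hook;
        /* op->u.getdecision.acm_decision is filled in by the hypervisor. */
    }
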
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h    Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/arch-ia64.h    Mon Oct 24 15:08:13 2005
@@ -13,30 +13,30 @@
 
 #ifndef __ASSEMBLY__
 
-#define MAX_NR_SECTION  32  // at most 32 memory holes
-typedef struct {
-    unsigned long      start;  /* start of memory hole */
-    unsigned long      end;    /* end of memory hole */
+#define MAX_NR_SECTION  32  /* at most 32 memory holes */
+typedef struct {
+    unsigned long start;  /* start of memory hole */
+    unsigned long end;    /* end of memory hole */
 } mm_section_t;
 
 typedef struct {
-    unsigned long      mfn : 56;
-    unsigned long      type: 8;
+    unsigned long mfn : 56;
+    unsigned long type: 8;
 } pmt_entry_t;
 
-#define GPFN_MEM               (0UL << 56)     /* Guest pfn is normal mem */
-#define GPFN_FRAME_BUFFER      (1UL << 56)     /* VGA framebuffer */
-#define GPFN_LOW_MMIO          (2UL << 56)     /* Low MMIO range */
-#define GPFN_PIB               (3UL << 56)     /* PIB base */
-#define GPFN_IOSAPIC           (4UL << 56)     /* IOSAPIC base */
-#define GPFN_LEGACY_IO         (5UL << 56)     /* Legacy I/O base */
-#define GPFN_GFW               (6UL << 56)     /* Guest Firmware */
-#define GPFN_HIGH_MMIO         (7UL << 56)     /* High MMIO range */
-
-#define GPFN_IO_MASK           (7UL << 56)     /* Guest pfn is I/O type */
-#define GPFN_INV_MASK          (31UL << 59)    /* Guest pfn is invalid */
-
-#define INVALID_MFN              (~0UL)
+#define GPFN_MEM          (0UL << 56) /* Guest pfn is normal mem */
+#define GPFN_FRAME_BUFFER (1UL << 56) /* VGA framebuffer */
+#define GPFN_LOW_MMIO     (2UL << 56) /* Low MMIO range */
+#define GPFN_PIB          (3UL << 56) /* PIB base */
+#define GPFN_IOSAPIC      (4UL << 56) /* IOSAPIC base */
+#define GPFN_LEGACY_IO    (5UL << 56) /* Legacy I/O base */
+#define GPFN_GFW          (6UL << 56) /* Guest Firmware */
+#define GPFN_HIGH_MMIO    (7UL << 56) /* High MMIO range */
+
+#define GPFN_IO_MASK     (7UL << 56)  /* Guest pfn is I/O type */
+#define GPFN_INV_MASK    (31UL << 59) /* Guest pfn is invalid */
+
+#define INVALID_MFN       (~0UL)
 
 #define MEM_G   (1UL << 30)    
 #define MEM_M   (1UL << 20)    
@@ -75,198 +75,198 @@
 } tsc_timestamp_t; /* 8 bytes */
 
 struct pt_fpreg {
-        union {
-                unsigned long bits[2];
-                long double __dummy;    /* force 16-byte alignment */
-        } u;
+    union {
+        unsigned long bits[2];
+        long double __dummy;    /* force 16-byte alignment */
+    } u;
 };
 
 typedef struct cpu_user_regs{
-       /* The following registers are saved by SAVE_MIN: */
-       unsigned long b6;               /* scratch */
-       unsigned long b7;               /* scratch */
-
-       unsigned long ar_csd;           /* used by cmp8xchg16 (scratch) */
-       unsigned long ar_ssd;           /* reserved for future use (scratch) */
-
-       unsigned long r8;               /* scratch (return value register 0) */
-       unsigned long r9;               /* scratch (return value register 1) */
-       unsigned long r10;              /* scratch (return value register 2) */
-       unsigned long r11;              /* scratch (return value register 3) */
-
-       unsigned long cr_ipsr;          /* interrupted task's psr */
-       unsigned long cr_iip;           /* interrupted task's instruction pointer */
-       unsigned long cr_ifs;           /* interrupted task's function state */
-
-       unsigned long ar_unat;          /* interrupted task's NaT register (preserved) */
-       unsigned long ar_pfs;           /* prev function state  */
-       unsigned long ar_rsc;           /* RSE configuration */
-       /* The following two are valid only if cr_ipsr.cpl > 0: */
-       unsigned long ar_rnat;          /* RSE NaT */
-       unsigned long ar_bspstore;      /* RSE bspstore */
-
-       unsigned long pr;               /* 64 predicate registers (1 bit each) */
-       unsigned long b0;               /* return pointer (bp) */
-       unsigned long loadrs;           /* size of dirty partition << 16 */
-
-       unsigned long r1;               /* the gp pointer */
-       unsigned long r12;              /* interrupted task's memory stack pointer */
-       unsigned long r13;              /* thread pointer */
-
-       unsigned long ar_fpsr;          /* floating point status (preserved) */
-       unsigned long r15;              /* scratch */
-
-       /* The remaining registers are NOT saved for system calls.  */
-
-       unsigned long r14;              /* scratch */
-       unsigned long r2;               /* scratch */
-       unsigned long r3;               /* scratch */
-    unsigned long r16;         /* scratch */
-       unsigned long r17;              /* scratch */
-    unsigned long r18;         /* scratch */
-       unsigned long r19;              /* scratch */
-    unsigned long r20;         /* scratch */
-       unsigned long r21;              /* scratch */
-       unsigned long r22;              /* scratch */
-       unsigned long r23;              /* scratch */
-       unsigned long r24;              /* scratch */
-       unsigned long r25;              /* scratch */
-       unsigned long r26;              /* scratch */
-       unsigned long r27;              /* scratch */
-       unsigned long r28;              /* scratch */
-       unsigned long r29;              /* scratch */
-       unsigned long r30;              /* scratch */
-       unsigned long r31;              /* scratch */
-       unsigned long ar_ccv;           /* compare/exchange value (scratch) */
-
-       /*
-        * Floating point registers that the kernel considers scratch:
-        */
-       struct pt_fpreg f6;             /* scratch */
-       struct pt_fpreg f7;             /* scratch */
-       struct pt_fpreg f8;             /* scratch */
-       struct pt_fpreg f9;             /* scratch */
-       struct pt_fpreg f10;            /* scratch */
-       struct pt_fpreg f11;            /* scratch */
-       unsigned long r4;               /* preserved */
-    unsigned long r5;          /* preserved */
-       unsigned long r6;               /* preserved */
-    unsigned long r7;          /* preserved */
-       unsigned long eml_unat;    /* used for emulating instruction */
+    /* The following registers are saved by SAVE_MIN: */
+    unsigned long b6;  /* scratch */
+    unsigned long b7;  /* scratch */
+
+    unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
+    unsigned long ar_ssd; /* reserved for future use (scratch) */
+
+    unsigned long r8;  /* scratch (return value register 0) */
+    unsigned long r9;  /* scratch (return value register 1) */
+    unsigned long r10; /* scratch (return value register 2) */
+    unsigned long r11; /* scratch (return value register 3) */
+
+    unsigned long cr_ipsr; /* interrupted task's psr */
+    unsigned long cr_iip;  /* interrupted task's instruction pointer */
+    unsigned long cr_ifs;  /* interrupted task's function state */
+
+    unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
+    unsigned long ar_pfs;  /* prev function state  */
+    unsigned long ar_rsc;  /* RSE configuration */
+    /* The following two are valid only if cr_ipsr.cpl > 0: */
+    unsigned long ar_rnat;  /* RSE NaT */
+    unsigned long ar_bspstore; /* RSE bspstore */
+
+    unsigned long pr;  /* 64 predicate registers (1 bit each) */
+    unsigned long b0;  /* return pointer (bp) */
+    unsigned long loadrs;  /* size of dirty partition << 16 */
+
+    unsigned long r1;  /* the gp pointer */
+    unsigned long r12; /* interrupted task's memory stack pointer */
+    unsigned long r13; /* thread pointer */
+
+    unsigned long ar_fpsr;  /* floating point status (preserved) */
+    unsigned long r15;  /* scratch */
+
+ /* The remaining registers are NOT saved for system calls.  */
+
+    unsigned long r14;  /* scratch */
+    unsigned long r2;  /* scratch */
+    unsigned long r3;  /* scratch */
+    unsigned long r16;  /* scratch */
+    unsigned long r17;  /* scratch */
+    unsigned long r18;  /* scratch */
+    unsigned long r19;  /* scratch */
+    unsigned long r20;  /* scratch */
+    unsigned long r21;  /* scratch */
+    unsigned long r22;  /* scratch */
+    unsigned long r23;  /* scratch */
+    unsigned long r24;  /* scratch */
+    unsigned long r25;  /* scratch */
+    unsigned long r26;  /* scratch */
+    unsigned long r27;  /* scratch */
+    unsigned long r28;  /* scratch */
+    unsigned long r29;  /* scratch */
+    unsigned long r30;  /* scratch */
+    unsigned long r31;  /* scratch */
+    unsigned long ar_ccv;  /* compare/exchange value (scratch) */
+
+    /*
+     * Floating point registers that the kernel considers scratch:
+     */
+    struct pt_fpreg f6;  /* scratch */
+    struct pt_fpreg f7;  /* scratch */
+    struct pt_fpreg f8;  /* scratch */
+    struct pt_fpreg f9;  /* scratch */
+    struct pt_fpreg f10;  /* scratch */
+    struct pt_fpreg f11;  /* scratch */
+    unsigned long r4;  /* preserved */
+    unsigned long r5;  /* preserved */
+    unsigned long r6;  /* preserved */
+    unsigned long r7;  /* preserved */
+    unsigned long eml_unat;    /* used for emulating instruction */
     unsigned long rfi_pfs;     /* used for elulating rfi */
 
 }cpu_user_regs_t;
 
 typedef union {
-       unsigned long value;
-       struct {
-               int     a_int:1;
-               int     a_from_int_cr:1;
-               int     a_to_int_cr:1;
-               int     a_from_psr:1;
-               int     a_from_cpuid:1;
-               int     a_cover:1;
-               int     a_bsw:1;
-               long    reserved:57;
-       };
+    unsigned long value;
+    struct {
+        int a_int:1;
+        int a_from_int_cr:1;
+        int a_to_int_cr:1;
+        int a_from_psr:1;
+        int a_from_cpuid:1;
+        int a_cover:1;
+        int a_bsw:1;
+        long reserved:57;
+    };
 } vac_t;
 
 typedef union {
-       unsigned long value;
-       struct {
-               int     d_vmsw:1;
-               int     d_extint:1;
-               int     d_ibr_dbr:1;
-               int     d_pmc:1;
-               int     d_to_pmd:1;
-               int     d_itm:1;
-               long    reserved:58;
-       };
+    unsigned long value;
+    struct {
+        int d_vmsw:1;
+        int d_extint:1;
+        int d_ibr_dbr:1;
+        int d_pmc:1;
+        int d_to_pmd:1;
+        int d_itm:1;
+        long reserved:58;
+    };
 } vdc_t;
 
 typedef struct {
-       vac_t                   vac;
-       vdc_t                   vdc;
-       unsigned long           virt_env_vaddr;
-       unsigned long           reserved1[29];
-       unsigned long           vhpi;
-       unsigned long           reserved2[95];
-       union {
-         unsigned long         vgr[16];
-         unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
-       };
-       union {
-         unsigned long         vbgr[16];
-         unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
-       };
-       unsigned long           vnat;
-       unsigned long           vbnat;
-       unsigned long           vcpuid[5];
-       unsigned long           reserved3[11];
-       unsigned long           vpsr;
-       unsigned long           vpr;
-       unsigned long           reserved4[76];
-       union {
-         unsigned long         vcr[128];
-          struct {
-           unsigned long       dcr;            // CR0
-           unsigned long       itm;
-           unsigned long       iva;
-           unsigned long       rsv1[5];
-           unsigned long       pta;            // CR8
-           unsigned long       rsv2[7];
-           unsigned long       ipsr;           // CR16
-           unsigned long       isr;
-           unsigned long       rsv3;
-           unsigned long       iip;
-           unsigned long       ifa;
-           unsigned long       itir;
-           unsigned long       iipa;
-           unsigned long       ifs;
-           unsigned long       iim;            // CR24
-           unsigned long       iha;
-           unsigned long       rsv4[38];
-           unsigned long       lid;            // CR64
-           unsigned long       ivr;
-           unsigned long       tpr;
-           unsigned long       eoi;
-           unsigned long       irr[4];
-           unsigned long       itv;            // CR72
-           unsigned long       pmv;
-           unsigned long       cmcv;
-           unsigned long       rsv5[5];
-           unsigned long       lrr0;           // CR80
-           unsigned long       lrr1;
-           unsigned long       rsv6[46];
-          };
-       };
-       union {
-         unsigned long         reserved5[128];
-         struct {
-           unsigned long precover_ifs;
-           unsigned long unat;  // not sure if this is needed until NaT arch is done
-           int interrupt_collection_enabled; // virtual psr.ic
-           int interrupt_delivery_enabled; // virtual psr.i
-           int pending_interruption;
-           int incomplete_regframe;    // see SDM vol2 6.8
-           unsigned long delivery_mask[4];
-           int metaphysical_mode;      // 1 = use metaphys mapping, 0 = use virtual
-           int banknum;        // 0 or 1, which virtual register bank is active
-           unsigned long rrs[8];       // region registers
-           unsigned long krs[8];       // kernel registers
-           unsigned long pkrs[8];      // protection key registers
-           unsigned long tmp[8];       // temp registers (e.g. for hyperprivops)
-               // FIXME: tmp[8] temp'ly being used for virtual psr.pp
-         };
+    vac_t   vac;
+    vdc_t   vdc;
+    unsigned long  virt_env_vaddr;
+    unsigned long  reserved1[29];
+    unsigned long  vhpi;
+    unsigned long  reserved2[95];
+    union {
+        unsigned long  vgr[16];
+        unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
+    };
+    union {
+        unsigned long  vbgr[16];
+        unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
+    };
+    unsigned long  vnat;
+    unsigned long  vbnat;
+    unsigned long  vcpuid[5];
+    unsigned long  reserved3[11];
+    unsigned long  vpsr;
+    unsigned long  vpr;
+    unsigned long  reserved4[76];
+    union {
+        unsigned long  vcr[128];
+        struct {
+            unsigned long dcr;  // CR0
+            unsigned long itm;
+            unsigned long iva;
+            unsigned long rsv1[5];
+            unsigned long pta;  // CR8
+            unsigned long rsv2[7];
+            unsigned long ipsr;  // CR16
+            unsigned long isr;
+            unsigned long rsv3;
+            unsigned long iip;
+            unsigned long ifa;
+            unsigned long itir;
+            unsigned long iipa;
+            unsigned long ifs;
+            unsigned long iim;  // CR24
+            unsigned long iha;
+            unsigned long rsv4[38];
+            unsigned long lid;  // CR64
+            unsigned long ivr;
+            unsigned long tpr;
+            unsigned long eoi;
+            unsigned long irr[4];
+            unsigned long itv;  // CR72
+            unsigned long pmv;
+            unsigned long cmcv;
+            unsigned long rsv5[5];
+            unsigned long lrr0;  // CR80
+            unsigned long lrr1;
+            unsigned long rsv6[46];
         };
-       unsigned long           reserved6[3456];
-       unsigned long           vmm_avail[128];
-       unsigned long           reserved7[4096];
+    };
+    union {
+        unsigned long  reserved5[128];
+        struct {
+            unsigned long precover_ifs;
+            unsigned long unat;  // not sure if this is needed until NaT arch is done
+            int interrupt_collection_enabled; // virtual psr.ic
+            int interrupt_delivery_enabled; // virtual psr.i
+            int pending_interruption;
+            int incomplete_regframe; // see SDM vol2 6.8
+            unsigned long delivery_mask[4];
+            int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual
+            int banknum; // 0 or 1, which virtual register bank is active
+            unsigned long rrs[8]; // region registers
+            unsigned long krs[8]; // kernel registers
+            unsigned long pkrs[8]; // protection key registers
+            unsigned long tmp[8]; // temp registers (e.g. for hyperprivops)
+            // FIXME: tmp[8] temp'ly being used for virtual psr.pp
+        };
+    };
+    unsigned long  reserved6[3456];
+    unsigned long  vmm_avail[128];
+    unsigned long  reserved7[4096];
 } mapped_regs_t;
 
 typedef struct {
-       mapped_regs_t *privregs;
-       int evtchn_vector;
+    mapped_regs_t *privregs;
+    int evtchn_vector;
 } arch_vcpu_info_t;
 
 typedef mapped_regs_t vpd_t;
@@ -274,25 +274,35 @@
 #define __ARCH_HAS_VCPU_INFO
 
 typedef struct {
-       unsigned int flags;
-       unsigned long start_info_pfn;
-} arch_shared_info_t;          // DON'T PACK 
+    unsigned int flags;
+    unsigned long start_info_pfn;
+} arch_shared_info_t;  // DON'T PACK 
 
 typedef struct vcpu_guest_context {
 #define VGCF_FPU_VALID (1<<0)
 #define VGCF_VMX_GUEST (1<<1)
 #define VGCF_IN_KERNEL (1<<2)
-       unsigned long flags;       /* VGCF_* flags */
-       unsigned long pt_base;     /* PMT table base */
-       unsigned long share_io_pg; /* Shared page for I/O emulation */
-       unsigned long sys_pgnr;    /* System pages out of domain memory */
-       unsigned long vm_assist;   /* VMASST_TYPE_* bitmap, now none on IPF */
-
-       cpu_user_regs_t regs;
-       arch_vcpu_info_t vcpu;
-       arch_shared_info_t shared;
+    unsigned long flags;       /* VGCF_* flags */
+    unsigned long pt_base;     /* PMT table base */
+    unsigned long share_io_pg; /* Shared page for I/O emulation */
+    unsigned long sys_pgnr;    /* System pages out of domain memory */
+    unsigned long vm_assist;   /* VMASST_TYPE_* bitmap, now none on IPF */
+
+    cpu_user_regs_t regs;
+    arch_vcpu_info_t vcpu;
+    arch_shared_info_t shared;
 } vcpu_guest_context_t;
 
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __HYPERVISOR_IF_IA64_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/arch-x86_32.h
--- a/xen/include/public/arch-x86_32.h  Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/arch-x86_32.h  Mon Oct 24 15:08:13 2005
@@ -44,7 +44,6 @@
 /* And the trap vector is... */
 #define TRAP_INSTR "int $0x82"
 
-
 /*
  * Virtual addresses beyond this are not modifiable by guest OSes. The 
  * machine->physical mapping table starts at this address, read-only.
@@ -71,36 +70,36 @@
 #define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
 #define TI_SET_IF(_ti,_if)   ((_ti)->flags |= ((!!(_if))<<2))
 typedef struct trap_info {
-    u8       vector;       /* exception vector                              */
-    u8       flags;        /* 0-3: privilege level; 4: clear event enable?  */
-    u16      cs;           /* code selector                                 */
+    uint8_t       vector;  /* exception vector                              */
+    uint8_t       flags;   /* 0-3: privilege level; 4: clear event enable?  */
+    uint16_t      cs;      /* code selector                                 */
     unsigned long address; /* code offset                                   */
 } trap_info_t;
 
 typedef struct cpu_user_regs {
-    u32 ebx;
-    u32 ecx;
-    u32 edx;
-    u32 esi;
-    u32 edi;
-    u32 ebp;
-    u32 eax;
-    u16 error_code;    /* private */
-    u16 entry_vector;  /* private */
-    u32 eip;
-    u16 cs;
-    u8  saved_upcall_mask;
-    u8  _pad0;
-    u32 eflags;
-    u32 esp;
-    u16 ss, _pad1;
-    u16 es, _pad2;
-    u16 ds, _pad3;
-    u16 fs, _pad4;
-    u16 gs, _pad5;
+    uint32_t ebx;
+    uint32_t ecx;
+    uint32_t edx;
+    uint32_t esi;
+    uint32_t edi;
+    uint32_t ebp;
+    uint32_t eax;
+    uint16_t error_code;    /* private */
+    uint16_t entry_vector;  /* private */
+    uint32_t eip;
+    uint16_t cs;
+    uint8_t  saved_upcall_mask;
+    uint8_t  _pad0;
+    uint32_t eflags;
+    uint32_t esp;
+    uint16_t ss, _pad1;
+    uint16_t es, _pad2;
+    uint16_t ds, _pad3;
+    uint16_t fs, _pad4;
+    uint16_t gs, _pad5;
 } cpu_user_regs_t;
 
-typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
 
 /*
  * The following is all CPU context. Note that the fpu_ctxt block is filled 
@@ -129,12 +128,20 @@
 
 typedef struct arch_shared_info {
     unsigned long max_pfn;                  /* max pfn that appears in table */
+    /* Frame containing list of mfns containing list of mfns containing p2m. */
     unsigned long pfn_to_mfn_frame_list_list; 
-                                            /* frame containing list of mfns
-                                              containing list of mfns 
-                                              containing the p2m table. */
 } arch_shared_info_t;
 
 #endif
 
 #endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
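
As a usage sketch for the retyped trap_info_t and the TI_SET_* helpers above: describing a ring-3-callable software interrupt gate. The include path, FLAT_RING1_CS, the syscall_entry symbol and the set_trap_table registration step are assumptions about surrounding guest code, not part of this header change.

    #include <string.h>
    #include <xen/arch-x86_32.h>          /* trap_info_t, TI_SET_* (assumed path) */

    extern void syscall_entry(void);      /* hypothetical guest syscall entry point */

    static void fill_syscall_gate(trap_info_t ti[2])
    {
        ti[0].vector  = 0x80;                      /* uint8_t  exception vector    */
        ti[0].cs      = FLAT_RING1_CS;             /* uint16_t guest kernel %cs    */
        ti[0].address = (unsigned long)syscall_entry;
        TI_SET_DPL(&ti[0], 3);                     /* callable from ring 3         */
        TI_SET_IF(&ti[0], 1);                      /* clear event enable on entry  */
        memset(&ti[1], 0, sizeof(ti[1]));          /* zeroed entry ends the table
                                                      passed to set_trap_table     */
    }
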
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/arch-x86_64.h
--- a/xen/include/public/arch-x86_64.h  Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/arch-x86_64.h  Mon Oct 24 15:08:13 2005
@@ -99,7 +99,7 @@
 #define VGCF_IN_SYSCALL (1<<8)
 struct switch_to_user {
     /* Top of stack (%rsp at point of hypercall). */
-    u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
+    uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
     /* Bottom of switch_to_user stack frame. */
 };
 
@@ -118,54 +118,54 @@
 #define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
 #define TI_SET_IF(_ti,_if)   ((_ti)->flags |= ((!!(_if))<<2))
 typedef struct trap_info {
-    u8       vector;       /* exception vector                              */
-    u8       flags;        /* 0-3: privilege level; 4: clear event enable?  */
-    u16      cs;           /* code selector                                 */
+    uint8_t       vector;  /* exception vector                              */
+    uint8_t       flags;   /* 0-3: privilege level; 4: clear event enable?  */
+    uint16_t      cs;      /* code selector                                 */
     unsigned long address; /* code offset                                   */
 } trap_info_t;
 
 #ifdef __GNUC__
 /* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
-#define __DECL_REG(name) union { u64 r ## name, e ## name; }
+#define __DECL_REG(name) union { uint64_t r ## name, e ## name; }
 #else
 /* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
-#define __DECL_REG(name) u64 r ## name
+#define __DECL_REG(name) uint64_t r ## name
 #endif
 
 typedef struct cpu_user_regs {
-    u64 r15;
-    u64 r14;
-    u64 r13;
-    u64 r12;
+    uint64_t r15;
+    uint64_t r14;
+    uint64_t r13;
+    uint64_t r12;
     __DECL_REG(bp);
     __DECL_REG(bx);
-    u64 r11;
-    u64 r10;
-    u64 r9;
-    u64 r8;
+    uint64_t r11;
+    uint64_t r10;
+    uint64_t r9;
+    uint64_t r8;
     __DECL_REG(ax);
     __DECL_REG(cx);
     __DECL_REG(dx);
     __DECL_REG(si);
     __DECL_REG(di);
-    u32 error_code;    /* private */
-    u32 entry_vector;  /* private */
+    uint32_t error_code;    /* private */
+    uint32_t entry_vector;  /* private */
     __DECL_REG(ip);
-    u16 cs, _pad0[1];
-    u8  saved_upcall_mask;
-    u8  _pad1[3];
+    uint16_t cs, _pad0[1];
+    uint8_t  saved_upcall_mask;
+    uint8_t  _pad1[3];
     __DECL_REG(flags);
     __DECL_REG(sp);
-    u16 ss, _pad2[3];
-    u16 es, _pad3[3];
-    u16 ds, _pad4[3];
-    u16 fs, _pad5[3]; /* Non-zero => takes precedence over fs_base.      */
-    u16 gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_user. */
+    uint16_t ss, _pad2[3];
+    uint16_t es, _pad3[3];
+    uint16_t ds, _pad4[3];
+    uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base.     */
+    uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */
 } cpu_user_regs_t;
 
 #undef __DECL_REG
 
-typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
 
 /*
  * The following is all CPU context. Note that the fpu_ctxt block is filled 
@@ -190,19 +190,27 @@
     unsigned long syscall_callback_eip;
     unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
     /* Segment base addresses. */
-    u64           fs_base;
-    u64           gs_base_kernel;
-    u64           gs_base_user;
+    uint64_t      fs_base;
+    uint64_t      gs_base_kernel;
+    uint64_t      gs_base_user;
 } vcpu_guest_context_t;
 
 typedef struct arch_shared_info {
     unsigned long max_pfn;                  /* max pfn that appears in table */
+    /* Frame containing list of mfns containing list of mfns containing p2m. */
     unsigned long pfn_to_mfn_frame_list_list; 
-                                            /* frame containing list of mfns
-                                              containing list of mfns 
-                                              containing the p2m table. */
 } arch_shared_info_t;
 
 #endif /* !__ASSEMBLY__ */
 
 #endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/dom0_ops.h
--- a/xen/include/public/dom0_ops.h     Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/dom0_ops.h     Mon Oct 24 15:08:13 2005
@@ -44,7 +44,8 @@
 #define DOM0_CREATEDOMAIN      8
 typedef struct {
     /* IN parameters */
-    u32 ssidref;
+    uint32_t ssidref;
+    xen_domain_handle_t handle;
     /* IN/OUT parameters. */
     /* Identifier for new domain (auto-allocate if zero is specified). */
     domid_t domain;
@@ -53,7 +54,7 @@
 #define DOM0_DESTROYDOMAIN     9
 typedef struct {
     /* IN variables. */
-    domid_t      domain;
+    domid_t domain;
 } dom0_destroydomain_t;
 
 #define DOM0_PAUSEDOMAIN      10
@@ -82,22 +83,22 @@
 #define DOMFLAGS_CPUSHIFT       8
 #define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code.  */
 #define DOMFLAGS_SHUTDOWNSHIFT 16
-    u32      flags;
+    uint32_t flags;
     unsigned long tot_pages;
     unsigned long max_pages;
     unsigned long shared_info_frame;       /* MFN of shared_info struct */
-    u64      cpu_time;
-    u32      n_vcpu;
-    s32      vcpu_to_cpu[MAX_VIRT_CPUS];  /* current mapping   */
-    cpumap_t cpumap[MAX_VIRT_CPUS];       /* allowable mapping */
-    u32             ssidref;
+    uint64_t cpu_time;
+    uint32_t nr_online_vcpus;     /* Number of VCPUs currently online. */
+    uint32_t max_vcpu_id;         /* Maximum VCPUID in use by this domain. */
+    uint32_t ssidref;
+    xen_domain_handle_t handle;
 } dom0_getdomaininfo_t;
 
 #define DOM0_SETDOMAININFO      13
 typedef struct {
     /* IN variables. */
-    domid_t                   domain;
-    u16                       vcpu;
+    domid_t               domain;
+    uint16_t              vcpu;
     /* IN/OUT parameters */
     vcpu_guest_context_t *ctxt;
 } dom0_setdomaininfo_t;
@@ -105,29 +106,29 @@
 #define DOM0_MSR              15
 typedef struct {
     /* IN variables. */
-    u32 write;
-    u32 cpu_mask;
-    u32 msr;
-    u32 in1;
-    u32 in2;
-    /* OUT variables. */
-    u32 out1;
-    u32 out2;
+    uint32_t write;
+    uint32_t cpu_mask;
+    uint32_t msr;
+    uint32_t in1;
+    uint32_t in2;
+    /* OUT variables. */
+    uint32_t out1;
+    uint32_t out2;
 } dom0_msr_t;
 
 #define DOM0_DEBUG            16
 typedef struct {
     /* IN variables. */
-    domid_t domain;
-    u8  opcode;
-    u32 in1;
-    u32 in2;
-    u32 in3;
-    u32 in4;
-    /* OUT variables. */
-    u32 status;
-    u32 out1;
-    u32 out2;
+    domid_t  domain;
+    uint8_t  opcode;
+    uint32_t in1;
+    uint32_t in2;
+    uint32_t in3;
+    uint32_t in4;
+    /* OUT variables. */
+    uint32_t status;
+    uint32_t out1;
+    uint32_t out2;
 } dom0_debug_t;
 
 /*
@@ -137,9 +138,9 @@
 #define DOM0_SETTIME          17
 typedef struct {
     /* IN variables. */
-    u32 secs;
-    u32 nsecs;
-    u64 system_time;
+    uint32_t secs;
+    uint32_t nsecs;
+    uint64_t system_time;
 } dom0_settime_t;
 
 #define DOM0_GETPAGEFRAMEINFO 18
@@ -159,7 +160,7 @@
     domid_t domain;        /* To which domain does the frame belong?    */
     /* OUT variables. */
     /* Is the page PINNED to a type? */
-    u32 type;              /* see above type defs */
+    uint32_t type;              /* see above type defs */
 } dom0_getpageframeinfo_t;
 
 /*
@@ -168,10 +169,10 @@
 #define DOM0_READCONSOLE      19
 typedef struct {
     /* IN variables. */
-    u32      clear;        /* Non-zero -> clear after reading. */
+    uint32_t clear;        /* Non-zero -> clear after reading. */
     /* IN/OUT variables. */
     char    *buffer;       /* In: Buffer start; Out: Used buffer start */
-    u32      count;        /* In: Buffer size;  Out: Used buffer size  */
+    uint32_t count;        /* In: Buffer size;  Out: Used buffer size  */
 } dom0_readconsole_t;
 
 /* 
@@ -180,9 +181,9 @@
 #define DOM0_PINCPUDOMAIN     20
 typedef struct {
     /* IN variables. */
-    domid_t      domain;
-    u16          vcpu;
-    cpumap_t     *cpumap;
+    domid_t   domain;
+    uint16_t  vcpu;
+    cpumap_t cpumap;
 } dom0_pincpudomain_t;
 
 /* Get trace buffers machine base address */
@@ -192,13 +193,13 @@
 #define DOM0_TBUF_GET_INFO     0
 #define DOM0_TBUF_SET_CPU_MASK 1
 #define DOM0_TBUF_SET_EVT_MASK 2
-    u8 op;
+    uint8_t op;
     /* IN/OUT variables */
     unsigned long cpu_mask;
-    u32           evt_mask;
+    uint32_t      evt_mask;
     /* OUT variables */
     unsigned long buffer_mfn;
-    u32      size;
+    uint32_t size;
 } dom0_tbufcontrol_t;
 
 /*
@@ -206,14 +207,14 @@
  */
 #define DOM0_PHYSINFO         22
 typedef struct {
-    u32      threads_per_core;
-    u32      cores_per_socket;
-    u32      sockets_per_node;
-    u32      nr_nodes;
-    u32      cpu_khz;
+    uint32_t threads_per_core;
+    uint32_t cores_per_socket;
+    uint32_t sockets_per_node;
+    uint32_t nr_nodes;
+    uint32_t cpu_khz;
     unsigned long total_pages;
     unsigned long free_pages;
-    u32      hw_cap[8];
+    uint32_t hw_cap[8];
 } dom0_physinfo_t;
 
 /*
@@ -222,7 +223,7 @@
 #define DOM0_SCHED_ID        24
 typedef struct {
     /* OUT variable */
-    u32 sched_id;
+    uint32_t sched_id;
 } dom0_sched_id_t;
 
 /* 
@@ -241,16 +242,16 @@
 
 typedef struct dom0_shadow_control
 {
-    u32 fault_count;
-    u32 dirty_count;
-    u32 dirty_net_count;     
-    u32 dirty_block_count;     
+    uint32_t fault_count;
+    uint32_t dirty_count;
+    uint32_t dirty_net_count;     
+    uint32_t dirty_block_count;     
 } dom0_shadow_control_stats_t;
 
 typedef struct {
     /* IN variables. */
     domid_t        domain;
-    u32            op;
+    uint32_t       op;
     unsigned long *dirty_bitmap; /* pointer to locked buffer */
     /* IN/OUT variables. */
     unsigned long  pages;        /* size of buffer, updated with actual size */
@@ -286,10 +287,10 @@
     /* IN variables. */
     unsigned long pfn;
     unsigned long nr_pfns;
-    u32           type;
-    /* OUT variables. */
-    u32           handle;
-    u32           reg;
+    uint32_t      type;
+    /* OUT variables. */
+    uint32_t      handle;
+    uint32_t      reg;
 } dom0_add_memtype_t;
 
 /*
@@ -302,19 +303,19 @@
 #define DOM0_DEL_MEMTYPE         32
 typedef struct {
     /* IN variables. */
-    u32      handle;
-    u32      reg;
+    uint32_t handle;
+    uint32_t reg;
 } dom0_del_memtype_t;
 
 /* Read current type of an MTRR (x86-specific). */
 #define DOM0_READ_MEMTYPE        33
 typedef struct {
     /* IN variables. */
-    u32      reg;
+    uint32_t reg;
     /* OUT variables. */
     unsigned long pfn;
     unsigned long nr_pfns;
-    u32      type;
+    uint32_t type;
 } dom0_read_memtype_t;
 
 /* Interface for controlling Xen software performance counters. */
@@ -323,40 +324,55 @@
 #define DOM0_PERFCCONTROL_OP_RESET 1   /* Reset all counters to zero. */
 #define DOM0_PERFCCONTROL_OP_QUERY 2   /* Get perfctr information. */
 typedef struct {
-    u8      name[80];               /*  name of perf counter */
-    u32     nr_vals;                /* number of values for this counter */
-    u32     vals[64];               /* array of values */
+    uint8_t      name[80];             /*  name of perf counter */
+    uint32_t     nr_vals;              /* number of values for this counter */
+    uint32_t     vals[64];             /* array of values */
 } dom0_perfc_desc_t;
 typedef struct {
     /* IN variables. */
-    u32            op;                /*  DOM0_PERFCCONTROL_OP_??? */
-    /* OUT variables. */
-    u32            nr_counters;       /*  number of counters */
+    uint32_t       op;                /*  DOM0_PERFCCONTROL_OP_??? */
+    /* OUT variables. */
+    uint32_t       nr_counters;       /*  number of counters */
     dom0_perfc_desc_t *desc;          /*  counter information (or NULL) */
 } dom0_perfccontrol_t;
 
 #define DOM0_MICROCODE           35
 typedef struct {
     /* IN variables. */
-    void   *data;                     /* Pointer to microcode data */
-    u32     length;                   /* Length of microcode data. */
+    void    *data;                    /* Pointer to microcode data */
+    uint32_t length;                  /* Length of microcode data. */
 } dom0_microcode_t;
 
 #define DOM0_IOPORT_PERMISSION   36
 typedef struct {
-    domid_t domain;                   /* domain to be affected */
-    u16     first_port;               /* first port int range */
-    u16     nr_ports;                 /* size of port range */
-    u16     allow_access;             /* allow or deny access to range? */
+    domid_t  domain;                  /* domain to be affected */
+    uint16_t first_port;              /* first port int range */
+    uint16_t nr_ports;                /* size of port range */
+    uint16_t allow_access;            /* allow or deny access to range? */
 } dom0_ioport_permission_t;
 
 #define DOM0_GETVCPUCONTEXT      37
 typedef struct {
-    domid_t domain;                   /* domain to be affected */
-    u16     vcpu;                     /* vcpu # */
-    vcpu_guest_context_t *ctxt;       /* NB. IN/OUT variable. */
-    u64     cpu_time;                 
+    /* IN variables. */
+    domid_t  domain;                  /* domain to be affected */
+    uint16_t vcpu;                    /* vcpu # */
+    /* OUT variables. */
+    vcpu_guest_context_t *ctxt;
 } dom0_getvcpucontext_t;
+
+#define DOM0_GETVCPUINFO         43
+typedef struct {
+    /* IN variables. */
+    domid_t  domain;                  /* domain to be affected */
+    uint16_t vcpu;                    /* vcpu # */
+    /* OUT variables. */
+    uint8_t  online;                  /* currently online (not hotplugged)? */
+    uint8_t  blocked;                 /* blocked waiting for an event? */
+    uint8_t  running;                 /* currently scheduled on its CPU? */
+    uint64_t cpu_time;                /* total cpu time consumed (ns) */
+    uint32_t cpu;                     /* current mapping   */
+    cpumap_t cpumap;                  /* allowable mapping */
+} dom0_getvcpuinfo_t;
 
 #define DOM0_GETDOMAININFOLIST   38
 typedef struct {
@@ -382,14 +398,26 @@
     /* OUT variables. */
     int nr_map_entries;
     struct dom0_memory_map_entry {
-        u64 start, end;
+        uint64_t start, end;
         int is_ram;
     } *memory_map;
 } dom0_physical_memory_map_t;
 
-typedef struct {
-    u32 cmd;
-    u32 interface_version; /* DOM0_INTERFACE_VERSION */
+#define DOM0_MAX_VCPUS 41
+typedef struct {
+    domid_t domain;             /* domain to be affected */
+    unsigned int max;           /* maximum number of vcpus */
+} dom0_max_vcpus_t;
+
+#define DOM0_SETDOMAINHANDLE 44
+typedef struct {
+    domid_t domain;
+    xen_domain_handle_t handle;
+} dom0_setdomainhandle_t;
+
+typedef struct {
+    uint32_t cmd;
+    uint32_t interface_version; /* DOM0_INTERFACE_VERSION */
     union {
         dom0_createdomain_t      createdomain;
         dom0_pausedomain_t       pausedomain;
@@ -419,10 +447,24 @@
         dom0_microcode_t         microcode;
         dom0_ioport_permission_t ioport_permission;
         dom0_getvcpucontext_t    getvcpucontext;
+        dom0_getvcpuinfo_t       getvcpuinfo;
         dom0_getdomaininfolist_t getdomaininfolist;
         dom0_platform_quirk_t    platform_quirk;
         dom0_physical_memory_map_t physical_memory_map;
+        dom0_max_vcpus_t         max_vcpus;
+        dom0_setdomainhandle_t   setdomainhandle;
+        uint8_t                  pad[128];
     } u;
 } dom0_op_t;
 
 #endif /* __XEN_PUBLIC_DOM0_OPS_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
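
A tool-side sketch of the new DOM0_GETVCPUINFO request added above. Only the structure set-up is shown; how the dom0_op reaches Xen (privcmd ioctl or direct hypercall) and the include path are assumptions outside this header.

    #include <string.h>
    #include <xen/dom0_ops.h>             /* assumed include path */

    static void prepare_getvcpuinfo(dom0_op_t *op, domid_t dom, uint16_t vcpu)
    {
        memset(op, 0, sizeof(*op));
        op->cmd = DOM0_GETVCPUINFO;
        op->interface_version = DOM0_INTERFACE_VERSION;
        op->u.getvcpuinfo.domain = dom;
        op->u.getvcpuinfo.vcpu   = vcpu;
        /* After the call, op->u.getvcpuinfo.online/.blocked/.running,
         * .cpu_time (ns), .cpu and .cpumap describe the vcpu's state. */
    }
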
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/event_channel.h
--- a/xen/include/public/event_channel.h        Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/event_channel.h        Mon Oct 24 15:08:13 2005
@@ -15,13 +15,14 @@
  * is allocated in <dom> and returned as <port>.
  * NOTES:
  *  1. If the caller is unprivileged then <dom> must be DOMID_SELF.
+ *  2. <rdom> may be DOMID_SELF, allowing loopback connections.
  */
 #define EVTCHNOP_alloc_unbound    6
 typedef struct evtchn_alloc_unbound {
     /* IN parameters */
-    domid_t dom, remote_dom;
+    domid_t  dom, remote_dom;
     /* OUT parameters */
-    u32     port;
+    uint32_t port;
 } evtchn_alloc_unbound_t;
 
 /*
@@ -30,14 +31,16 @@
  * a port that is unbound and marked as accepting bindings from the calling
  * domain. A fresh port is allocated in the calling domain and returned as
  * <local_port>.
+ * NOTES:
+ *  2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
  */
 #define EVTCHNOP_bind_interdomain 0
 typedef struct evtchn_bind_interdomain {
     /* IN parameters. */
-    domid_t remote_dom;
-    u32     remote_port;
+    domid_t  remote_dom;
+    uint32_t remote_port;
     /* OUT parameters. */
-    u32     local_port;
+    uint32_t local_port;
 } evtchn_bind_interdomain_t;
 
 /*
@@ -51,10 +54,10 @@
 #define EVTCHNOP_bind_virq        1
 typedef struct evtchn_bind_virq {
     /* IN parameters. */
-    u32 virq;
-    u32 vcpu;
+    uint32_t virq;
+    uint32_t vcpu;
     /* OUT parameters. */
-    u32 port;
+    uint32_t port;
 } evtchn_bind_virq_t;
 
 /*
@@ -66,11 +69,11 @@
 #define EVTCHNOP_bind_pirq        2
 typedef struct evtchn_bind_pirq {
     /* IN parameters. */
-    u32 pirq;
+    uint32_t pirq;
 #define BIND_PIRQ__WILL_SHARE 1
-    u32 flags; /* BIND_PIRQ__* */
+    uint32_t flags; /* BIND_PIRQ__* */
     /* OUT parameters. */
-    u32 port;
+    uint32_t port;
 } evtchn_bind_pirq_t;
 
 /*
@@ -81,9 +84,9 @@
  */
 #define EVTCHNOP_bind_ipi         7
 typedef struct evtchn_bind_ipi {
-    u32 vcpu;
+    uint32_t vcpu;
     /* OUT parameters. */
-    u32 port;
+    uint32_t port;
 } evtchn_bind_ipi_t;
 
 /*
@@ -94,7 +97,7 @@
 #define EVTCHNOP_close            3
 typedef struct evtchn_close {
     /* IN parameters. */
-    u32 port;
+    uint32_t port;
 } evtchn_close_t;
 
 /*
@@ -104,7 +107,7 @@
 #define EVTCHNOP_send             4
 typedef struct evtchn_send {
     /* IN parameters. */
-    u32 port;
+    uint32_t port;
 } evtchn_send_t;
 
 /*
@@ -118,8 +121,8 @@
 #define EVTCHNOP_status           5
 typedef struct evtchn_status {
     /* IN parameters */
-    domid_t dom;
-    u32     port;
+    domid_t  dom;
+    uint32_t port;
     /* OUT parameters */
 #define EVTCHNSTAT_closed       0  /* Channel is not in use.                 */
 #define EVTCHNSTAT_unbound      1  /* Channel is waiting interdom connection.*/
@@ -127,18 +130,18 @@
 #define EVTCHNSTAT_pirq         3  /* Channel is bound to a phys IRQ line.   */
 #define EVTCHNSTAT_virq         4  /* Channel is bound to a virtual IRQ line */
 #define EVTCHNSTAT_ipi          5  /* Channel is bound to a virtual IPI line */
-    u32     status;
-    u32     vcpu;                  /* VCPU to which this channel is bound.   */
+    uint32_t status;
+    uint32_t vcpu;                 /* VCPU to which this channel is bound.   */
     union {
         struct {
-            domid_t dom;
+            domid_t  dom;
         } unbound; /* EVTCHNSTAT_unbound */
         struct {
-            domid_t dom;
-            u32     port;
+            domid_t  dom;
+            uint32_t port;
         } interdomain; /* EVTCHNSTAT_interdomain */
-        u32 pirq;      /* EVTCHNSTAT_pirq        */
-        u32 virq;      /* EVTCHNSTAT_virq        */
+        uint32_t pirq;      /* EVTCHNSTAT_pirq        */
+        uint32_t virq;      /* EVTCHNSTAT_virq        */
     } u;
 } evtchn_status_t;
 
@@ -155,12 +158,12 @@
 #define EVTCHNOP_bind_vcpu        8
 typedef struct evtchn_bind_vcpu {
     /* IN parameters. */
-    u32 port;
-    u32 vcpu;
+    uint32_t port;
+    uint32_t vcpu;
 } evtchn_bind_vcpu_t;
 
 typedef struct evtchn_op {
-    u32 cmd; /* EVTCHNOP_* */
+    uint32_t cmd; /* EVTCHNOP_* */
     union {
         evtchn_alloc_unbound_t    alloc_unbound;
         evtchn_bind_interdomain_t bind_interdomain;
@@ -175,3 +178,13 @@
 } evtchn_op_t;
 
 #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
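
To illustrate note 2 above (loopback connections), a sketch of allocating an unbound port bound back to the caller itself. DOMID_SELF comes from xen.h and HYPERVISOR_event_channel_op is the usual guest wrapper; both, and the include paths, are assumptions outside this header.

    #include <string.h>
    #include <xen/xen.h>                  /* DOMID_SELF (assumed path) */
    #include <xen/event_channel.h>

    static int alloc_loopback_port(uint32_t *port)
    {
        evtchn_op_t op;
        int rc;

        memset(&op, 0, sizeof(op));
        op.cmd = EVTCHNOP_alloc_unbound;
        op.u.alloc_unbound.dom        = DOMID_SELF;
        op.u.alloc_unbound.remote_dom = DOMID_SELF;  /* loopback, per note 2 */

        rc = HYPERVISOR_event_channel_op(&op);       /* guest wrapper, assumed */
        if (rc == 0)
            *port = op.u.alloc_unbound.port;
        return rc;
    }
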
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/grant_table.h
--- a/xen/include/public/grant_table.h  Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/grant_table.h  Mon Oct 24 15:08:13 2005
@@ -73,14 +73,14 @@
  */
 typedef struct grant_entry {
     /* GTF_xxx: various type and flag information.  [XEN,GST] */
-    u16     flags;
+    uint16_t     flags;
     /* The domain being granted foreign privileges. [GST] */
     domid_t domid;
     /*
      * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
      * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN]
      */
-    u32     frame;
+    uint32_t     frame;
 } grant_entry_t;
 
 /*
@@ -131,7 +131,7 @@
 /*
  * Reference to a grant entry in a specified domain's grant table.
  */
-typedef u16 grant_ref_t;
+typedef uint16_t grant_ref_t;
 
 /*
  * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
@@ -153,13 +153,13 @@
 #define GNTTABOP_map_grant_ref        0
 typedef struct gnttab_map_grant_ref {
     /* IN parameters. */
-    u64         host_addr;
-    domid_t     dom;
+    uint64_t host_addr;
+    domid_t  dom;
     grant_ref_t ref;
-    u16         flags;                /* GNTMAP_* */
-    /* OUT parameters. */
-    s16         handle;               /* +ve: handle; -ve: GNTST_* */
-    u64         dev_bus_addr;
+    uint16_t flags;               /* GNTMAP_* */
+    /* OUT parameters. */
+    int16_t  handle;              /* +ve: handle; -ve: GNTST_* */
+    uint64_t dev_bus_addr;
 } gnttab_map_grant_ref_t;
 
 /*
@@ -176,11 +176,11 @@
 #define GNTTABOP_unmap_grant_ref      1
 typedef struct gnttab_unmap_grant_ref {
     /* IN parameters. */
-    u64         host_addr;
-    u64         dev_bus_addr;
-    u16         handle;
-    /* OUT parameters. */
-    s16         status;               /* GNTST_* */
+    uint64_t host_addr;
+    uint64_t dev_bus_addr;
+    uint16_t handle;
+    /* OUT parameters. */
+    int16_t  status;              /* GNTST_* */
 } gnttab_unmap_grant_ref_t;
 
 /*
@@ -195,10 +195,10 @@
 #define GNTTABOP_setup_table          2
 typedef struct gnttab_setup_table {
     /* IN parameters. */
-    domid_t     dom;
-    u16         nr_frames;
-    /* OUT parameters. */
-    s16         status;               /* GNTST_* */
+    domid_t  dom;
+    uint16_t nr_frames;
+    /* OUT parameters. */
+    int16_t  status;              /* GNTST_* */
     unsigned long *frame_list;
 } gnttab_setup_table_t;
 
@@ -209,9 +209,9 @@
 #define GNTTABOP_dump_table           3
 typedef struct gnttab_dump_table {
     /* IN parameters. */
-    domid_t     dom;
-    /* OUT parameters. */
-    s16         status;               /* GNTST_* */
+    domid_t dom;
+    /* OUT parameters. */
+    int16_t status;               /* GNTST_* */
 } gnttab_dump_table_t;
 
 /*
@@ -223,10 +223,10 @@
 typedef struct {
     /* IN parameters. */
     unsigned long mfn;
-    domid_t     domid;
-    grant_ref_t ref;
-    /* OUT parameters. */
-    s16         status;
+    domid_t       domid;
+    grant_ref_t   ref;
+    /* OUT parameters. */
+    int16_t       status;
 } gnttab_transfer_t;
 
 /*
@@ -283,3 +283,13 @@
 }
 
 #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/io/blkif.h
--- a/xen/include/public/io/blkif.h     Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/io/blkif.h     Mon Oct 24 15:08:13 2005
@@ -12,9 +12,9 @@
 #include "ring.h"
 
 #ifndef blkif_vdev_t
-#define blkif_vdev_t   u16
+#define blkif_vdev_t   uint16_t
 #endif
-#define blkif_sector_t u64
+#define blkif_sector_t uint64_t
 
 #define BLKIF_OP_READ      0
 #define BLKIF_OP_WRITE     1
@@ -30,8 +30,8 @@
 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 
 typedef struct blkif_request {
-    u8             operation;    /* BLKIF_OP_???                         */
-    u8             nr_segments;  /* number of segments                   */
+    uint8_t        operation;    /* BLKIF_OP_???                         */
+    uint8_t        nr_segments;  /* number of segments                   */
     blkif_vdev_t   handle;       /* only for read/write requests         */
     unsigned long  id;           /* private guest value, echoed in resp  */
     blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
@@ -51,8 +51,8 @@
 
 typedef struct blkif_response {
     unsigned long   id;              /* copied from request */
-    u8              operation;       /* copied from request */
-    s16             status;          /* BLKIF_RSP_???       */
+    uint8_t         operation;       /* copied from request */
+    int16_t         status;          /* BLKIF_RSP_???       */
 } blkif_response_t;
 
 #define BLKIF_RSP_ERROR  -1 /* non-specific 'error' */
@@ -72,3 +72,13 @@
 #define VDISK_READONLY     0x4
 
 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/io/console.h
--- a/xen/include/public/io/console.h   Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/io/console.h   Mon Oct 24 15:08:13 2005
@@ -9,7 +9,7 @@
 #ifndef __XEN_PUBLIC_IO_CONSOLE_H__
 #define __XEN_PUBLIC_IO_CONSOLE_H__
 
-typedef u32 XENCONS_RING_IDX;
+typedef uint32_t XENCONS_RING_IDX;
 
 #define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))
 
@@ -21,3 +21,13 @@
 };
 
 #endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/io/ioreq.h
--- a/xen/include/public/io/ioreq.h     Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/io/ioreq.h     Mon Oct 24 15:08:13 2005
@@ -60,8 +60,10 @@
 #define INTR_LEN        (MAX_VECTOR/(BITS_PER_BYTE * sizeof(uint64_t)))
 
 typedef struct {
-    uint64_t pic_intr[INTR_LEN];
-    uint64_t pic_mask[INTR_LEN];
+    uint16_t  pic_elcr;
+    uint16_t   pic_irr;
+    uint16_t   pic_last_irr;
+    uint16_t   pic_clear_irr;
     int      eport; /* Event channel port */
 } global_iodata_t;
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/io/netif.h
--- a/xen/include/public/io/netif.h     Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/io/netif.h     Mon Oct 24 15:08:13 2005
@@ -11,34 +11,34 @@
 
 typedef struct netif_tx_request {
     grant_ref_t gref;      /* Reference to buffer page */
-    u16      offset:15;    /* Offset within buffer page */
-    u16      csum_blank:1; /* Proto csum field blank?   */
-    u16      id;           /* Echoed in response message. */
-    u16      size;         /* Packet size in bytes.       */
+    uint16_t offset:15;    /* Offset within buffer page */
+    uint16_t csum_blank:1; /* Proto csum field blank?   */
+    uint16_t id;           /* Echoed in response message. */
+    uint16_t size;         /* Packet size in bytes.       */
 } netif_tx_request_t;
 
 typedef struct netif_tx_response {
-    u16      id;
-    s8       status;
+    uint16_t id;
+    int8_t   status;
 } netif_tx_response_t;
 
 typedef struct {
-    u16       id;       /* Echoed in response message.        */
-    grant_ref_t gref;  /* Reference to incoming granted frame */
+    uint16_t    id;        /* Echoed in response message.        */
+    grant_ref_t gref;      /* Reference to incoming granted frame */
 } netif_rx_request_t;
 
 typedef struct {
-    u16      offset;     /* Offset in page of start of received packet  */
-    u16      csum_valid; /* Protocol checksum is validated?       */
-    u16      id;
-    s16      status;     /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
+    uint16_t offset;     /* Offset in page of start of received packet  */
+    uint16_t csum_valid; /* Protocol checksum is validated?       */
+    uint16_t id;
+    int16_t  status;     /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
 } netif_rx_response_t;
 
 /*
  * We use a special capitalised type name because it is _essential_ that all 
  * arithmetic on indexes is done on an integer type of the correct size.
  */
-typedef u32 NETIF_RING_IDX;
+typedef uint32_t NETIF_RING_IDX;
 
 /*
  * Ring indexes are 'free running'. That is, they are not stored modulo the
@@ -91,3 +91,13 @@
 #define NETIF_RSP_OKAY             0
 
 #endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
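
For the bitfield layout above, a frontend-side sketch of filling one transmit request slot; the grant reference, offset, id and length passed in are placeholders, and the include path is an assumption.

    #include <xen/io/netif.h>             /* assumed include path */

    static void fill_tx_req(netif_tx_request_t *tx, grant_ref_t gref,
                            uint16_t offset, uint16_t id, uint16_t len)
    {
        tx->gref       = gref;    /* grant covering the packet page          */
        tx->offset     = offset;  /* 15-bit offset of the packet in the page */
        tx->csum_blank = 1;       /* checksum field left blank for the peer  */
        tx->id         = id;      /* echoed back in netif_tx_response_t.id   */
        tx->size       = len;     /* packet size in bytes                    */
    }
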
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/io/ring.h
--- a/xen/include/public/io/ring.h      Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/io/ring.h      Mon Oct 24 15:08:13 2005
@@ -197,3 +197,13 @@
     (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
 
 #endif /* __XEN_PUBLIC_IO_RING_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/io/tpmif.h
--- a/xen/include/public/io/tpmif.h     Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/io/tpmif.h     Mon Oct 24 15:08:13 2005
@@ -19,15 +19,15 @@
 typedef struct {
     unsigned long addr;   /* Machine address of packet.   */
     int      ref;         /* grant table access reference */
-    u16      id;          /* Echoed in response message.  */
-    u16      size;        /* Packet size in bytes.        */
+    uint16_t id;          /* Echoed in response message.  */
+    uint16_t size;        /* Packet size in bytes.        */
 } tpmif_tx_request_t;
 
 /*
  * The TPMIF_TX_RING_SIZE defines the number of pages the
  * front-end and backend can exchange (= size of array).
  */
-typedef u32 TPMIF_RING_IDX;
+typedef uint32_t TPMIF_RING_IDX;
 
 #define TPMIF_TX_RING_SIZE 10
 
@@ -42,3 +42,13 @@
 } tpmif_tx_interface_t;
 
 #endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/io/xs_wire.h
--- a/xen/include/public/io/xs_wire.h   Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/io/xs_wire.h   Mon Oct 24 15:08:13 2005
@@ -47,6 +47,7 @@
     XS_SET_PERMS,
     XS_WATCH_EVENT,
     XS_ERROR,
+    XS_IS_DOMAIN_INTRODUCED
 };
 
 #define XS_WRITE_NONE "NONE"
@@ -76,12 +77,13 @@
     XSD_ERROR(EAGAIN),
     XSD_ERROR(EISCONN),
 };
+
 struct xsd_sockmsg
 {
-    u32 type;  /* XS_??? */
-    u32 req_id;/* Request identifier, echoed in daemon's response.  */
-    u32 tx_id; /* Transaction id (0 if not related to a transaction). */
-    u32 len;   /* Length of data following this. */
+    uint32_t type;  /* XS_??? */
+    uint32_t req_id;/* Request identifier, echoed in daemon's response.  */
+    uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */
+    uint32_t len;   /* Length of data following this. */
 
     /* Generally followed by nul-terminated string(s). */
 };
@@ -92,4 +94,25 @@
     XS_WATCH_TOKEN,
 };
 
+/* Inter-domain shared memory communications. */
+#define XENSTORE_RING_SIZE 1024
+typedef uint32_t XENSTORE_RING_IDX;
+#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
+struct xenstore_domain_interface {
+    char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
+    char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
+    XENSTORE_RING_IDX req_cons, req_prod;
+    XENSTORE_RING_IDX rsp_cons, rsp_prod;
+};
+
 #endif /* _XS_WIRE_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
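
A producer-side sketch of the new shared xenstore ring: indices run freely and MASK_XENSTORE_IDX folds them into the 1024-byte arrays. The write-barrier macro, the include path and how the interface page is obtained are assumptions outside this header.

    #include <xen/io/xs_wire.h>           /* assumed include path */

    /* Copy up to 'len' request bytes into the ring; returns bytes written. */
    static unsigned int xs_ring_write(struct xenstore_domain_interface *intf,
                                      const char *data, unsigned int len)
    {
        XENSTORE_RING_IDX cons = intf->req_cons;   /* consumer: xenstored   */
        XENSTORE_RING_IDX prod = intf->req_prod;   /* producer: this domain */
        unsigned int written = 0;

        while (written < len && (prod - cons) < XENSTORE_RING_SIZE)
            intf->req[MASK_XENSTORE_IDX(prod++)] = data[written++];

        wmb();                   /* assumed write barrier before publishing */
        intf->req_prod = prod;   /* make the new bytes visible to xenstored */
        return written;
    }
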
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/memory.h
--- a/xen/include/public/memory.h       Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/memory.h       Mon Oct 24 15:08:13 2005
@@ -48,3 +48,13 @@
 } xen_memory_reservation_t;
 
 #endif /* __XEN_PUBLIC_MEMORY_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/physdev.h
--- a/xen/include/public/physdev.h      Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/physdev.h      Mon Oct 24 15:08:13 2005
@@ -13,41 +13,41 @@
 
 typedef struct physdevop_irq_status_query {
     /* IN */
-    u32 irq;
+    uint32_t irq;
     /* OUT */
 /* Need to call PHYSDEVOP_IRQ_UNMASK_NOTIFY when the IRQ has been serviced? */
 #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY (1<<0)
-    u32 flags;
+    uint32_t flags;
 } physdevop_irq_status_query_t;
 
 typedef struct physdevop_set_iopl {
     /* IN */
-    u32 iopl;
+    uint32_t iopl;
 } physdevop_set_iopl_t;
 
 typedef struct physdevop_set_iobitmap {
     /* IN */
-    u8 *bitmap;
-    u32 nr_ports;
+    uint8_t *bitmap;
+    uint32_t nr_ports;
 } physdevop_set_iobitmap_t;
 
 typedef struct physdevop_apic {
     /* IN */
-    u32 apic;
-    u32 offset;
+    uint32_t apic;
+    uint32_t offset;
     /* IN or OUT */
-    u32 value;
+    uint32_t value;
 } physdevop_apic_t; 
 
 typedef struct physdevop_irq {
     /* IN */
-    u32 irq;
+    uint32_t irq;
     /* OUT */
-    u32 vector;
+    uint32_t vector;
 } physdevop_irq_t; 
 
 typedef struct physdev_op {
-    u32 cmd;
+    uint32_t cmd;
     union {
         physdevop_irq_status_query_t      irq_status_query;
         physdevop_set_iopl_t              set_iopl;
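
With every member now a fixed-width type, a physdev_op_t built by a 32-bit guest lays out identically for a 64-bit hypervisor (the pointer in set_iobitmap being the remaining exception). A hedged sketch of filling in one of these requests; PHYSDEVOP_SET_IOPL is assumed to be the command constant defined elsewhere in this header, and the hypercall itself is not shown:

    physdev_op_t op;

    memset(&op, 0, sizeof(op));               /* <string.h> */
    op.cmd             = PHYSDEVOP_SET_IOPL;  /* assumed command constant */
    op.u.set_iopl.iopl = 1;                   /* example: request IOPL 1 */
    /* ...pass &op to the physdev_op hypercall (not shown)... */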
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/sched.h
--- a/xen/include/public/sched.h        Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/sched.h        Mon Oct 24 15:08:13 2005
@@ -48,3 +48,13 @@
 #define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
 
 #endif /* __XEN_PUBLIC_SCHED_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/sched_ctl.h
--- a/xen/include/public/sched_ctl.h    Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/sched_ctl.h    Mon Oct 24 15:08:13 2005
@@ -20,39 +20,49 @@
  * parameters
  */
 struct sched_ctl_cmd {
-    u32 sched_id;
-    u32 direction;
+    uint32_t sched_id;
+    uint32_t direction;
     union {
         struct bvt_ctl {
-            u32 ctx_allow;
+            uint32_t ctx_allow;
         } bvt;
     } u;
 };
 
 struct sched_adjdom_cmd {
-    u32     sched_id;
-    u32     direction;
-    domid_t domain;
+    uint32_t sched_id;
+    uint32_t direction;
+    domid_t  domain;
     union {
         struct bvt_adjdom
         {
-            u32 mcu_adv;            /* mcu advance: inverse of weight */
-            u32 warpback;           /* warp? */
-            s32 warpvalue;          /* warp value */
-            long long warpl;        /* warp limit */
-            long long warpu;        /* unwarp time requirement */
+            uint32_t mcu_adv;      /* mcu advance: inverse of weight */
+            uint32_t warpback;     /* warp? */
+            int32_t  warpvalue;    /* warp value */
+            int64_t  warpl;        /* warp limit */
+            int64_t  warpu;        /* unwarp time requirement */
         } bvt;
         
-       struct sedf_adjdom
+        struct sedf_adjdom
         {
-            u64 period;
-            u64 slice;
-            u64 latency;
-            u16 extratime;
-           u16 weight;
+            uint64_t period;
+            uint64_t slice;
+            uint64_t latency;
+            uint16_t extratime;
+            uint16_t weight;
         } sedf;
 
     } u;
 };
 
 #endif /* __XEN_PUBLIC_SCHED_CTL_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
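
sched_adjdom_cmd now has the same layout regardless of word size. A hedged example of populating the SEDF branch; SCHED_SEDF and the "set parameters" direction value are assumed to come from the same header family, and the numbers are purely illustrative:

    struct sched_adjdom_cmd cmd;

    memset(&cmd, 0, sizeof(cmd));
    cmd.sched_id         = SCHED_SEDF;   /* assumed scheduler-id constant */
    cmd.direction        = 1;            /* assumed "put/set parameters" value */
    cmd.domain           = 7;            /* example domain id */
    cmd.u.sedf.period    = 20000000ULL;  /* 20 ms, in ns */
    cmd.u.sedf.slice     =  5000000ULL;  /*  5 ms, in ns */
    cmd.u.sedf.latency   = 0;
    cmd.u.sedf.extratime = 0;
    cmd.u.sedf.weight    = 0;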
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/trace.h
--- a/xen/include/public/trace.h        Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/trace.h        Mon Oct 24 15:08:13 2005
@@ -55,8 +55,8 @@
 
 /* This structure represents a single trace buffer record. */
 struct t_rec {
-    u64 cycles;               /* cycle counter timestamp */
-    u32 event;                /* event ID                */
+    uint64_t cycles;          /* cycle counter timestamp */
+    uint32_t event;           /* event ID                */
     unsigned long data[5];    /* event data items        */
 };
 
@@ -75,3 +75,13 @@
 };
 
 #endif /* __XEN_PUBLIC_TRACE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/vcpu.h
--- a/xen/include/public/vcpu.h Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/vcpu.h Mon Oct 24 15:08:13 2005
@@ -52,3 +52,13 @@
 #define VCPUOP_is_up                3
 
 #endif /* __XEN_PUBLIC_VCPU_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/version.h
--- a/xen/include/public/version.h      Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/version.h      Mon Oct 24 15:08:13 2005
@@ -40,3 +40,13 @@
 } xen_parameters_info_t;
 
 #endif /* __XEN_PUBLIC_VERSION_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/vmx_assist.h
--- a/xen/include/public/vmx_assist.h   Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/vmx_assist.h   Mon Oct 24 15:08:13 2005
@@ -21,81 +21,90 @@
 #ifndef _VMX_ASSIST_H_
 #define _VMX_ASSIST_H_
 
-#define        VMXASSIST_BASE          0xD0000
-#define        VMXASSIST_MAGIC         0x17101966
-#define        VMXASSIST_MAGIC_OFFSET  (VMXASSIST_BASE+8)
+#define VMXASSIST_BASE         0xD0000
+#define VMXASSIST_MAGIC        0x17101966
+#define VMXASSIST_MAGIC_OFFSET (VMXASSIST_BASE+8)
 
-#define        VMXASSIST_NEW_CONTEXT   (VMXASSIST_BASE + 12)
-#define        VMXASSIST_OLD_CONTEXT   (VMXASSIST_NEW_CONTEXT + 4)
+#define VMXASSIST_NEW_CONTEXT (VMXASSIST_BASE + 12)
+#define VMXASSIST_OLD_CONTEXT (VMXASSIST_NEW_CONTEXT + 4)
 
 #ifndef __ASSEMBLY__
 
 union vmcs_arbytes {
-       struct arbyte_fields {
-               unsigned int    seg_type        : 4,
-                               s               : 1,
-                               dpl             : 2,
-                               p               : 1, 
-                               reserved0       : 4,
-                               avl             : 1,
-                               reserved1       : 1,     
-                               default_ops_size: 1,
-                               g               : 1,
-                               null_bit        : 1, 
-                               reserved2       : 15;
-       } fields;
-       unsigned int bytes;
+    struct arbyte_fields {
+        unsigned int seg_type : 4,
+            s         : 1,
+            dpl       : 2,
+            p         : 1, 
+            reserved0 : 4,
+            avl       : 1,
+            reserved1 : 1,     
+            default_ops_size: 1,
+            g         : 1,
+            null_bit  : 1, 
+            reserved2 : 15;
+    } fields;
+    unsigned int bytes;
 };
 
 /*
  * World switch state
  */
 typedef struct vmx_assist_context {
-       u32             eip;            /* execution pointer */
-       u32             esp;            /* stack point */
-       u32             eflags;         /* flags register */
-       u32             cr0;
-       u32             cr3;            /* page table directory */
-       u32             cr4;
-       u32             idtr_limit;     /* idt */
-       u32             idtr_base;
-       u32             gdtr_limit;     /* gdt */
-       u32             gdtr_base;
-       u32             cs_sel;         /* cs selector */
-       u32             cs_limit;
-       u32             cs_base;
-       union vmcs_arbytes      cs_arbytes;
-       u32             ds_sel;         /* ds selector */
-       u32             ds_limit;
-       u32             ds_base;
-       union vmcs_arbytes      ds_arbytes;
-       u32             es_sel;         /* es selector */
-       u32             es_limit;
-       u32             es_base;
-       union vmcs_arbytes      es_arbytes;
-       u32             ss_sel;         /* ss selector */
-       u32             ss_limit;
-       u32             ss_base;
-       union vmcs_arbytes      ss_arbytes;
-       u32             fs_sel;         /* fs selector */
-       u32             fs_limit;
-       u32             fs_base;
-       union vmcs_arbytes      fs_arbytes;
-       u32             gs_sel;         /* gs selector */
-       u32             gs_limit;
-       u32             gs_base;
-       union vmcs_arbytes      gs_arbytes;
-       u32             tr_sel;         /* task selector */
-       u32             tr_limit;
-       u32             tr_base;
-       union vmcs_arbytes      tr_arbytes;
-       u32             ldtr_sel;       /* ldtr selector */
-       u32             ldtr_limit;
-       u32             ldtr_base;
-       union vmcs_arbytes      ldtr_arbytes;
+    uint32_t  eip;        /* execution pointer */
+    uint32_t  esp;        /* stack pointer */
+    uint32_t  eflags;     /* flags register */
+    uint32_t  cr0;
+    uint32_t  cr3;        /* page table directory */
+    uint32_t  cr4;
+    uint32_t  idtr_limit; /* idt */
+    uint32_t  idtr_base;
+    uint32_t  gdtr_limit; /* gdt */
+    uint32_t  gdtr_base;
+    uint32_t  cs_sel;     /* cs selector */
+    uint32_t  cs_limit;
+    uint32_t  cs_base;
+    union vmcs_arbytes cs_arbytes;
+    uint32_t  ds_sel;     /* ds selector */
+    uint32_t  ds_limit;
+    uint32_t  ds_base;
+    union vmcs_arbytes ds_arbytes;
+    uint32_t  es_sel;     /* es selector */
+    uint32_t  es_limit;
+    uint32_t  es_base;
+    union vmcs_arbytes es_arbytes;
+    uint32_t  ss_sel;     /* ss selector */
+    uint32_t  ss_limit;
+    uint32_t  ss_base;
+    union vmcs_arbytes ss_arbytes;
+    uint32_t  fs_sel;     /* fs selector */
+    uint32_t  fs_limit;
+    uint32_t  fs_base;
+    union vmcs_arbytes fs_arbytes;
+    uint32_t  gs_sel;     /* gs selector */
+    uint32_t  gs_limit;
+    uint32_t  gs_base;
+    union vmcs_arbytes gs_arbytes;
+    uint32_t  tr_sel;     /* task selector */
+    uint32_t  tr_limit;
+    uint32_t  tr_base;
+    union vmcs_arbytes tr_arbytes;
+    uint32_t  ldtr_sel;   /* ldtr selector */
+    uint32_t  ldtr_limit;
+    uint32_t  ldtr_base;
+    union vmcs_arbytes ldtr_arbytes;
 } vmx_assist_context_t;
 
 #endif /* __ASSEMBLY__ */
 
 #endif /* _VMX_ASSIST_H_ */
 
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
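
The reindented vmcs_arbytes union aliases the raw VMCS access-rights word with a bitfield view. A short illustration of decoding a value through it (the value is arbitrary; the bitfield layout is the one this header already assumes for the supported compilers):

    union vmcs_arbytes ar;
    unsigned int type, dpl, present;

    ar.bytes = 0xc09b;              /* example raw access-rights word */
    type     = ar.fields.seg_type;  /* low four bits */
    dpl      = ar.fields.dpl;       /* descriptor privilege level */
    present  = ar.fields.p;         /* present bit */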
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/xen.h
--- a/xen/include/public/xen.h  Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/xen.h  Mon Oct 24 15:08:13 2005
@@ -213,7 +213,7 @@
 
 #ifndef __ASSEMBLY__
 
-typedef u16 domid_t;
+typedef uint16_t domid_t;
 
 /* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
 #define DOMID_FIRST_RESERVED (0x7FF0U)
@@ -246,8 +246,8 @@
  */
 typedef struct
 {
-    u64 ptr;       /* Machine address of PTE. */
-    u64 val;       /* New contents of PTE.    */
+    uint64_t ptr;       /* Machine address of PTE. */
+    uint64_t val;       /* New contents of PTE.    */
 } mmu_update_t;
 
 /*
@@ -260,8 +260,11 @@
     unsigned long args[6];
 } multicall_entry_t;
 
-/* Event channel endpoints per domain. */
-#define NR_EVENT_CHANNELS 1024
+/*
+ * Event channel endpoints per domain:
+ *  1024 if a long is 32 bits; 4096 if a long is 64 bits.
+ */
+#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
 
 /*
  * Per-VCPU information goes here. This will be cleaned up more when Xen 
@@ -293,9 +296,9 @@
      * an upcall activation. The mask is cleared when the VCPU requests
      * to block: this avoids wakeup-waiting races.
      */
-    u8 evtchn_upcall_pending;
-    u8 evtchn_upcall_mask;
-    u32 evtchn_pending_sel;
+    uint8_t evtchn_upcall_pending;
+    uint8_t evtchn_upcall_mask;
+    unsigned long evtchn_pending_sel;
 #ifdef __ARCH_HAS_VCPU_INFO
     arch_vcpu_info_t arch;
 #endif
@@ -311,17 +314,17 @@
      * The correct way to interact with the version number is similar to
      * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
      */
-    u32 version;
-    u64 tsc_timestamp;   /* TSC at last update of time vals.  */
-    u64 system_time;     /* Time, in nanosecs, since boot.    */
+    uint32_t version;
+    uint64_t tsc_timestamp;   /* TSC at last update of time vals.  */
+    uint64_t system_time;     /* Time, in nanosecs, since boot.    */
     /*
      * Current system time:
      *   system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul
      * CPU frequency (Hz):
      *   ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
      */
-    u32 tsc_to_system_mul;
-    s8  tsc_shift;
+    uint32_t tsc_to_system_mul;
+    int8_t  tsc_shift;
 } vcpu_time_info_t;
 
 /*
@@ -333,16 +336,14 @@
 
     vcpu_time_info_t vcpu_time[MAX_VIRT_CPUS];
 
-    u32 n_vcpu;
-
     /*
-     * A domain can have up to 1024 "event channels" on which it can send
-     * and receive asynchronous event notifications. There are three classes
-     * of event that are delivered by this mechanism:
+     * A domain can create "event channels" on which it can send and receive
+     * asynchronous event notifications. There are three classes of event that
+     * are delivered by this mechanism:
      *  1. Bi-directional inter- and intra-domain connections. Domains must
-     *     arrange out-of-band to set up a connection (usually the setup
-     *     is initiated and organised by a privileged third party such as
-     *     software running in domain 0).
+     *     arrange out-of-band to set up a connection (usually by allocating
+     *     an unbound 'listener' port and advertising that via a storage service
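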
+     *     such as xenstore).
      *  2. Physical interrupts. A domain with suitable hardware-access
      *     privileges can bind an event-channel port to a physical interrupt
      *     source.
@@ -350,8 +351,8 @@
      *     port to a virtual interrupt source, such as the virtual-timer
      *     device or the emergency console.
      * 
-     * Event channels are addressed by a "port index" between 0 and 1023.
-     * Each channel is associated with two bits of information:
+     * Event channels are addressed by a "port index". Each channel is
+     * associated with two bits of information:
      *  1. PENDING -- notifies the domain that there is a pending notification
      *     to be processed. This bit is cleared by the guest.
      *  2. MASK -- if this bit is clear then a 0->1 transition of PENDING
@@ -363,19 +364,19 @@
      * 
      * To expedite scanning of pending notifications, any 0->1 pending
      * transition on an unmasked channel causes a corresponding bit in a
-     * 32-bit selector to be set. Each bit in the selector covers a 32-bit
-     * word in the PENDING bitfield array.
+     * per-vcpu selector word to be set. Each bit in the selector covers a
+     * 'C long' in the PENDING bitfield array.
      */
-    u32 evtchn_pending[32];
-    u32 evtchn_mask[32];
+    unsigned long evtchn_pending[sizeof(unsigned long) * 8];
+    unsigned long evtchn_mask[sizeof(unsigned long) * 8];
 
     /*
      * Wallclock time: updated only by control software. Guests should base
      * their gettimeofday() syscall on this wallclock-base value.
      */
-    u32 wc_version;      /* Version counter: see vcpu_time_info_t. */
-    u32 wc_sec;          /* Secs  00:00:00 UTC, Jan 1, 1970.  */
-    u32 wc_nsec;         /* Nsecs 00:00:00 UTC, Jan 1, 1970.  */
+    uint32_t wc_version;      /* Version counter: see vcpu_time_info_t. */
+    uint32_t wc_sec;          /* Secs  00:00:00 UTC, Jan 1, 1970.  */
+    uint32_t wc_nsec;         /* Nsecs 00:00:00 UTC, Jan 1, 1970.  */
 
     arch_shared_info_t arch;
 
@@ -411,32 +412,38 @@
     /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME.    */
     unsigned long nr_pages;     /* Total pages allocated to this domain.  */
     unsigned long shared_info;  /* MACHINE address of shared info struct. */
-    u32      flags;             /* SIF_xxx flags.                         */
+    uint32_t flags;             /* SIF_xxx flags.                         */
     unsigned long store_mfn;    /* MACHINE page number of shared page.    */
-    u16      store_evtchn;      /* Event channel for store communication. */
+    uint16_t store_evtchn;      /* Event channel for store communication. */
     unsigned long console_mfn;  /* MACHINE address of console page.       */
-    u16      console_evtchn;    /* Event channel for console messages.    */
+    uint16_t console_evtchn;    /* Event channel for console messages.    */
     /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME).     */
     unsigned long pt_base;      /* VIRTUAL address of page directory.     */
     unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames.       */
     unsigned long mfn_list;     /* VIRTUAL address of page-frame list.    */
     unsigned long mod_start;    /* VIRTUAL address of pre-loaded module.  */
     unsigned long mod_len;      /* Size (bytes) of pre-loaded module.     */
-    s8 cmd_line[MAX_GUEST_CMDLINE];
+    int8_t cmd_line[MAX_GUEST_CMDLINE];
 } start_info_t;
 
 /* These flags are passed in the 'flags' field of start_info_t. */
 #define SIF_PRIVILEGED    (1<<0)  /* Is the domain privileged? */
 #define SIF_INITDOMAIN    (1<<1)  /* Is this the initial control domain? */
-#define SIF_BLK_BE_DOMAIN (1<<4)  /* Is this a block backend domain? */
-#define SIF_NET_BE_DOMAIN (1<<5)  /* Is this a net backend domain? */
-#define SIF_USB_BE_DOMAIN (1<<6)  /* Is this a usb backend domain? */
-#define SIF_TPM_BE_DOMAIN (1<<7)  /* Is this a TPM backend domain? */
-/* For use in guest OSes. */
-extern shared_info_t *HYPERVISOR_shared_info;
-
-typedef u64 cpumap_t;
+
+typedef uint64_t cpumap_t;
+
+typedef uint8_t xen_domain_handle_t[16];
 
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __XEN_PUBLIC_XEN_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
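
The xen.h changes above make the event-channel state scale with the guest word size: NR_EVENT_CHANNELS becomes 1024 on 32-bit and 4096 on 64-bit, evtchn_pending/evtchn_mask become arrays of unsigned long, and each bit of the per-VCPU selector now covers one long of pending bits. A hedged sketch of the guest-side scan this implies, assuming 'vi' points at this VCPU's vcpu_info_t, 's' at the shared_info_t, and do_event(), __ffs() and xchg() come from the guest environment:

    unsigned long sel = xchg(&vi->evtchn_pending_sel, 0);

    while (sel != 0) {
        unsigned long i = __ffs(sel);          /* which long of pending bits */
        sel &= ~(1UL << i);

        unsigned long pend = s->evtchn_pending[i] & ~s->evtchn_mask[i];
        while (pend != 0) {
            unsigned long j = __ffs(pend);
            pend &= ~(1UL << j);
            do_event(i * BITS_PER_LONG + j);   /* port = word*BITS_PER_LONG + bit */
        }
    }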
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/xen/ac_timer.h
--- a/xen/include/xen/ac_timer.h        Fri Oct 21 19:58:39 2005
+++ b/xen/include/xen/ac_timer.h        Mon Oct 24 15:08:13 2005
@@ -63,6 +63,11 @@
  */
 extern void rem_ac_timer(struct ac_timer *timer);
 
+/*
+ * Initialisation. Must be called before any other ac_timer function.
+ */
+extern void ac_timer_init(void);
+
 #endif /* _AC_TIMER_H_ */
 
 /*
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/xen/domain.h
--- a/xen/include/xen/domain.h  Fri Oct 21 19:58:39 2005
+++ b/xen/include/xen/domain.h  Mon Oct 24 15:08:13 2005
@@ -1,18 +1,19 @@
 
 #ifndef __XEN_DOMAIN_H__
 #define __XEN_DOMAIN_H__
+
+extern int boot_vcpu(
+    struct domain *d, int vcpuid, struct vcpu_guest_context *ctxt);
 
 /*
  * Arch-specifics.
  */
 
-struct vcpu *arch_alloc_vcpu_struct(void);
+struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id);
 
-extern void arch_free_vcpu_struct(struct vcpu *v);
+extern void free_vcpu_struct(struct vcpu *v);
 
 extern void arch_do_createdomain(struct vcpu *v);
-
-extern void arch_do_boot_vcpu(struct vcpu *v);
 
 extern int  arch_set_info_guest(
     struct vcpu *v, struct vcpu_guest_context *c);
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/xen/event.h
--- a/xen/include/xen/event.h   Fri Oct 21 19:58:39 2005
+++ b/xen/include/xen/event.h   Mon Oct 24 15:08:13 2005
@@ -28,10 +28,11 @@
     shared_info_t *s = d->shared_info;
 
     /* These four operations must happen in strict order. */
-    if ( !test_and_set_bit(port,    &s->evtchn_pending[0]) &&
-         !test_bit        (port,    &s->evtchn_mask[0])    &&
-         !test_and_set_bit(port>>5, &v->vcpu_info->evtchn_pending_sel) &&
-         !test_and_set_bit(0,       &v->vcpu_info->evtchn_upcall_pending) )
+    if ( !test_and_set_bit(port, &s->evtchn_pending[0]) &&
+         !test_bit        (port, &s->evtchn_mask[0])    &&
+         !test_and_set_bit(port / BITS_PER_LONG,
+                           &v->vcpu_info->evtchn_pending_sel) &&
+         !test_and_set_bit(0, &v->vcpu_info->evtchn_upcall_pending) )
     {
         evtchn_notify(v);
     }
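
Replacing port>>5 with port/BITS_PER_LONG keeps the selector bit in step with the widened pending array above. A worked example of the new indexing, assuming a 64-bit build:

    unsigned int port = 130;
    unsigned int word = port / BITS_PER_LONG;  /* 2 (the old port >> 5 gave 4) */
    unsigned int bit  = port % BITS_PER_LONG;  /* 2 */
    /* Bit 'bit' is set in evtchn_pending[word], and bit 'word' is set in the
     * per-vcpu evtchn_pending_sel selector. */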
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Fri Oct 21 19:58:39 2005
+++ b/xen/include/xen/sched.h   Mon Oct 24 15:08:13 2005
@@ -19,7 +19,7 @@
 /* A global pointer to the initial domain (DOM0). */
 extern struct domain *dom0;
 
-#define MAX_EVTCHNS        1024
+#define MAX_EVTCHNS        NR_EVENT_CHANNELS
 #define EVTCHNS_PER_BUCKET 128
 #define NR_EVTCHN_BUCKETS  (MAX_EVTCHNS / EVTCHNS_PER_BUCKET)
 
@@ -61,7 +61,8 @@
     vcpu_info_t     *vcpu_info;
 
     struct domain   *domain;
-    struct vcpu *next_in_list;
+
+    struct vcpu     *next_in_list;
 
     struct ac_timer  timer;         /* one-shot timer for timeout values */
     unsigned long    sleep_tick;    /* tick at which this vcpu started sleep */
@@ -138,6 +139,9 @@
     struct arch_domain arch;
 
     void *ssid; /* sHype security subject identifier */
+
+    /* Control-plane tools handle for this domain. */
+    xen_domain_handle_t handle;
 };
 
 struct domain_setup_info
@@ -166,11 +170,11 @@
 #define IDLE_DOMAIN_ID   (0x7FFFU)
 #define is_idle_task(_d) (test_bit(_DOMF_idle_domain, &(_d)->domain_flags))
 
-struct vcpu *alloc_vcpu_struct(struct domain *d,
-                                             unsigned long vcpu);
-
-void free_domain_struct(struct domain *d);
-struct domain *alloc_domain_struct();
+struct vcpu *alloc_vcpu(
+    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id);
+
+struct domain *alloc_domain(void);
+void free_domain(struct domain *d);
 
 #define DOMAIN_DESTRUCTED (1<<31) /* assumes atomic_t is >= 32 bits */
 #define put_domain(_d) \
@@ -327,13 +331,15 @@
 extern struct domain *domain_hash[DOMAIN_HASH_SIZE];
 extern struct domain *domain_list;
 
-#define for_each_domain(_d) \
- for ( (_d) = domain_list; (_d) != NULL; (_d) = (_d)->next_in_list )
-
-#define for_each_vcpu(_d,_ed) \
- for ( (_ed) = (_d)->vcpu[0]; \
-       (_ed) != NULL;                \
-       (_ed) = (_ed)->next_in_list )
+#define for_each_domain(_d)                     \
+ for ( (_d) = domain_list;                      \
+       (_d) != NULL;                            \
+       (_d) = (_d)->next_in_list )
+
+#define for_each_vcpu(_d,_v)                    \
+ for ( (_v) = (_d)->vcpu[0];                    \
+       (_v) != NULL;                            \
+       (_v) = (_v)->next_in_list )
 
 /*
  * Per-VCPU flags (vcpu_flags).
@@ -345,57 +351,55 @@
 #define _VCPUF_fpu_dirtied     1
 #define VCPUF_fpu_dirtied      (1UL<<_VCPUF_fpu_dirtied)
  /* Domain is blocked waiting for an event. */
-#define _VCPUF_blocked         3
+#define _VCPUF_blocked         2
 #define VCPUF_blocked          (1UL<<_VCPUF_blocked)
- /* Domain is paused by controller software. */
-#define _VCPUF_ctrl_pause      4
-#define VCPUF_ctrl_pause       (1UL<<_VCPUF_ctrl_pause)
  /* Currently running on a CPU? */
-#define _VCPUF_running         5
+#define _VCPUF_running         3
 #define VCPUF_running          (1UL<<_VCPUF_running)
  /* Disables auto-migration between CPUs. */
-#define _VCPUF_cpu_pinned      6
+#define _VCPUF_cpu_pinned      4
 #define VCPUF_cpu_pinned       (1UL<<_VCPUF_cpu_pinned)
  /* Domain migrated between CPUs. */
-#define _VCPUF_cpu_migrated    7
+#define _VCPUF_cpu_migrated    5
 #define VCPUF_cpu_migrated     (1UL<<_VCPUF_cpu_migrated)
  /* Initialization completed. */
-#define _VCPUF_initialised     8
+#define _VCPUF_initialised     6
 #define VCPUF_initialised      (1UL<<_VCPUF_initialised)
  /* VCPU is not-runnable */
-#define _VCPUF_down            9
+#define _VCPUF_down            7
 #define VCPUF_down             (1UL<<_VCPUF_down)
 
 /*
  * Per-domain flags (domain_flags).
  */
- /* Has the guest OS been fully built yet? */
-#define _DOMF_constructed      0
-#define DOMF_constructed       (1UL<<_DOMF_constructed)
  /* Is this one of the per-CPU idle domains? */
-#define _DOMF_idle_domain      1
+#define _DOMF_idle_domain      0
 #define DOMF_idle_domain       (1UL<<_DOMF_idle_domain)
  /* Is this domain privileged? */
-#define _DOMF_privileged       2
+#define _DOMF_privileged       1
 #define DOMF_privileged        (1UL<<_DOMF_privileged)
  /* May this domain do IO to physical devices? */
-#define _DOMF_physdev_access   3
+#define _DOMF_physdev_access   2
 #define DOMF_physdev_access    (1UL<<_DOMF_physdev_access)
  /* Guest shut itself down for some reason. */
-#define _DOMF_shutdown         4
+#define _DOMF_shutdown         3
 #define DOMF_shutdown          (1UL<<_DOMF_shutdown)
  /* Guest is in process of shutting itself down (becomes DOMF_shutdown). */
-#define _DOMF_shuttingdown     5
+#define _DOMF_shuttingdown     4
 #define DOMF_shuttingdown      (1UL<<_DOMF_shuttingdown)
  /* Death rattle. */
-#define _DOMF_dying            6
+#define _DOMF_dying            5
 #define DOMF_dying             (1UL<<_DOMF_dying)
+ /* Domain is paused by controller software. */
+#define _DOMF_ctrl_pause       6
+#define DOMF_ctrl_pause        (1UL<<_DOMF_ctrl_pause)
 
 static inline int domain_runnable(struct vcpu *v)
 {
     return ( (atomic_read(&v->pausecnt) == 0) &&
-             !(v->vcpu_flags & (VCPUF_blocked|VCPUF_ctrl_pause|VCPUF_down)) &&
-             !(v->domain->domain_flags & (DOMF_shutdown|DOMF_shuttingdown)) );
+             !(v->vcpu_flags & (VCPUF_blocked|VCPUF_down)) &&
+             !(v->domain->domain_flags &
+               (DOMF_shutdown|DOMF_shuttingdown|DOMF_ctrl_pause)) );
 }
 
 void vcpu_pause(struct vcpu *v);
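
Moving ctrl_pause from the per-VCPU to the per-domain flag word means a controller-initiated pause now gates every VCPU of the domain through domain_runnable(). A hedged sketch of that effect only (the real pause path lives in the common domain code elsewhere in this changeset; vcpu_sleep_nosync is assumed to be the scheduler's descheduling helper):

    struct vcpu *v;

    set_bit(_DOMF_ctrl_pause, &d->domain_flags);
    for_each_vcpu ( d, v )
        if ( !domain_runnable(v) )   /* now false for every VCPU of d */
            vcpu_sleep_nosync(v);    /* assumed helper */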
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/xen/shadow.h
--- a/xen/include/xen/shadow.h  Fri Oct 21 19:58:39 2005
+++ b/xen/include/xen/shadow.h  Mon Oct 24 15:08:13 2005
@@ -12,7 +12,6 @@
 
 #define shadow_drop_references(_d, _p)          ((void)0)
 #define shadow_sync_and_drop_references(_d, _p) ((void)0)
-#define shadow_tainted_refcnts(_d)              (0)
 
 #endif
 
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/xen/smp.h
--- a/xen/include/xen/smp.h     Fri Oct 21 19:58:39 2005
+++ b/xen/include/xen/smp.h     Mon Oct 24 15:08:13 2005
@@ -90,7 +90,7 @@
 #define smp_processor_id()                     0
 #endif
 #define hard_smp_processor_id()                        0
-#define smp_call_function(func,info,retry,wait)        0
+#define smp_call_function(func,info,retry,wait)        ({ do {} while (0); 0; })
 #define on_each_cpu(func,info,retry,wait)      ({ func(info); 0; })
 #define num_booting_cpus()                     1
 #define smp_prepare_boot_cpu()                 do {} while (0)
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/xen/types.h
--- a/xen/include/xen/types.h   Fri Oct 21 19:58:39 2005
+++ b/xen/include/xen/types.h   Mon Oct 24 15:08:13 2005
@@ -32,23 +32,21 @@
 typedef unsigned int            uint;
 typedef unsigned long           ulong;
 
-#ifndef __BIT_TYPES_DEFINED__
-#define __BIT_TYPES_DEFINED__
-
+typedef         __u8            uint8_t;
 typedef         __u8            u_int8_t;
 typedef         __s8            int8_t;
+
+typedef         __u16           uint16_t;
 typedef         __u16           u_int16_t;
 typedef         __s16           int16_t;
+
+typedef         __u32           uint32_t;
 typedef         __u32           u_int32_t;
 typedef         __s32           int32_t;
 
-#endif /* !(__BIT_TYPES_DEFINED__) */
-
-typedef         __u8            uint8_t;
-typedef         __u16           uint16_t;
-typedef         __u32           uint32_t;
 typedef         __u64           uint64_t;
-
+typedef         __u64           u_int64_t;
+typedef         __s64           int64_t;
 
 struct domain;
 struct vcpu;
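
types.h now unconditionally supplies the full fixed-width set, including the 64-bit typedefs the public headers above rely on. A hedged compile-time sanity check in the usual negative-array-size style (the macro name is illustrative, not taken from the tree):

    #define ABI_SIZE_CHECK(t, n) \
        typedef char abi_size_check_##t[(sizeof(t) == (n)) ? 1 : -1]

    ABI_SIZE_CHECK(uint8_t,  1);
    ABI_SIZE_CHECK(uint16_t, 2);
    ABI_SIZE_CHECK(uint32_t, 4);
    ABI_SIZE_CHECK(uint64_t, 8);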
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/kernel/smpboot.c
--- /dev/null   Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/smpboot.c    Mon Oct 24 15:08:13 2005
@@ -0,0 +1,417 @@
+/*
+ *     Xen SMP booting functions
+ *
+ *     See arch/i386/kernel/smpboot.c for copyright and credits for derived
+ *     portions of this file.
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/smp_lock.h>
+#include <linux/irq.h>
+#include <linux/bootmem.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/percpu.h>
+#include <asm/desc.h>
+#include <asm/arch_hooks.h>
+#include <asm/pgalloc.h>
+#include <asm-xen/evtchn.h>
+#include <asm-xen/xen-public/vcpu.h>
+#include <asm-xen/xenbus.h>
+
+#ifdef CONFIG_SMP_ALTERNATIVES
+#include <asm/smp_alt.h>
+#endif
+
+extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
+extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
+
+extern void local_setup_timer(unsigned int cpu);
+extern void local_teardown_timer(unsigned int cpu);
+
+extern void hypervisor_callback(void);
+extern void failsafe_callback(void);
+extern void system_call(void);
+extern void smp_trap_init(trap_info_t *);
+
+extern cpumask_t cpu_initialized;
+
+/* Number of siblings per CPU package */
+int smp_num_siblings = 1;
+int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
+EXPORT_SYMBOL(phys_proc_id);
+int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
+EXPORT_SYMBOL(cpu_core_id);
+
+cpumask_t cpu_online_map;
+EXPORT_SYMBOL(cpu_online_map);
+cpumask_t cpu_possible_map;
+EXPORT_SYMBOL(cpu_possible_map);
+
+struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_data);
+
+#ifdef CONFIG_HOTPLUG_CPU
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
+#endif
+
+static DEFINE_PER_CPU(int, resched_irq);
+static DEFINE_PER_CPU(int, callfunc_irq);
+static char resched_name[NR_CPUS][15];
+static char callfunc_name[NR_CPUS][15];
+
+u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+
+void *xquad_portio;
+
+cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_core_map);
+
+#if defined(__i386__)
+u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
+EXPORT_SYMBOL(x86_cpu_to_apicid);
+#elif !defined(CONFIG_X86_IO_APIC)
+unsigned int maxcpus = NR_CPUS;
+#endif
+
+void __init smp_alloc_memory(void)
+{
+}
+
+static void xen_smp_intr_init(unsigned int cpu)
+{
+       per_cpu(resched_irq, cpu) =
+               bind_ipi_to_irq(RESCHEDULE_VECTOR, cpu);
+       sprintf(resched_name[cpu], "resched%d", cpu);
+       BUG_ON(request_irq(per_cpu(resched_irq, cpu), smp_reschedule_interrupt,
+                          SA_INTERRUPT, resched_name[cpu], NULL));
+
+       per_cpu(callfunc_irq, cpu) =
+               bind_ipi_to_irq(CALL_FUNCTION_VECTOR, cpu);
+       sprintf(callfunc_name[cpu], "callfunc%d", cpu);
+       BUG_ON(request_irq(per_cpu(callfunc_irq, cpu),
+                          smp_call_function_interrupt,
+                          SA_INTERRUPT, callfunc_name[cpu], NULL));
+
+       if (cpu != 0)
+               local_setup_timer(cpu);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void xen_smp_intr_exit(unsigned int cpu)
+{
+       if (cpu != 0)
+               local_teardown_timer(cpu);
+
+       free_irq(per_cpu(resched_irq, cpu), NULL);
+       unbind_ipi_from_irq(RESCHEDULE_VECTOR, cpu);
+
+       free_irq(per_cpu(callfunc_irq, cpu), NULL);
+       unbind_ipi_from_irq(CALL_FUNCTION_VECTOR, cpu);
+}
+#endif
+
+static void cpu_bringup(void)
+{
+       if (!cpu_isset(smp_processor_id(), cpu_initialized))
+               cpu_init();
+       local_irq_enable();
+       cpu_idle();
+}
+
+void vcpu_prepare(int vcpu)
+{
+       vcpu_guest_context_t ctxt;
+       struct task_struct *idle = idle_task(vcpu);
+
+       if (vcpu == 0)
+               return;
+
+       memset(&ctxt, 0, sizeof(ctxt));
+
+       ctxt.flags = VGCF_IN_KERNEL;
+       ctxt.user_regs.ds = __USER_DS;
+       ctxt.user_regs.es = __USER_DS;
+       ctxt.user_regs.fs = 0;
+       ctxt.user_regs.gs = 0;
+       ctxt.user_regs.ss = __KERNEL_DS;
+       ctxt.user_regs.eip = (unsigned long)cpu_bringup;
+       ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000; /* IOPL_RING1 */
+
+       memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
+
+       smp_trap_init(ctxt.trap_ctxt);
+
+       ctxt.ldt_ents = 0;
+
+       ctxt.gdt_frames[0] = virt_to_mfn(cpu_gdt_descr[vcpu].address);
+       ctxt.gdt_ents      = cpu_gdt_descr[vcpu].size / 8;
+
+#ifdef __i386__
+       ctxt.user_regs.cs = __KERNEL_CS;
+       ctxt.user_regs.esp = idle->thread.esp;
+
+       ctxt.kernel_ss = __KERNEL_DS;
+       ctxt.kernel_sp = idle->thread.esp0;
+
+       ctxt.event_callback_cs     = __KERNEL_CS;
+       ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
+       ctxt.failsafe_callback_cs  = __KERNEL_CS;
+       ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
+
+       ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
+#else
+       ctxt.user_regs.cs = __KERNEL_CS | 3;
+       ctxt.user_regs.esp = idle->thread.rsp;
+
+       ctxt.kernel_ss = __KERNEL_DS;
+       ctxt.kernel_sp = idle->thread.rsp0;
+
+       ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
+       ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
+       ctxt.syscall_callback_eip  = (unsigned long)system_call;
+
+       ctxt.ctrlreg[3] = virt_to_mfn(init_level4_pgt) << PAGE_SHIFT;
+
+       ctxt.gs_base_kernel = (unsigned long)(cpu_pda + vcpu);
+#endif
+
+       BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, &ctxt));
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+       int cpu, rc;
+       struct task_struct *idle;
+
+       if (max_cpus == 0)
+               return;
+
+       xen_smp_intr_init(0);
+
+       for (cpu = 1; cpu < max_cpus; cpu++) {
+               rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);
+               if (rc == -ENOENT)
+                       break;
+               BUG_ON(rc != 0);
+
+               cpu_data[cpu] = boot_cpu_data;
+               cpu_2_logical_apicid[cpu] = cpu;
+               x86_cpu_to_apicid[cpu] = cpu;
+
+               idle = fork_idle(cpu);
+               if (IS_ERR(idle))
+                       panic("failed fork for CPU %d", cpu);
+
+#ifdef __x86_64__
+               cpu_pda[cpu].pcurrent = idle;
+               cpu_pda[cpu].cpunumber = cpu;
+               per_cpu(init_tss,cpu).rsp0 = idle->thread.rsp;
+               clear_ti_thread_flag(idle->thread_info, TIF_FORK);
+#endif
+
+               irq_ctx_init(cpu);
+
+               cpu_gdt_descr[cpu].address =
+                       __get_free_page(GFP_KERNEL|__GFP_ZERO);
+               BUG_ON(cpu_gdt_descr[0].size > PAGE_SIZE);
+               cpu_gdt_descr[cpu].size = cpu_gdt_descr[0].size;
+               memcpy((void *)cpu_gdt_descr[cpu].address,
+                      (void *)cpu_gdt_descr[0].address,
+                      cpu_gdt_descr[0].size);
+               make_page_readonly((void *)cpu_gdt_descr[cpu].address);
+
+               cpu_set(cpu, cpu_possible_map);
+               if (xen_start_info->flags & SIF_INITDOMAIN)
+                       cpu_set(cpu, cpu_present_map);
+
+               vcpu_prepare(cpu);
+       }
+
+       /* Currently, Xen gives no dynamic NUMA/HT info. */
+       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+               cpus_clear(cpu_sibling_map[cpu]);
+               cpus_clear(cpu_core_map[cpu]);
+       }
+
+#ifdef CONFIG_X86_IO_APIC
+       /*
+        * Here we can be sure that there is an IO-APIC in the system. Let's
+        * go and set it up:
+        */
+       if (!skip_ioapic_setup && nr_ioapics)
+               setup_IO_APIC();
+#endif
+}
+
+void __devinit smp_prepare_boot_cpu(void)
+{
+       cpu_possible_map = cpumask_of_cpu(0);
+       cpu_present_map  = cpumask_of_cpu(0);
+       cpu_online_map   = cpumask_of_cpu(0);
+
+       cpu_data[0] = boot_cpu_data;
+       cpu_2_logical_apicid[0] = 0;
+       x86_cpu_to_apicid[0] = 0;
+
+       current_thread_info()->cpu = 0;
+       cpus_clear(cpu_sibling_map[0]);
+       cpu_set(0, cpu_sibling_map[0]);
+
+       cpus_clear(cpu_core_map[0]);
+       cpu_set(0, cpu_core_map[0]);
+}
+
+static void vcpu_hotplug(unsigned int cpu)
+{
+       int err;
+       char dir[32], state[32];
+
+       if ((cpu >= NR_CPUS) || !cpu_possible(cpu))
+               return;
+
+       sprintf(dir, "cpu/%d", cpu);
+       err = xenbus_scanf(NULL, dir, "availability", "%s", state);
+       if (err != 1) {
+               printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
+               return;
+       }
+
+       if (strcmp(state, "online") == 0) {
+               cpu_set(cpu, cpu_present_map);
+               (void)cpu_up(cpu);
+       } else if (strcmp(state, "offline") == 0) {
+#ifdef CONFIG_HOTPLUG_CPU
+               (void)cpu_down(cpu);
+#else
+               printk(KERN_INFO "Ignoring CPU%d hotplug request\n", cpu);
+#endif
+       } else {
+               printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
+                      state, cpu);
+       }
+}
+
+static void handle_vcpu_hotplug_event(
+       struct xenbus_watch *watch, const char **vec, unsigned int len)
+{
+       int cpu;
+       char *cpustr;
+       const char *node = vec[XS_WATCH_PATH];
+
+       if ((cpustr = strstr(node, "cpu/")) != NULL) {
+               sscanf(cpustr, "cpu/%d", &cpu);
+               vcpu_hotplug(cpu);
+       }
+}
+
+static int setup_cpu_watcher(struct notifier_block *notifier,
+                             unsigned long event, void *data)
+{
+       int i;
+
+       static struct xenbus_watch cpu_watch = {
+               .node = "cpu",
+               .callback = handle_vcpu_hotplug_event };
+       (void)register_xenbus_watch(&cpu_watch);
+
+       if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
+               for_each_cpu(i)
+                       vcpu_hotplug(i);
+               printk(KERN_INFO "Brought up %ld CPUs\n",
+                      (long)num_online_cpus());
+       }
+
+       return NOTIFY_DONE;
+}
+
+static int __init setup_vcpu_hotplug_event(void)
+{
+       static struct notifier_block xsn_cpu = {
+               .notifier_call = setup_cpu_watcher };
+       register_xenstore_notifier(&xsn_cpu);
+       return 0;
+}
+
+subsys_initcall(setup_vcpu_hotplug_event);
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+int __cpu_disable(void)
+{
+       cpumask_t map = cpu_online_map;
+       int cpu = smp_processor_id();
+
+       if (cpu == 0)
+               return -EBUSY;
+
+       cpu_clear(cpu, map);
+       fixup_irqs(map);
+       cpu_clear(cpu, cpu_online_map);
+
+       return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+       while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
+               current->state = TASK_UNINTERRUPTIBLE;
+               schedule_timeout(HZ/10);
+       }
+
+       xen_smp_intr_exit(cpu);
+
+#ifdef CONFIG_SMP_ALTERNATIVES
+       if (num_online_cpus() == 1)
+               unprepare_for_smp();
+#endif
+}
+
+#else /* !CONFIG_HOTPLUG_CPU */
+
+int __cpu_disable(void)
+{
+       return -ENOSYS;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+       BUG();
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+int __devinit __cpu_up(unsigned int cpu)
+{
+#ifdef CONFIG_SMP_ALTERNATIVES
+       if (num_online_cpus() == 1)
+               prepare_for_smp();
+#endif
+
+       xen_smp_intr_init(cpu);
+       cpu_set(cpu, cpu_online_map);
+       HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
+
+       return 0;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
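
CPU hotplug in the new smpboot.c is driven entirely from xenstore: the "cpu" watch fires handle_vcpu_hotplug_event(), which extracts the CPU number from the watch path, and vcpu_hotplug() then reads cpu/<n>/availability to choose between cpu_up() and cpu_down(). A worked example of the path parsing used above:

    /* A write to "cpu/3/availability" delivers that path to the watch. */
    const char *node = "cpu/3/availability";
    char *cpustr = strstr(node, "cpu/");
    int cpu = -1;

    if (cpustr != NULL)
        sscanf(cpustr, "cpu/%d", &cpu);   /* cpu == 3 */
    /* vcpu_hotplug(3) then reads cpu/3/availability and brings the VCPU
     * online or offline accordingly. */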
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/include/asm-xen/asm-i386/smp.h
--- /dev/null   Fri Oct 21 19:58:39 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/smp.h       Mon Oct 24 15:08:13 2005
@@ -0,0 +1,93 @@
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+/*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
+#ifndef __ASSEMBLY__
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#ifndef __ASSEMBLY__
+#include <asm/fixmap.h>
+#include <asm/bitops.h>
+#include <asm/mpspec.h>
+#ifdef CONFIG_X86_IO_APIC
+#include <asm/io_apic.h>
+#endif
+#include <asm/apic.h>
+#endif
+#endif
+
+#define BAD_APICID 0xFFu
+#ifdef CONFIG_SMP
+#ifndef __ASSEMBLY__
+
+/*
+ * Private routines/data
+ */
+ 
+extern void smp_alloc_memory(void);
+extern int pic_mode;
+extern int smp_num_siblings;
+extern cpumask_t cpu_sibling_map[];
+extern cpumask_t cpu_core_map[];
+
+extern void smp_flush_tlb(void);
+extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
+extern void smp_invalidate_rcv(void);          /* Process an NMI */
+extern void (*mtrr_hook) (void);
+extern void zap_low_mappings (void);
+
+#define MAX_APICID 256
+extern u8 x86_cpu_to_apicid[];
+
+/*
+ * This function is needed by all SMP systems. It must _always_ be valid
+ * from the initial startup. We map APIC_BASE very early in page_setup(),
+ * so this is correct in the x86 case.
+ */
+#define __smp_processor_id() (current_thread_info()->cpu)
+
+extern cpumask_t cpu_possible_map;
+#define cpu_callin_map cpu_possible_map
+
+/* We don't mark CPUs online until __cpu_up(), so we need another measure */
+static inline int num_booting_cpus(void)
+{
+       return cpus_weight(cpu_possible_map);
+}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+#ifdef APIC_DEFINITION
+extern int hard_smp_processor_id(void);
+#else
+#include <mach_apicdef.h>
+static inline int hard_smp_processor_id(void)
+{
+       /* we don't want to mark this access volatile - bad code generation */
+       return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
+}
+#endif
+
+static __inline int logical_smp_processor_id(void)
+{
+       /* we don't want to mark this access volatile - bad code generation */
+       return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+}
+
+#endif
+
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+#endif /* !__ASSEMBLY__ */
+
+#define NO_PROC_ID             0xFF            /* No processor magic marker */
+
+#endif
+#endif
diff -r ff7c5a791ed5 -r fdea4a967bc7 patches/linux-2.6.12/2.6.12.6.patch
--- /dev/null   Fri Oct 21 19:58:39 2005
+++ b/patches/linux-2.6.12/2.6.12.6.patch       Mon Oct 24 15:08:13 2005
@@ -0,0 +1,1738 @@
+diff --git a/Makefile b/Makefile
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ VERSION = 2
+ PATCHLEVEL = 6
+ SUBLEVEL = 12
+-EXTRAVERSION =
++EXTRAVERSION = .6
+ NAME=Woozy Numbat
+ 
+ # *DOCUMENTATION*
+@@ -1149,7 +1149,7 @@ endif # KBUILD_EXTMOD
+#(which is the most common case IMHO) to avoid unneeded clutter in the big tags file.
+ #Adding $(srctree) adds about 20M on i386 to the size of the output file!
+ 
+-ifeq ($(KBUILD_OUTPUT),)
++ifeq ($(src),$(obj))
+ __srctree =
+ else
+ __srctree = $(srctree)/
+diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
++++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+@@ -44,7 +44,7 @@
+ 
+ #define PFX "powernow-k8: "
+ #define BFX PFX "BIOS error: "
+-#define VERSION "version 1.40.2"
++#define VERSION "version 1.40.4"
+ #include "powernow-k8.h"
+ 
+ /* serialize freq changes  */
+@@ -978,7 +978,7 @@ static int __init powernowk8_cpu_init(st
+ {
+       struct powernow_k8_data *data;
+       cpumask_t oldmask = CPU_MASK_ALL;
+-      int rc;
++      int rc, i;
+ 
+       if (!check_supported_cpu(pol->cpu))
+               return -ENODEV;
+@@ -1064,7 +1064,9 @@ static int __init powernowk8_cpu_init(st
+       printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
+              data->currfid, data->currvid);
+ 
+-      powernow_data[pol->cpu] = data;
++      for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
++              powernow_data[i] = data;
++      }
+ 
+       return 0;
+ 
+diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
+--- a/arch/i386/kernel/process.c
++++ b/arch/i386/kernel/process.c
+@@ -827,6 +827,8 @@ asmlinkage int sys_get_thread_area(struc
+       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+               return -EINVAL;
+ 
++      memset(&info, 0, sizeof(info));
++
+       desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+ 
+       info.entry_number = idx;
+diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
+--- a/arch/ia64/kernel/ptrace.c
++++ b/arch/ia64/kernel/ptrace.c
+@@ -945,6 +945,13 @@ access_uarea (struct task_struct *child,
+                               *data = (pt->cr_ipsr & IPSR_MASK);
+                       return 0;
+ 
++                    case PT_AR_RSC:
++                      if (write_access)
++                              pt->ar_rsc = *data | (3 << 2); /* force PL3 */
++                      else
++                              *data = pt->ar_rsc;
++                      return 0;
++
+                     case PT_AR_RNAT:
+                       urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
+                       rnat_addr = (long) ia64_rse_rnat_addr((long *)
+@@ -996,9 +1003,6 @@ access_uarea (struct task_struct *child,
+                     case PT_AR_BSPSTORE:
+                       ptr = pt_reg_addr(pt, ar_bspstore);
+                       break;
+-                    case PT_AR_RSC:
+-                      ptr = pt_reg_addr(pt, ar_rsc);
+-                      break;
+                     case PT_AR_UNAT:
+                       ptr = pt_reg_addr(pt, ar_unat);
+                       break;
+@@ -1234,7 +1238,7 @@ ptrace_getregs (struct task_struct *chil
+ static long
+ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
+ {
+-      unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
++      unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
+       struct unw_frame_info info;
+       struct switch_stack *sw;
+       struct ia64_fpreg fpval;
+@@ -1267,7 +1271,7 @@ ptrace_setregs (struct task_struct *chil
+       /* app regs */
+ 
+       retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
+-      retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
++      retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
+       retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
+       retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
+       retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
+@@ -1365,6 +1369,7 @@ ptrace_setregs (struct task_struct *chil
+       retval |= __get_user(nat_bits, &ppr->nat);
+ 
+       retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
++      retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
+       retval |= access_uarea(child, PT_AR_EC, &ec, 1);
+       retval |= access_uarea(child, PT_AR_LC, &lc, 1);
+       retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
+diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
+--- a/arch/ia64/kernel/signal.c
++++ b/arch/ia64/kernel/signal.c
+@@ -94,7 +94,7 @@ sys_sigaltstack (const stack_t __user *u
+ static long
+ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
+ {
+-      unsigned long ip, flags, nat, um, cfm;
++      unsigned long ip, flags, nat, um, cfm, rsc;
+       long err;
+ 
+       /* Always make any pending restarted system calls return -EINTR */
+@@ -106,7 +106,7 @@ restore_sigcontext (struct sigcontext __
+       err |= __get_user(ip, &sc->sc_ip);                      /* instruction pointer */
+       err |= __get_user(cfm, &sc->sc_cfm);
+       err |= __get_user(um, &sc->sc_um);                      /* user mask */
+-      err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
++      err |= __get_user(rsc, &sc->sc_ar_rsc);
+       err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
+       err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
+       err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
+@@ -119,6 +119,7 @@ restore_sigcontext (struct sigcontext __
+       err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);       /* r15 */
+ 
+       scr->pt.cr_ifs = cfm | (1UL << 63);
++      scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */
+ 
+       /* establish new instruction pointer: */
+       scr->pt.cr_iip = ip & ~0x3UL;
+diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
+--- a/arch/ppc/kernel/time.c
++++ b/arch/ppc/kernel/time.c
+@@ -89,6 +89,9 @@ unsigned long tb_to_ns_scale;
+ 
+ extern unsigned long wall_jiffies;
+ 
++/* used for timezone offset */
++static long timezone_offset;
++
+ DEFINE_SPINLOCK(rtc_lock);
+ 
+ EXPORT_SYMBOL(rtc_lock);
+@@ -170,7 +173,7 @@ void timer_interrupt(struct pt_regs * re
+                    xtime.tv_sec - last_rtc_update >= 659 &&
+                    abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
+                    jiffies - wall_jiffies == 1) {
+-                      if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0)
++                      if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
+                               last_rtc_update = xtime.tv_sec+1;
+                       else
+                               /* Try again one minute later */
+@@ -286,7 +289,7 @@ void __init time_init(void)
+       unsigned old_stamp, stamp, elapsed;
+ 
+         if (ppc_md.time_init != NULL)
+-                time_offset = ppc_md.time_init();
++                timezone_offset = ppc_md.time_init();
+ 
+       if (__USE_RTC()) {
+               /* 601 processor: dec counts down by 128 every 128ns */
+@@ -331,10 +334,10 @@ void __init time_init(void)
+       set_dec(tb_ticks_per_jiffy);
+ 
+       /* If platform provided a timezone (pmac), we correct the time */
+-        if (time_offset) {
+-              sys_tz.tz_minuteswest = -time_offset / 60;
++        if (timezone_offset) {
++              sys_tz.tz_minuteswest = -timezone_offset / 60;
+               sys_tz.tz_dsttime = 0;
+-              xtime.tv_sec -= time_offset;
++              xtime.tv_sec -= timezone_offset;
+         }
+         set_normalized_timespec(&wall_to_monotonic,
+                                 -xtime.tv_sec, -xtime.tv_nsec);
+diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
+--- a/arch/ppc64/boot/zlib.c
++++ b/arch/ppc64/boot/zlib.c
+@@ -1307,7 +1307,7 @@ local int huft_build(
+   {
+     *t = (inflate_huft *)Z_NULL;
+     *m = 0;
+-    return Z_OK;
++    return Z_DATA_ERROR;
+   }
+ 
+ 
+@@ -1351,6 +1351,7 @@ local int huft_build(
+     if ((j = *p++) != 0)
+       v[x[j]++] = i;
+   } while (++i < n);
++  n = x[g];                   /* set n to length of v */
+ 
+ 
+   /* Generate the Huffman codes and for each, make the table entries */
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -130,7 +130,7 @@ int start_fork_tramp(void *thread_arg, u
+       return(arg.pid);
+ }
+ 
+-static int ptrace_child(void)
++static int ptrace_child(void *arg)
+ {
+       int ret;
+       int pid = os_getpid(), ppid = getppid();
+@@ -159,16 +159,20 @@ static int ptrace_child(void)
+       _exit(ret);
+ }
+ 
+-static int start_ptraced_child(void)
++static int start_ptraced_child(void **stack_out)
+ {
++      void *stack;
++      unsigned long sp;
+       int pid, n, status;
+       
+-      pid = fork();
+-      if(pid == 0)
+-              ptrace_child();
+-
++      stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
++                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
++      if(stack == MAP_FAILED)
++              panic("check_ptrace : mmap failed, errno = %d", errno);
++      sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
++      pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL);
+       if(pid < 0)
+-              panic("check_ptrace : fork failed, errno = %d", errno);
++              panic("check_ptrace : clone failed, errno = %d", errno);
+       CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
+       if(n < 0)
+               panic("check_ptrace : wait failed, errno = %d", errno);
+@@ -176,6 +180,7 @@ static int start_ptraced_child(void)
+               panic("check_ptrace : expected SIGSTOP, got status = %d",
+                     status);
+ 
++      *stack_out = stack;
+       return(pid);
+ }
+ 
+@@ -183,12 +188,12 @@ static int start_ptraced_child(void)
+  * just avoid using sysemu, not panic, but only if SYSEMU features are broken.
+  * So only for SYSEMU features we test mustpanic, while normal host features
+  * must work anyway!*/
+-static int stop_ptraced_child(int pid, int exitcode, int mustexit)
+static int stop_ptraced_child(int pid, void *stack, int exitcode, int mustpanic)
+ {
+       int status, n, ret = 0;
+ 
+       if(ptrace(PTRACE_CONT, pid, 0, 0) < 0)
+-              panic("stop_ptraced_child : ptrace failed, errno = %d", errno);
++              panic("check_ptrace : ptrace failed, errno = %d", errno);
+       CATCH_EINTR(n = waitpid(pid, &status, 0));
+       if(!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
+               int exit_with = WEXITSTATUS(status);
+@@ -199,13 +204,15 @@ static int stop_ptraced_child(int pid, i
+               printk("check_ptrace : child exited with exitcode %d, while "
+                     "expecting %d; status 0x%x", exit_with,
+                     exitcode, status);
+-              if (mustexit)
++              if (mustpanic)
+                       panic("\n");
+               else
+                       printk("\n");
+               ret = -1;
+       }
+ 
++      if(munmap(stack, PAGE_SIZE) < 0)
++              panic("check_ptrace : munmap failed, errno = %d", errno);
+       return ret;
+ }
+ 
+@@ -227,11 +234,12 @@ __uml_setup("nosysemu", nosysemu_cmd_par
+ 
+ static void __init check_sysemu(void)
+ {
++      void *stack;
+       int pid, syscall, n, status, count=0;
+ 
+       printk("Checking syscall emulation patch for ptrace...");
+       sysemu_supported = 0;
+-      pid = start_ptraced_child();
++      pid = start_ptraced_child(&stack);
+ 
+       if(ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
+               goto fail;
+@@ -249,7 +257,7 @@ static void __init check_sysemu(void)
+               panic("check_sysemu : failed to modify system "
+                     "call return, errno = %d", errno);
+ 
+-      if (stop_ptraced_child(pid, 0, 0) < 0)
++      if (stop_ptraced_child(pid, stack, 0, 0) < 0)
+               goto fail_stopped;
+ 
+       sysemu_supported = 1;
+@@ -257,7 +265,7 @@ static void __init check_sysemu(void)
+       set_using_sysemu(!force_sysemu_disabled);
+ 
+       printk("Checking advanced syscall emulation patch for ptrace...");
+-      pid = start_ptraced_child();
++      pid = start_ptraced_child(&stack);
+       while(1){
+               count++;
+               if(ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0)
+@@ -282,7 +290,7 @@ static void __init check_sysemu(void)
+                       break;
+               }
+       }
+-      if (stop_ptraced_child(pid, 0, 0) < 0)
++      if (stop_ptraced_child(pid, stack, 0, 0) < 0)
+               goto fail_stopped;
+ 
+       sysemu_supported = 2;
+@@ -293,17 +301,18 @@ static void __init check_sysemu(void)
+       return;
+ 
+ fail:
+-      stop_ptraced_child(pid, 1, 0);
++      stop_ptraced_child(pid, stack, 1, 0);
+ fail_stopped:
+       printk("missing\n");
+ }
+ 
+ void __init check_ptrace(void)
+ {
++      void *stack;
+       int pid, syscall, n, status;
+ 
+       printk("Checking that ptrace can change system call numbers...");
+-      pid = start_ptraced_child();
++      pid = start_ptraced_child(&stack);
+ 
+       if (ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *)PTRACE_O_TRACESYSGOOD) < 0)
+               panic("check_ptrace: PTRACE_SETOPTIONS failed, errno = %d", errno);
+@@ -330,7 +339,7 @@ void __init check_ptrace(void)
+                       break;
+               }
+       }
+-      stop_ptraced_child(pid, 0, 1);
++      stop_ptraced_child(pid, stack, 0, 1);
+       printk("OK\n");
+       check_sysemu();
+ }
+@@ -362,10 +371,11 @@ void forward_pending_sigio(int target)
+ static inline int check_skas3_ptrace_support(void)
+ {
+       struct ptrace_faultinfo fi;
++      void *stack;
+       int pid, n, ret = 1;
+ 
+       printf("Checking for the skas3 patch in the host...");
+-      pid = start_ptraced_child();
++      pid = start_ptraced_child(&stack);
+ 
+       n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
+       if (n < 0) {
+@@ -380,7 +390,7 @@ static inline int check_skas3_ptrace_sup
+       }
+ 
+       init_registers(pid);
+-      stop_ptraced_child(pid, 1, 1);
++      stop_ptraced_child(pid, stack, 1, 1);
+ 
+       return(ret);
+ }
+diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
+--- a/arch/x86_64/ia32/syscall32.c
++++ b/arch/x86_64/ia32/syscall32.c
+@@ -57,6 +57,7 @@ int syscall32_setup_pages(struct linux_b
+       int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
+       struct vm_area_struct *vma;
+       struct mm_struct *mm = current->mm;
++      int ret;
+ 
+       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       if (!vma)
+@@ -78,7 +79,11 @@ int syscall32_setup_pages(struct linux_b
+       vma->vm_mm = mm;
+ 
+       down_write(&mm->mmap_sem);
+-      insert_vm_struct(mm, vma);
++      if ((ret = insert_vm_struct(mm, vma))) {
++              up_write(&mm->mmap_sem);
++              kmem_cache_free(vm_area_cachep, vma);
++              return ret;
++      }
+       mm->total_vm += npages;
+       up_write(&mm->mmap_sem);
+       return 0;
+diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
+--- a/arch/x86_64/kernel/setup.c
++++ b/arch/x86_64/kernel/setup.c
+@@ -729,8 +729,6 @@ static void __init amd_detect_cmp(struct
+       int cpu = smp_processor_id();
+       int node = 0;
+       unsigned bits;
+-      if (c->x86_num_cores == 1)
+-              return;
+ 
+       bits = 0;
+       while ((1 << bits) < c->x86_num_cores)
+diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
+--- a/arch/x86_64/kernel/smp.c
++++ b/arch/x86_64/kernel/smp.c
+@@ -284,6 +284,71 @@ struct call_data_struct {
+ static struct call_data_struct * call_data;
+ 
+ /*
++ * this function sends a 'generic call function' IPI to one other CPU
++ * in the system.
++ */
++static void __smp_call_function_single (int cpu, void (*func) (void *info), void *info,
++                              int nonatomic, int wait)
++{
++      struct call_data_struct data;
++      int cpus = 1;
++
++      data.func = func;
++      data.info = info;
++      atomic_set(&data.started, 0);
++      data.wait = wait;
++      if (wait)
++              atomic_set(&data.finished, 0);
++
++      call_data = &data;
++      wmb();
++      /* Send a message to all other CPUs and wait for them to respond */
++      send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
++
++      /* Wait for response */
++      while (atomic_read(&data.started) != cpus)
++              cpu_relax();
++
++      if (!wait)
++              return;
++
++      while (atomic_read(&data.finished) != cpus)
++              cpu_relax();
++}
++
++/*
++ * Run a function on another CPU
++ *  <func>    The function to run. This must be fast and non-blocking.
++ *  <info>    An arbitrary pointer to pass to the function.
++ *  <nonatomic>       Currently unused.
++ *  <wait>    If true, wait until function has completed on other CPUs.
++ *  [RETURNS]   0 on success, else a negative status code.
++ *
++ * Does not return until the remote CPU is nearly ready to execute <func>
++ * or is or has executed.
++ */
++
++int smp_call_function_single (int cpu, void (*func) (void *info), void *info, 
++      int nonatomic, int wait)
++{
++      
++      int me = get_cpu(); /* prevent preemption and reschedule on another processor */
++
++      if (cpu == me) {
++              printk("%s: trying to call self\n", __func__);
++              put_cpu();
++              return -EBUSY;
++      }
++      spin_lock_bh(&call_lock);
++
++      __smp_call_function_single(cpu, func,info,nonatomic,wait);      
++
++      spin_unlock_bh(&call_lock);
++      put_cpu();
++      return 0;
++}
++
++/*
+  * this function sends a 'generic call function' IPI to all other CPUs
+  * in the system.
+  */
+diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
+--- a/arch/x86_64/kernel/smpboot.c
++++ b/arch/x86_64/kernel/smpboot.c
+@@ -202,9 +202,6 @@ static __cpuinit void sync_master(void *
+ {
+       unsigned long flags, i;
+ 
+-      if (smp_processor_id() != boot_cpu_id)
+-              return;
+-
+       go[MASTER] = 0;
+ 
+       local_irq_save(flags);
+@@ -253,7 +250,7 @@ get_delta(long *rt, long *master)
+       return tcenter - best_tm;
+ }
+ 
+-static __cpuinit void sync_tsc(void)
++static __cpuinit void sync_tsc(unsigned int master)
+ {
+       int i, done = 0;
+       long delta, adj, adjust_latency = 0;
+@@ -267,9 +264,17 @@ static __cpuinit void sync_tsc(void)
+       } t[NUM_ROUNDS] __cpuinitdata;
+ #endif
+ 
++      printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
++              smp_processor_id(), master);
++
+       go[MASTER] = 1;
+ 
+-      smp_call_function(sync_master, NULL, 1, 0);
++      /* It is dangerous to broadcast IPI as cpus are coming up,
++       * as they may not be ready to accept them.  So since
++       * we only need to send the ipi to the boot cpu direct
++       * the message, and avoid the race.
++       */
++      smp_call_function_single(master, sync_master, NULL, 1, 0);
+ 
+       while (go[MASTER])      /* wait for master to be ready */
+               no_cpu_relax();
+@@ -313,16 +318,14 @@ static __cpuinit void sync_tsc(void)
+       printk(KERN_INFO
+              "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
+              "maxerr %lu cycles)\n",
+-             smp_processor_id(), boot_cpu_id, delta, rt);
++             smp_processor_id(), master, delta, rt);
+ }
+ 
+ static void __cpuinit tsc_sync_wait(void)
+ {
+       if (notscsync || !cpu_has_tsc)
+               return;
+-      printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
+-                      boot_cpu_id);
+-      sync_tsc();
++      sync_tsc(0);
+ }
+ 
+ static __init int notscsync_setup(char *s)
+diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
+--- a/drivers/acpi/pci_irq.c
++++ b/drivers/acpi/pci_irq.c
+@@ -433,8 +433,9 @@ acpi_pci_irq_enable (
+               printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI",
+                       pci_name(dev), ('A' + pin));
+               /* Interrupt Line values above 0xF are forbidden */
+-              if (dev->irq >= 0 && (dev->irq <= 0xF)) {
++              if (dev->irq > 0 && (dev->irq <= 0xF)) {
+                       printk(" - using IRQ %d\n", dev->irq);
++                      acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
+                       return_VALUE(0);
+               }
+               else {
+diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
+--- a/drivers/char/rocket.c
++++ b/drivers/char/rocket.c
+@@ -277,7 +277,7 @@ static void rp_do_receive(struct r_port 
+               ToRecv = space;
+ 
+       if (ToRecv <= 0)
+-              return;
++              goto done;
+ 
+       /*
+        * if status indicates there are errored characters in the
+@@ -359,6 +359,7 @@ static void rp_do_receive(struct r_port 
+       }
+       /*  Push the data up to the tty layer */
+       ld->receive_buf(tty, tty->flip.char_buf, tty->flip.flag_buf, count);
++done:
+       tty_ldisc_deref(ld);
+ }
+ 
+diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -32,12 +32,6 @@
+ 
+ #define       TPM_BUFSIZE                     2048
+ 
+-/* PCI configuration addresses */
+-#define       PCI_GEN_PMCON_1                 0xA0
+-#define       PCI_GEN1_DEC                    0xE4
+-#define       PCI_LPC_EN                      0xE6
+-#define       PCI_GEN2_DEC                    0xEC
+-
+ static LIST_HEAD(tpm_chip_list);
+ static DEFINE_SPINLOCK(driver_lock);
+ static int dev_mask[32];
+@@ -61,72 +55,6 @@ void tpm_time_expired(unsigned long ptr)
+ EXPORT_SYMBOL_GPL(tpm_time_expired);
+ 
+ /*
+- * Initialize the LPC bus and enable the TPM ports
+- */
+-int tpm_lpc_bus_init(struct pci_dev *pci_dev, u16 base)
+-{
+-      u32 lpcenable, tmp;
+-      int is_lpcm = 0;
+-
+-      switch (pci_dev->vendor) {
+-      case PCI_VENDOR_ID_INTEL:
+-              switch (pci_dev->device) {
+-              case PCI_DEVICE_ID_INTEL_82801CA_12:
+-              case PCI_DEVICE_ID_INTEL_82801DB_12:
+-                      is_lpcm = 1;
+-                      break;
+-              }
+-              /* init ICH (enable LPC) */
+-              pci_read_config_dword(pci_dev, PCI_GEN1_DEC, &lpcenable);
+-              lpcenable |= 0x20000000;
+-              pci_write_config_dword(pci_dev, PCI_GEN1_DEC, lpcenable);
+-
+-              if (is_lpcm) {
+-                      pci_read_config_dword(pci_dev, PCI_GEN1_DEC,
+-                                            &lpcenable);
+-                      if ((lpcenable & 0x20000000) == 0) {
+-                              dev_err(&pci_dev->dev,
+-                                      "cannot enable LPC\n");
+-                              return -ENODEV;
+-                      }
+-              }
+-
+-              /* initialize TPM registers */
+-              pci_read_config_dword(pci_dev, PCI_GEN2_DEC, &tmp);
+-
+-              if (!is_lpcm)
+-                      tmp = (tmp & 0xFFFF0000) | (base & 0xFFF0);
+-              else
+-                      tmp =
+-                          (tmp & 0xFFFF0000) | (base & 0xFFF0) |
+-                          0x00000001;
+-
+-              pci_write_config_dword(pci_dev, PCI_GEN2_DEC, tmp);
+-
+-              if (is_lpcm) {
+-                      pci_read_config_dword(pci_dev, PCI_GEN_PMCON_1,
+-                                            &tmp);
+-                      tmp |= 0x00000004;      /* enable CLKRUN */
+-                      pci_write_config_dword(pci_dev, PCI_GEN_PMCON_1,
+-                                             tmp);
+-              }
+-              tpm_write_index(0x0D, 0x55);    /* unlock 4F */
+-              tpm_write_index(0x0A, 0x00);    /* int disable */
+-              tpm_write_index(0x08, base);    /* base addr lo */
+-      tpm_write_index(0x09, (base & 0xFF00) >> 8);    /* base addr hi */
+-              tpm_write_index(0x0D, 0xAA);    /* lock 4F */
+-              break;
+-      case PCI_VENDOR_ID_AMD:
+-              /* nothing yet */
+-              break;
+-      }
+-
+-      return 0;
+-}
+-
+-EXPORT_SYMBOL_GPL(tpm_lpc_bus_init);
+-
+-/*
+  * Internal kernel interface to transmit TPM commands
+  */
+ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
+@@ -590,10 +518,6 @@ int tpm_pm_resume(struct pci_dev *pci_de
+       if (chip == NULL)
+               return -ENODEV;
+ 
+-      spin_lock(&driver_lock);
+-      tpm_lpc_bus_init(pci_dev, chip->vendor->base);
+-      spin_unlock(&driver_lock);
+-
+       return 0;
+ }
+ 
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -79,8 +79,6 @@ static inline void tpm_write_index(int i
+ }
+ 
+ extern void tpm_time_expired(unsigned long);
+-extern int tpm_lpc_bus_init(struct pci_dev *, u16);
+-
+ extern int tpm_register_hardware(struct pci_dev *,
+                                struct tpm_vendor_specific *);
+ extern int tpm_open(struct inode *, struct file *);
+diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
+--- a/drivers/char/tpm/tpm_atmel.c
++++ b/drivers/char/tpm/tpm_atmel.c
+@@ -22,7 +22,10 @@
+ #include "tpm.h"
+ 
+ /* Atmel definitions */
+-#define       TPM_ATML_BASE                   0x400
++enum tpm_atmel_addr {
++      TPM_ATMEL_BASE_ADDR_LO = 0x08,
++      TPM_ATMEL_BASE_ADDR_HI = 0x09
++};
+ 
+ /* write status bits */
+ #define       ATML_STATUS_ABORT               0x01
+@@ -127,7 +130,6 @@ static struct tpm_vendor_specific tpm_at
+       .cancel = tpm_atml_cancel,
+       .req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
+       .req_complete_val = ATML_STATUS_DATA_AVAIL,
+-      .base = TPM_ATML_BASE,
+       .miscdev = { .fops = &atmel_ops, },
+ };
+ 
+@@ -136,14 +138,16 @@ static int __devinit tpm_atml_init(struc
+ {
+       u8 version[4];
+       int rc = 0;
++      int lo, hi;
+ 
+       if (pci_enable_device(pci_dev))
+               return -EIO;
+ 
+-      if (tpm_lpc_bus_init(pci_dev, TPM_ATML_BASE)) {
+-              rc = -ENODEV;
+-              goto out_err;
+-      }
++      lo = tpm_read_index( TPM_ATMEL_BASE_ADDR_LO );
++      hi = tpm_read_index( TPM_ATMEL_BASE_ADDR_HI );
++
++      tpm_atmel.base = (hi<<8)|lo;
++      dev_dbg( &pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base);
+ 
+       /* verify that it is an Atmel part */
+       if (tpm_read_index(4) != 'A' || tpm_read_index(5) != 'T'
+diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
+--- a/drivers/char/tpm/tpm_nsc.c
++++ b/drivers/char/tpm/tpm_nsc.c
+@@ -24,6 +24,10 @@
+ /* National definitions */
+ #define       TPM_NSC_BASE                    0x360
+ #define       TPM_NSC_IRQ                     0x07
++#define       TPM_NSC_BASE0_HI                0x60
++#define       TPM_NSC_BASE0_LO                0x61
++#define       TPM_NSC_BASE1_HI                0x62
++#define       TPM_NSC_BASE1_LO                0x63
+ 
+ #define       NSC_LDN_INDEX                   0x07
+ #define       NSC_SID_INDEX                   0x20
+@@ -234,7 +238,6 @@ static struct tpm_vendor_specific tpm_ns
+       .cancel = tpm_nsc_cancel,
+       .req_complete_mask = NSC_STATUS_OBF,
+       .req_complete_val = NSC_STATUS_OBF,
+-      .base = TPM_NSC_BASE,
+       .miscdev = { .fops = &nsc_ops, },
+       
+ };
+@@ -243,15 +246,16 @@ static int __devinit tpm_nsc_init(struct
+                                 const struct pci_device_id *pci_id)
+ {
+       int rc = 0;
++      int lo, hi;
++
++      hi = tpm_read_index(TPM_NSC_BASE0_HI);
++      lo = tpm_read_index(TPM_NSC_BASE0_LO);
++
++      tpm_nsc.base = (hi<<8) | lo;
+ 
+       if (pci_enable_device(pci_dev))
+               return -EIO;
+ 
+-      if (tpm_lpc_bus_init(pci_dev, TPM_NSC_BASE)) {
+-              rc = -ENODEV;
+-              goto out_err;
+-      }
+-
+       /* verify that it is a National part (SID) */
+       if (tpm_read_index(NSC_SID_INDEX) != 0xEF) {
+               rc = -ENODEV;
+diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
+--- a/drivers/char/tty_ioctl.c
++++ b/drivers/char/tty_ioctl.c
+@@ -476,11 +476,11 @@ int n_tty_ioctl(struct tty_struct * tty,
+                       ld = tty_ldisc_ref(tty);
+                       switch (arg) {
+                       case TCIFLUSH:
+-                              if (ld->flush_buffer)
++                              if (ld && ld->flush_buffer)
+                                       ld->flush_buffer(tty);
+                               break;
+                       case TCIOFLUSH:
+-                              if (ld->flush_buffer)
++                              if (ld && ld->flush_buffer)
+                                       ld->flush_buffer(tty);
+                               /* fall through */
+                       case TCOFLUSH:
+diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
+--- a/drivers/media/video/cx88/cx88-video.c
++++ b/drivers/media/video/cx88/cx88-video.c
+@@ -261,7 +261,7 @@ static struct cx88_ctrl cx8800_ctls[] = 
+                       .default_value = 0,
+                       .type          = V4L2_CTRL_TYPE_INTEGER,
+               },
+-              .off                   = 0,
++              .off                   = 128,
+               .reg                   = MO_HUE,
+               .mask                  = 0x00ff,
+               .shift                 = 0,
+diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
+--- a/drivers/net/e1000/e1000_main.c
++++ b/drivers/net/e1000/e1000_main.c
+@@ -2307,6 +2307,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
+       tso = e1000_tso(adapter, skb);
+       if (tso < 0) {
+               dev_kfree_skb_any(skb);
++              spin_unlock_irqrestore(&adapter->tx_lock, flags);
+               return NETDEV_TX_OK;
+       }
+ 
+diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
+--- a/drivers/net/hamradio/Kconfig
++++ b/drivers/net/hamradio/Kconfig
+@@ -17,7 +17,7 @@ config MKISS
+ 
+ config 6PACK
+       tristate "Serial port 6PACK driver"
+-      depends on AX25 && BROKEN_ON_SMP
++      depends on AX25
+       ---help---
+         6pack is a transmission protocol for the data exchange between your
+         PC and your TNC (the Terminal Node Controller acts as a kind of
+diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
+--- a/drivers/net/shaper.c
++++ b/drivers/net/shaper.c
+@@ -135,10 +135,8 @@ static int shaper_start_xmit(struct sk_b
+ {
+       struct shaper *shaper = dev->priv;
+       struct sk_buff *ptr;
+-   
+-      if (down_trylock(&shaper->sem))
+-              return -1;
+ 
++      spin_lock(&shaper->lock);
+       ptr=shaper->sendq.prev;
+       
+       /*
+@@ -232,7 +230,7 @@ static int shaper_start_xmit(struct sk_b
+                 shaper->stats.collisions++;
+       }
+       shaper_kick(shaper);
+-      up(&shaper->sem);
++      spin_unlock(&shaper->lock);
+       return 0;
+ }
+ 
+@@ -271,11 +269,9 @@ static void shaper_timer(unsigned long d
+ {
+       struct shaper *shaper = (struct shaper *)data;
+ 
+-      if (!down_trylock(&shaper->sem)) {
+-              shaper_kick(shaper);
+-              up(&shaper->sem);
+-      } else
+-              mod_timer(&shaper->timer, jiffies);
++      spin_lock(&shaper->lock);
++      shaper_kick(shaper);
++      spin_unlock(&shaper->lock);
+ }
+ 
+ /*
+@@ -332,21 +328,6 @@ static void shaper_kick(struct shaper *s
+ 
+ 
+ /*
+- *    Flush the shaper queues on a closedown
+- */
+- 
+-static void shaper_flush(struct shaper *shaper)
+-{
+-      struct sk_buff *skb;
+-
+-      down(&shaper->sem);
+-      while((skb=skb_dequeue(&shaper->sendq))!=NULL)
+-              dev_kfree_skb(skb);
+-      shaper_kick(shaper);
+-      up(&shaper->sem);
+-}
+-
+-/*
+  *    Bring the interface up. We just disallow this until a 
+  *    bind.
+  */
+@@ -375,7 +356,15 @@ static int shaper_open(struct net_device
+ static int shaper_close(struct net_device *dev)
+ {
+       struct shaper *shaper=dev->priv;
+-      shaper_flush(shaper);
++      struct sk_buff *skb;
++
++      while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
++              dev_kfree_skb(skb);
++
++      spin_lock_bh(&shaper->lock);
++      shaper_kick(shaper);
++      spin_unlock_bh(&shaper->lock);
++
+       del_timer_sync(&shaper->timer);
+       return 0;
+ }
+@@ -576,6 +565,7 @@ static void shaper_init_priv(struct net_
+       init_timer(&sh->timer);
+       sh->timer.function=shaper_timer;
+       sh->timer.data=(unsigned long)sh;
++      spin_lock_init(&sh->lock);
+ }
+ 
+ /*
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -396,7 +396,7 @@ int pci_register_driver(struct pci_drive
+       /* FIXME, once all of the existing PCI drivers have been fixed to set
+        * the pci shutdown function, this test can go away. */
+       if (!drv->driver.shutdown)
+-              drv->driver.shutdown = pci_device_shutdown,
++              drv->driver.shutdown = pci_device_shutdown;
+       drv->driver.owner = drv->owner;
+       drv->driver.kobj.ktype = &pci_driver_kobj_type;
+       pci_init_dynids(&drv->dynids);
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -1914,9 +1914,11 @@ qla2x00_reg_remote_port(scsi_qla_host_t 
+               rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+ 
+       fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
+-      if (!rport)
++      if (!rport) {
+               qla_printk(KERN_WARNING, ha,
+                   "Unable to allocate fc remote port!\n");
++              return;
++      }
+ 
+       if (rport->scsi_target_id != -1 && rport->scsi_target_id < MAX_TARGETS)
+               fcport->os_target_id = rport->scsi_target_id;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1150,7 +1150,7 @@ iospace_error_exit:
+  */
+ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
+ {
+-      int     ret;
++      int     ret = -ENODEV;
+       device_reg_t __iomem *reg;
+       struct Scsi_Host *host;
+       scsi_qla_host_t *ha;
+@@ -1161,7 +1161,7 @@ int qla2x00_probe_one(struct pci_dev *pd
+       fc_port_t *fcport;
+ 
+       if (pci_enable_device(pdev))
+-              return -1;
++              goto probe_out;
+ 
+       host = scsi_host_alloc(&qla2x00_driver_template,
+           sizeof(scsi_qla_host_t));
+@@ -1183,9 +1183,8 @@ int qla2x00_probe_one(struct pci_dev *pd
+ 
+       /* Configure PCI I/O space */
+       ret = qla2x00_iospace_config(ha);
+-      if (ret != 0) {
+-              goto probe_alloc_failed;
+-      }
++      if (ret)
++              goto probe_failed;
+ 
+       /* Sanitize the information from PCI BIOS. */
+       host->irq = pdev->irq;
+@@ -1258,23 +1257,10 @@ int qla2x00_probe_one(struct pci_dev *pd
+               qla_printk(KERN_WARNING, ha,
+                   "[ERROR] Failed to allocate memory for adapter\n");
+ 
+-              goto probe_alloc_failed;
++              ret = -ENOMEM;
++              goto probe_failed;
+       }
+ 
+-      pci_set_drvdata(pdev, ha);
+-      host->this_id = 255;
+-      host->cmd_per_lun = 3;
+-      host->unique_id = ha->instance;
+-      host->max_cmd_len = MAX_CMDSZ;
+-      host->max_channel = ha->ports - 1;
+-      host->max_id = ha->max_targets;
+-      host->max_lun = ha->max_luns;
+-      host->transportt = qla2xxx_transport_template;
+-      if (scsi_add_host(host, &pdev->dev))
+-              goto probe_alloc_failed;
+-
+-      qla2x00_alloc_sysfs_attr(ha);
+-
+       if (qla2x00_initialize_adapter(ha) &&
+           !(ha->device_flags & DFLG_NO_CABLE)) {
+ 
+@@ -1285,11 +1271,10 @@ int qla2x00_probe_one(struct pci_dev *pd
+                   "Adapter flags %x.\n",
+                   ha->host_no, ha->device_flags));
+ 
++              ret = -ENODEV;
+               goto probe_failed;
+       }
+ 
+-      qla2x00_init_host_attr(ha);
+-
+       /*
+        * Startup the kernel thread for this host adapter
+        */
+@@ -1299,17 +1284,26 @@ int qla2x00_probe_one(struct pci_dev *pd
+               qla_printk(KERN_WARNING, ha,
+                   "Unable to start DPC thread!\n");
+ 
++              ret = -ENODEV;
+               goto probe_failed;
+       }
+       wait_for_completion(&ha->dpc_inited);
+ 
++      host->this_id = 255;
++      host->cmd_per_lun = 3;
++      host->unique_id = ha->instance;
++      host->max_cmd_len = MAX_CMDSZ;
++      host->max_channel = ha->ports - 1;
++      host->max_lun = MAX_LUNS;
++      host->transportt = qla2xxx_transport_template;
++
+       if (IS_QLA2100(ha) || IS_QLA2200(ha))
+               ret = request_irq(host->irq, qla2100_intr_handler,
+                   SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
+       else
+               ret = request_irq(host->irq, qla2300_intr_handler,
+                   SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
+-      if (ret != 0) {
++      if (ret) {
+               qla_printk(KERN_WARNING, ha,
+                   "Failed to reserve interrupt %d already in use.\n",
+                   host->irq);
+@@ -1363,9 +1357,18 @@ int qla2x00_probe_one(struct pci_dev *pd
+               msleep(10);
+       }
+ 
++      pci_set_drvdata(pdev, ha);
+       ha->flags.init_done = 1;
+       num_hosts++;
+ 
++      ret = scsi_add_host(host, &pdev->dev);
++      if (ret)
++              goto probe_failed;
++
++      qla2x00_alloc_sysfs_attr(ha);
++
++      qla2x00_init_host_attr(ha);
++
+       qla_printk(KERN_INFO, ha, "\n"
+           " QLogic Fibre Channel HBA Driver: %s\n"
+           "  QLogic %s - %s\n"
+@@ -1384,9 +1387,6 @@ int qla2x00_probe_one(struct pci_dev *pd
+ probe_failed:
+       fc_remove_host(ha->host);
+ 
+-      scsi_remove_host(host);
+-
+-probe_alloc_failed:
+       qla2x00_free_device(ha);
+ 
+       scsi_host_put(host);
+@@ -1394,7 +1394,8 @@ probe_alloc_failed:
+ probe_disable_device:
+       pci_disable_device(pdev);
+ 
+-      return -1;
++probe_out:
++      return ret;
+ }
+ EXPORT_SYMBOL_GPL(qla2x00_probe_one);
+ 
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -2969,23 +2969,22 @@ static void * dev_seq_start(struct seq_f
+ {
+       struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
+ 
++      s->private = it;
+       if (! it)
+               return NULL;
++
+       if (NULL == sg_dev_arr)
+-              goto err1;
++              return NULL;
+       it->index = *pos;
+       it->max = sg_last_dev();
+       if (it->index >= it->max)
+-              goto err1;
++              return NULL;
+       return it;
+-err1:
+-      kfree(it);
+-      return NULL;
+ }
+ 
+ static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
+ {
+-      struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
++      struct sg_proc_deviter * it = s->private;
+ 
+       *pos = ++it->index;
+       return (it->index < it->max) ? it : NULL;
+@@ -2993,7 +2992,9 @@ static void * dev_seq_next(struct seq_fi
+ 
+ static void dev_seq_stop(struct seq_file *s, void *v)
+ {
+-      kfree (v);
++      struct sg_proc_deviter * it = s->private;
++
++      kfree (it);
+ }
+ 
+ static int sg_proc_open_dev(struct inode *inode, struct file *file)
+diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
+--- a/drivers/usb/net/usbnet.c
++++ b/drivers/usb/net/usbnet.c
+@@ -1922,7 +1922,7 @@ static int genelink_rx_fixup (struct usb
+ 
+                       // copy the packet data to the new skb
+                       memcpy(skb_put(gl_skb, size), packet->packet_data, 
size);
+-                      skb_return (dev, skb);
++                      skb_return (dev, gl_skb);
+               }
+ 
+               // advance to the next packet
+diff --git a/fs/bio.c b/fs/bio.c
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -261,6 +261,7 @@ inline void __bio_clone(struct bio *bio,
+        */
+       bio->bi_vcnt = bio_src->bi_vcnt;
+       bio->bi_size = bio_src->bi_size;
++      bio->bi_idx = bio_src->bi_idx;
+       bio_phys_segments(q, bio);
+       bio_hw_segments(q, bio);
+ }
+diff --git a/fs/char_dev.c b/fs/char_dev.c
+--- a/fs/char_dev.c
++++ b/fs/char_dev.c
+@@ -139,7 +139,7 @@ __unregister_chrdev_region(unsigned majo
+       struct char_device_struct *cd = NULL, **cp;
+       int i = major_to_index(major);
+ 
+-      up(&chrdevs_lock);
++      down(&chrdevs_lock);
+       for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
+               if ((*cp)->major == major &&
+                   (*cp)->baseminor == baseminor &&
+diff --git a/fs/exec.c b/fs/exec.c
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -649,6 +649,7 @@ static inline int de_thread(struct task_
+       }
+       sig->group_exit_task = NULL;
+       sig->notify_count = 0;
++      sig->real_timer.data = (unsigned long)current;
+       spin_unlock_irq(lock);
+ 
+       /*
+diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
+--- a/fs/isofs/compress.c
++++ b/fs/isofs/compress.c
+@@ -129,8 +129,14 @@ static int zisofs_readpage(struct file *
+       cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
+       brelse(bh);
+ 
++      if (cstart > cend)
++              goto eio;
++              
+       csize = cend-cstart;
+ 
++      if (csize > deflateBound(1UL << zisofs_block_shift))
++              goto eio;
++
+       /* Now page[] contains an array of pages, any of which can be NULL,
+          and the locks on which we hold.  We should now read the data and
+          release the pages.  If the pages are NULL the decompressed data
+diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
+--- a/include/asm-i386/string.h
++++ b/include/asm-i386/string.h
+@@ -116,7 +116,8 @@ __asm__ __volatile__(
+       "orb $1,%%al\n"
+       "3:"
+       :"=a" (__res), "=&S" (d0), "=&D" (d1)
+-                   :"1" (cs),"2" (ct));
++      :"1" (cs),"2" (ct)
++      :"memory");
+ return __res;
+ }
+ 
+@@ -138,8 +139,9 @@ __asm__ __volatile__(
+       "3:\tsbbl %%eax,%%eax\n\t"
+       "orb $1,%%al\n"
+       "4:"
+-                   :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
+-                   :"1" (cs),"2" (ct),"3" (count));
++      :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
++      :"1" (cs),"2" (ct),"3" (count)
++      :"memory");
+ return __res;
+ }
+ 
+@@ -158,7 +160,9 @@ __asm__ __volatile__(
+       "movl $1,%1\n"
+       "2:\tmovl %1,%0\n\t"
+       "decl %0"
+-      :"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
++      :"=a" (__res), "=&S" (d0)
++      :"1" (s),"0" (c)
++      :"memory");
+ return __res;
+ }
+ 
+@@ -175,7 +179,9 @@ __asm__ __volatile__(
+       "leal -1(%%esi),%0\n"
+       "2:\ttestb %%al,%%al\n\t"
+       "jne 1b"
+-      :"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
++      :"=g" (__res), "=&S" (d0), "=&a" (d1)
++      :"0" (0),"1" (s),"2" (c)
++      :"memory");
+ return __res;
+ }
+ 
+@@ -189,7 +195,9 @@ __asm__ __volatile__(
+       "scasb\n\t"
+       "notl %0\n\t"
+       "decl %0"
+-      :"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu));
++      :"=c" (__res), "=&D" (d0)
++      :"1" (s),"a" (0), "0" (0xffffffffu)
++      :"memory");
+ return __res;
+ }
+ 
+@@ -333,7 +341,9 @@ __asm__ __volatile__(
+       "je 1f\n\t"
+       "movl $1,%0\n"
+       "1:\tdecl %0"
+-      :"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
++      :"=D" (__res), "=&c" (d0)
++      :"a" (c),"0" (cs),"1" (count)
++      :"memory");
+ return __res;
+ }
+ 
+@@ -369,7 +379,7 @@ __asm__ __volatile__(
+       "je 2f\n\t"
+       "stosb\n"
+       "2:"
+-      : "=&c" (d0), "=&D" (d1)
++      :"=&c" (d0), "=&D" (d1)
+       :"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
+       :"memory");
+ return (s);   
+@@ -392,7 +402,8 @@ __asm__ __volatile__(
+       "jne 1b\n"
+       "3:\tsubl %2,%0"
+       :"=a" (__res), "=&d" (d0)
+-      :"c" (s),"1" (count));
++      :"c" (s),"1" (count)
++      :"memory");
+ return __res;
+ }
+ /* end of additional stuff */
+@@ -473,7 +484,8 @@ static inline void * memscan(void * addr
+               "dec %%edi\n"
+               "1:"
+               : "=D" (addr), "=c" (size)
+-              : "0" (addr), "1" (size), "a" (c));
++              : "0" (addr), "1" (size), "a" (c)
++              : "memory");
+       return addr;
+ }
+ 
+diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
+--- a/include/asm-x86_64/smp.h
++++ b/include/asm-x86_64/smp.h
+@@ -46,6 +46,8 @@ extern int pic_mode;
+ extern int smp_num_siblings;
+ extern void smp_flush_tlb(void);
+ extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
++extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
++                                   int retry, int wait);
+ extern void smp_send_reschedule(int cpu);
+ extern void smp_invalidate_rcv(void);         /* Process an NMI */
+ extern void zap_low_mappings(void);
+diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h
+--- a/include/linux/if_shaper.h
++++ b/include/linux/if_shaper.h
+@@ -23,7 +23,7 @@ struct shaper
+       __u32 shapeclock;
+       unsigned long recovery; /* Time we can next clock a packet out on
+                                  an empty queue */
+-      struct semaphore sem;
++      spinlock_t lock;
+         struct net_device_stats stats;
+       struct net_device *dev;
+       int  (*hard_start_xmit) (struct sk_buff *skb,
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1192,7 +1192,7 @@ static inline void *skb_header_pointer(c
+ {
+       int hlen = skb_headlen(skb);
+ 
+-      if (offset + len <= hlen)
++      if (hlen - offset >= len)
+               return skb->data + offset;
+ 
+       if (skb_copy_bits(skb, offset, buffer, len) < 0)
+diff --git a/include/linux/zlib.h b/include/linux/zlib.h
+--- a/include/linux/zlib.h
++++ b/include/linux/zlib.h
+@@ -506,6 +506,11 @@ extern int zlib_deflateReset (z_streamp 
+    stream state was inconsistent (such as zalloc or state being NULL).
+ */
+ 
++static inline unsigned long deflateBound(unsigned long s)
++{
++      return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
++}
++
+ extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
+ /*
+      Dynamically update the compression level and compression strategy.  The
+diff --git a/kernel/module.c b/kernel/module.c
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -249,13 +249,18 @@ static inline unsigned int block_size(in
+ /* Created by linker magic */
+ extern char __per_cpu_start[], __per_cpu_end[];
+ 
+-static void *percpu_modalloc(unsigned long size, unsigned long align)
++static void *percpu_modalloc(unsigned long size, unsigned long align,
++                           const char *name)
+ {
+       unsigned long extra;
+       unsigned int i;
+       void *ptr;
+ 
+-      BUG_ON(align > SMP_CACHE_BYTES);
++      if (align > SMP_CACHE_BYTES) {
++              printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
++                     name, align, SMP_CACHE_BYTES);
++              align = SMP_CACHE_BYTES;
++      }
+ 
+       ptr = __per_cpu_start;
+       for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
+@@ -347,7 +352,8 @@ static int percpu_modinit(void)
+ }     
+ __initcall(percpu_modinit);
+ #else /* ... !CONFIG_SMP */
+-static inline void *percpu_modalloc(unsigned long size, unsigned long align)
++static inline void *percpu_modalloc(unsigned long size, unsigned long align,
++                                  const char *name)
+ {
+       return NULL;
+ }
+@@ -1554,7 +1560,8 @@ static struct module *load_module(void _
+       if (pcpuindex) {
+               /* We have a special allocation for this section. */
+               percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
+-                                       sechdrs[pcpuindex].sh_addralign);
++                                       sechdrs[pcpuindex].sh_addralign,
++                                       mod->name);
+               if (!percpu) {
+                       err = -ENOMEM;
+                       goto free_mod;
+diff --git a/kernel/signal.c b/kernel/signal.c
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -686,7 +686,7 @@ static void handle_stop_signal(int sig, 
+ {
+       struct task_struct *t;
+ 
+-      if (p->flags & SIGNAL_GROUP_EXIT)
++      if (p->signal->flags & SIGNAL_GROUP_EXIT)
+               /*
+                * The process is in the middle of dying already.
+                */
+diff --git a/lib/inflate.c b/lib/inflate.c
+--- a/lib/inflate.c
++++ b/lib/inflate.c
+@@ -326,7 +326,7 @@ DEBG("huft1 ");
+   {
+     *t = (struct huft *)NULL;
+     *m = 0;
+-    return 0;
++    return 2;
+   }
+ 
+ DEBG("huft2 ");
+@@ -374,6 +374,7 @@ DEBG("huft5 ");
+     if ((j = *p++) != 0)
+       v[x[j]++] = i;
+   } while (++i < n);
++  n = x[g];                   /* set n to length of v */
+ 
+ DEBG("h6 ");
+ 
+@@ -410,12 +411,13 @@ DEBG1("1 ");
+ DEBG1("2 ");
+           f -= a + 1;           /* deduct codes from patterns left */
+           xp = c + k;
+-          while (++j < z)       /* try smaller tables up to z bits */
+-          {
+-            if ((f <<= 1) <= *++xp)
+-              break;            /* enough codes to use up j bits */
+-            f -= *xp;           /* else deduct codes from patterns */
+-          }
++          if (j < z)
++            while (++j < z)       /* try smaller tables up to z bits */
++            {
++              if ((f <<= 1) <= *++xp)
++                break;            /* enough codes to use up j bits */
++              f -= *xp;           /* else deduct codes from patterns */
++            }
+         }
+ DEBG1("3 ");
+         z = 1 << j;             /* table entries for j-bit table */
+diff --git a/mm/memory.c b/mm/memory.c
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1164,7 +1164,7 @@ int remap_pfn_range(struct vm_area_struc
+ {
+       pgd_t *pgd;
+       unsigned long next;
+-      unsigned long end = addr + size;
++      unsigned long end = addr + PAGE_ALIGN(size);
+       struct mm_struct *mm = vma->vm_mm;
+       int err;
+ 
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -409,7 +409,7 @@ asmlinkage long sys_set_mempolicy(int mo
+       struct mempolicy *new;
+       DECLARE_BITMAP(nodes, MAX_NUMNODES);
+ 
+-      if (mode > MPOL_MAX)
++      if (mode < 0 || mode > MPOL_MAX)
+               return -EINVAL;
+       err = get_nodes(nodes, nmask, maxnode, mode);
+       if (err)
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -578,6 +578,14 @@ static int vlan_device_event(struct noti
+                       if (!vlandev)
+                               continue;
+ 
++                      if (netif_carrier_ok(dev)) {
++                              if (!netif_carrier_ok(vlandev))
++                                      netif_carrier_on(vlandev);
++                      } else {
++                              if (netif_carrier_ok(vlandev))
++                                      netif_carrier_off(vlandev);
++                      }
++
+                       if ((vlandev->state & VLAN_LINK_STATE_MASK) != flgs) {
+                               vlandev->state = (vlandev->state &~ 
VLAN_LINK_STATE_MASK) 
+                                       | flgs;
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -349,12 +349,12 @@ static void icmp_push_reply(struct icmp_
+ {
+       struct sk_buff *skb;
+ 
+-      ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
+-                     icmp_param->data_len+icmp_param->head_len,
+-                     icmp_param->head_len,
+-                     ipc, rt, MSG_DONTWAIT);
+-
+-      if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
++      if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
++                         icmp_param->data_len+icmp_param->head_len,
++                         icmp_param->head_len,
++                         ipc, rt, MSG_DONTWAIT) < 0)
++              ip_flush_pending_frames(icmp_socket->sk);
++      else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
+               struct icmphdr *icmph = skb->h.icmph;
+               unsigned int csum = 0;
+               struct sk_buff *skb1;
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -111,7 +111,6 @@ static int ip_dev_loopback_xmit(struct s
+ #ifdef CONFIG_NETFILTER_DEBUG
+       nf_debug_ip_loopback_xmit(newskb);
+ #endif
+-      nf_reset(newskb);
+       netif_rx(newskb);
+       return 0;
+ }
+@@ -196,8 +195,6 @@ static inline int ip_finish_output2(stru
+       nf_debug_ip_finish_output2(skb);
+ #endif /*CONFIG_NETFILTER_DEBUG*/
+ 
+-      nf_reset(skb);
+-
+       if (hh) {
+               int hh_alen;
+ 
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -848,6 +848,9 @@ mc_msf_out:
+  
+               case IP_IPSEC_POLICY:
+               case IP_XFRM_POLICY:
++                      err = -EPERM;
++                      if (!capable(CAP_NET_ADMIN))
++                              break;
+                       err = xfrm_user_policy(sk, optname, optval, optlen);
+                       break;
+ 
+diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
+--- a/net/ipv4/netfilter/ip_conntrack_core.c
++++ b/net/ipv4/netfilter/ip_conntrack_core.c
+@@ -1124,6 +1124,9 @@ void ip_conntrack_cleanup(void)
+               schedule();
+               goto i_see_dead_people;
+       }
++      /* wait until all references to ip_conntrack_untracked are dropped */
++      while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
++              schedule();
+ 
+       kmem_cache_destroy(ip_conntrack_cachep);
+       kmem_cache_destroy(ip_conntrack_expect_cachep);
+diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
+--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
++++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
+@@ -432,6 +432,13 @@ static unsigned int ip_conntrack_defrag(
+                                       const struct net_device *out,
+                                       int (*okfn)(struct sk_buff *))
+ {
++#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
++      /* Previously seen (loopback)?  Ignore.  Do this before
++           fragment check. */
++      if ((*pskb)->nfct)
++              return NF_ACCEPT;
++#endif
++
+       /* Gather fragments. */
+       if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
+               *pskb = ip_ct_gather_frags(*pskb,
+diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
+--- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
++++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
+@@ -40,7 +40,8 @@ tcp_unique_tuple(struct ip_conntrack_tup
+                enum ip_nat_manip_type maniptype,
+                const struct ip_conntrack *conntrack)
+ {
+-      static u_int16_t port, *portptr;
++      static u_int16_t port;
++      u_int16_t *portptr;
+       unsigned int range_size, min, i;
+ 
+       if (maniptype == IP_NAT_MANIP_SRC)
+diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
+--- a/net/ipv4/netfilter/ip_nat_proto_udp.c
++++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
+@@ -41,7 +41,8 @@ udp_unique_tuple(struct ip_conntrack_tup
+                enum ip_nat_manip_type maniptype,
+                const struct ip_conntrack *conntrack)
+ {
+-      static u_int16_t port, *portptr;
++      static u_int16_t port;
++      u_int16_t *portptr;
+       unsigned int range_size, min, i;
+ 
+       if (maniptype == IP_NAT_MANIP_SRC)
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -198,12 +198,13 @@ resubmit:
+               if (!raw_sk) {
+                       if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+                               IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
+-                              icmpv6_param_prob(skb, ICMPV6_UNK_NEXTHDR, nhoff);
++                              icmpv6_send(skb, ICMPV6_PARAMPROB,
++                                          ICMPV6_UNK_NEXTHDR, nhoff,
++                                          skb->dev);
+                       }
+-              } else {
++              } else
+                       IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
+-                      kfree_skb(skb);
+-              }
++              kfree_skb(skb);
+       }
+       rcu_read_unlock();
+       return 0;
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -503,6 +503,9 @@ done:
+               break;
+       case IPV6_IPSEC_POLICY:
+       case IPV6_XFRM_POLICY:
++              retv = -EPERM;
++              if (!capable(CAP_NET_ADMIN))
++                      break;
+               retv = xfrm_user_policy(sk, optname, optval, optlen);
+               break;
+ 
+diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
+--- a/net/ipv6/netfilter/ip6_queue.c
++++ b/net/ipv6/netfilter/ip6_queue.c
+@@ -76,7 +76,9 @@ static DECLARE_MUTEX(ipqnl_sem);
+ static void
+ ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
+ {
++      local_bh_disable();
+       nf_reinject(entry->skb, entry->info, verdict);
++      local_bh_enable();
+       kfree(entry);
+ }
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -315,8 +315,8 @@ err:
+ static void netlink_remove(struct sock *sk)
+ {
+       netlink_table_grab();
+-      nl_table[sk->sk_protocol].hash.entries--;
+-      sk_del_node_init(sk);
++      if (sk_del_node_init(sk))
++              nl_table[sk->sk_protocol].hash.entries--;
+       if (nlk_sk(sk)->groups)
+               __sk_del_bind_node(sk);
+       netlink_table_ungrab();
+@@ -429,7 +429,12 @@ retry:
+       err = netlink_insert(sk, pid);
+       if (err == -EADDRINUSE)
+               goto retry;
+-      return 0;
++
++      /* If 2 threads race to autobind, that is fine.  */
++      if (err == -EBUSY)
++              err = 0;
++
++      return err;
+ }
+ 
+ static inline int netlink_capable(struct socket *sock, unsigned int flag) 
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -274,6 +274,9 @@ static int packet_rcv_spkt(struct sk_buf
+       dst_release(skb->dst);
+       skb->dst = NULL;
+ 
++      /* drop conntrack reference */
++      nf_reset(skb);
++
+       spkt = (struct sockaddr_pkt*)skb->cb;
+ 
+       skb_push(skb, skb->data-skb->mac.raw);
+@@ -517,6 +520,9 @@ static int packet_rcv(struct sk_buff *sk
+       dst_release(skb->dst);
+       skb->dst = NULL;
+ 
++      /* drop conntrack reference */
++      nf_reset(skb);
++
+       spin_lock(&sk->sk_receive_queue.lock);
+       po->stats.tp_packets++;
+       __skb_queue_tail(&sk->sk_receive_queue, skb);
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1180,6 +1180,9 @@ static struct xfrm_policy *xfrm_compile_
+       if (nr > XFRM_MAX_DEPTH)
+               return NULL;
+ 
++      if (p->dir > XFRM_POLICY_OUT)
++              return NULL;
++
+       xp = xfrm_policy_alloc(GFP_KERNEL);
+       if (xp == NULL) {
+               *dir = -ENOBUFS;
+diff --git a/security/keys/keyring.c b/security/keys/keyring.c
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -188,7 +188,11 @@ static void keyring_destroy(struct key *
+ 
+       if (keyring->description) {
+               write_lock(&keyring_name_lock);
+-              list_del(&keyring->type_data.link);
++
++              if (keyring->type_data.link.next != NULL &&
++                  !list_empty(&keyring->type_data.link))
++                      list_del(&keyring->type_data.link);
++
+               write_unlock(&keyring_name_lock);
+       }
+ 
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -641,7 +641,7 @@ long join_session_keyring(const char *na
+               keyring = keyring_alloc(name, tsk->uid, tsk->gid, 0, NULL);
+               if (IS_ERR(keyring)) {
+                       ret = PTR_ERR(keyring);
+-                      goto error;
++                      goto error2;
+               }
+       }
+       else if (IS_ERR(keyring)) {
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/examples/vif-common.sh
--- /dev/null   Fri Oct 21 19:58:39 2005
+++ b/tools/examples/vif-common.sh      Mon Oct 24 15:08:13 2005
@@ -0,0 +1,74 @@
+#
+# Copyright (c) 2005 XenSource Ltd.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#
+
+
+dir=$(dirname "$0")
+. "$dir/xen-hotplug-common.sh"
+. "$dir/xen-network-common.sh"
+
+command="$1"
+
+if [ "$command" != "up" ] && [ "$command" != "down" ]
+then
+  log err "Invalid command: $command"
+  exit 1
+fi
+
+
+XENBUS_PATH="${XENBUS_PATH:?}"
+vif="${vif:?}"
+
+ip=$(xenstore-read "$XENBUS_PATH/ip" >&/dev/null || true)
+
+
+function frob_iptable()
+{
+  if [ "$command" == "up" ]
+  then
+    local c="-A"
+  else
+    local c="-D"
+  fi
+
+  iptables "$c" FORWARD -m physdev --physdev-in "$vif" "$@" -j ACCEPT
+}
+
+
+##
+# Add or remove the appropriate entries in the iptables.  With antispoofing
+# turned on, we have to explicitly allow packets to the interface, regardless
+# of the ip setting.  If ip is set, then we additionally restrict the packets
+# to those coming from the specified networks, though we allow DHCP requests
+# as well.
+#
+function handle_iptable()
+{
+  if [ "$ip" != "" ]
+  then
+      local addr
+      for addr in "$ip"
+      do
+        frob_iptable -s "$addr"
+      done
+
+      # Always allow the domain to talk to a DHCP server.
+      frob_iptable -p udp --sport 68 --dport 67
+  else
+      # No IP addresses have been specified, so allow anything.
+      frob_iptable
+  fi
+}
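
For illustration only: with the antispoofing handling above, an "up" invocation for a hypothetical interface vif1.0 whose xenstore ip entry is "192.168.1.10" would issue roughly the following iptables commands (interface name and address are example values, not taken from this changeset):

  # restrict forwarded traffic from the vif to the configured source address
  iptables -A FORWARD -m physdev --physdev-in vif1.0 -s 192.168.1.10 -j ACCEPT
  # always allow the domain to reach a DHCP server
  iptables -A FORWARD -m physdev --physdev-in vif1.0 -p udp --sport 68 --dport 67 -j ACCEPT
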
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/examples/xen-network-common.sh
--- /dev/null   Fri Oct 21 19:58:39 2005
+++ b/tools/examples/xen-network-common.sh      Mon Oct 24 15:08:13 2005
@@ -0,0 +1,37 @@
+#
+# Copyright (c) 2005 XenSource Ltd.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#
+
+
+# Gentoo doesn't have ifup/ifdown: define appropriate alternatives
+if ! which ifup >&/dev/null
+then
+  if [ -e /etc/conf.d/net ]
+  then
+    ifup()
+    {
+      /etc/init.d/net.$1 start
+    }
+    ifdown()
+    {
+      /etc/init.d/net.$1 stop
+    }
+  else
+    logger -p "daemon.crit" -- \
+      "You don't have ifup and don't seem to be running Gentoo either!"
+    exit 1
+  fi
+fi
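
As a concrete sketch of the Gentoo fallback above: on a host with no ifup/ifdown but with /etc/conf.d/net present, the helper definitions reduce to the distribution's per-interface init scripts (interface name is a hypothetical example):

  ifup eth0     # runs /etc/init.d/net.eth0 start
  ifdown eth0   # runs /etc/init.d/net.eth0 stop
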
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/ioemu/hw/i8259_stub.c
--- /dev/null   Fri Oct 21 19:58:39 2005
+++ b/tools/ioemu/hw/i8259_stub.c       Mon Oct 24 15:08:13 2005
@@ -0,0 +1,96 @@
+/* Xen 8259 stub for interrupt controller emulation
+ * 
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ * Copyright (c) 2005      Intel corperation
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "xenctrl.h"
+#include <xen/io/ioreq.h>
+#include <stdio.h>
+#include "cpu.h"
+#include "cpu-all.h"
+
+static __inline__ void atomic_set_bit(long nr, volatile void *addr)
+{
+        __asm__ __volatile__(
+                "lock ; bts %1,%0"
+                :"=m" (*(volatile long *)addr)
+                :"dIr" (nr));
+}
+static __inline__ void atomic_clear_bit(long nr, volatile void *addr)
+{
+        __asm__ __volatile__(
+                "lock ; btr %1,%0"
+                :"=m" (*(volatile long *)addr)
+                :"dIr" (nr));
+}
+
+#include <vl.h>
+extern shared_iopage_t *shared_page;
+extern CPUState *global_env;
+void pic_set_irq(int irq, int level)
+{
+    global_iodata_t  *gio;
+    int  mask;
+
+    gio = &shared_page->sp_global;
+    mask = 1 << irq;
+    if ( gio->pic_elcr & mask ) {
+        /* level */
+       if ( level ) {
+           atomic_set_bit(irq, &gio->pic_irr);
+           atomic_clear_bit(irq, &gio->pic_clear_irr);
+           global_env->send_event = 1;
+       }
+       else {
+           atomic_set_bit(irq, &gio->pic_clear_irr);
+           atomic_clear_bit(irq, &gio->pic_irr);
+           global_env->send_event = 1;
+       }
+    }
+    else {
+       /* edge */
+       if ( level ) {
+           if ( (mask & gio->pic_last_irr) == 0 ) { 
+               atomic_set_bit(irq, &gio->pic_irr);
+               atomic_set_bit(irq, &gio->pic_last_irr);
+               global_env->send_event = 1;
+           }
+       }
+       else {
+           atomic_clear_bit(irq, &gio->pic_last_irr);
+       }
+    }
+}
+
+void irq_info(void)
+{
+    term_printf("irq statistic code not compiled.\n");
+}
+
+void pic_info(void)
+{
+    term_printf("pic_infoi code not compiled.\n");
+}
+
+void pic_init(void)
+{
+}
+
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/util/diagnose.py
--- /dev/null   Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/util/diagnose.py Mon Oct 24 15:08:13 2005
@@ -0,0 +1,150 @@
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#
+# Copyright (c) 2005 XenSource Ltd
+
+
+import re
+import sys
+
+from xen.xend import sxp
+
+from xen.xend.XendClient import server
+from xen.xend.XendError import XendError
+from xen.xend.xenstore.xstransact import xstransact
+
+import xen.xend.XendProtocol
+
+
+domain = None
+domid = None
+deviceClass = None
+device = None
+frontendPath = None
+backendPath = None
+
+
+def diagnose(dom):
+    global domain
+    global domid
+    global dompath
+    
+    try:
+        domain = server.xend_domain(dom)
+        state = sxp.child_value(domain, 'state')
+        domid = int(sxp.child_value(domain, 'domid'))
+        name = sxp.child_value(domain, 'name')
+        dompath = '/local/domain/%d' % domid
+
+        print "Domain ID is %d." % domid
+        print "Domain name is %s." % name
+
+        if not state:
+            raise XendError("Cannot find state")
+
+        if state.find('c') != -1:
+            print "Domain has crashed."
+
+        diagnose_console()
+
+        diagnose_devices()
+    except xen.xend.XendProtocol.XendError, exn:
+        print exn
+
+
+def diagnose_console():
+    port    = xstransact.Read(dompath + '/console/port')
+    ringref = xstransact.Read(dompath + '/console/ring-ref')
+    tty     = xstransact.Read(dompath + '/console/tty')
+
+    if not port:
+        print "Console port is missing; Xend has failed."
+    if not ringref:
+        print "Console ring-ref is missing; Xend has failed."
+    if not tty:
+        print "Console tty is missing; Xenconsoled has failed."
+
+
+def diagnose_devices():
+    global deviceClass
+    global device
+    global frontendPath
+    global backendPath
+    
+    device_path = dompath + '/device'
+
+    device_classes = xstransact.List(device_path)
+
+    print "Found %d device classes in use." % len(device_classes)
+
+    for dc in device_classes:
+        deviceClass = dc
+        device_class_path = device_path + '/' + deviceClass
+
+        devices = xstransact.List(device_class_path)
+
+        print "Found %d %s devices." % (len(devices), deviceClass)
+
+        for d in devices:
+            device = d
+            
+            print "Found device %s, %s." % (deviceClass, device)
+
+            frontendPath = device_class_path + '/' + device
+            backendPath = xstransact.Read(frontendPath, 'backend')
+
+            if not backendPath:
+                print ("Cannot find backend path for device %s, %s." %
+                       (deviceClass, device))
+            else:
+                backend_error = xstransact.Read(
+                    backendPath.replace('backend/', 'error/backend/'),
+                    'error')
+
+                if backend_error:
+                    diagnose_device_error(backend_error)
+
+
+def diagnose_device_error(err):
+    if re.search("2 reading .*/ring-ref and event-channel", err):
+        print ("Backend is stuck waiting for frontend for device %s, %s." %
+               (deviceClass, device))
+        diagnose_stuck_frontend()
+    else:
+        print ("Device %s, %s shows error %s." %
+               (deviceClass, device, err))
+
+
+def diagnose_stuck_frontend():
+    if deviceClass == "vbd":
+        phy = xstransact.Read(backendPath, 'physical-device')
+
+        if phy:
+            print ("Device %s, %s hotplugging has completed successfully." %
+                   (deviceClass, device))
+        else:
+            print ("Device %s, %s hotplugging failed." %
+                   (deviceClass, device))
+
+
+def main(argv = None):
+    if argv is None:
+        argv = sys.argv
+
+    diagnose(argv[1])
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
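
As a reading aid, the sketch below lists the xenstore nodes the diagnostic
above inspects, using plain string formatting and no xend or xstransact
imports. The dom0 backend path shown is an assumed conventional layout; the
script itself reads the real backend path from the frontend node.

# Illustrative only: the xenstore nodes probed by the diagnostic above.
def paths_probed(domid, device_class, device):
    dompath = '/local/domain/%d' % domid
    nodes = ['%s/console/%s' % (dompath, key)
             for key in ('port', 'ring-ref', 'tty')]
    frontend = '%s/device/%s/%s' % (dompath, device_class, device)
    # Assumed conventional backend layout; the script reads the real path
    # from the 'backend' key under the frontend node.
    backend = '/local/domain/0/backend/%s/%d/%s' % (device_class, domid, device)
    # Same substitution the script applies to locate backend error reports.
    nodes += [frontend, backend.replace('backend/', 'error/backend/') + '/error']
    return nodes

if __name__ == '__main__':
    for node in paths_probed(3, 'vbd', '768'):
        print(node)
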
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/xend/tests/test_uuid.py
--- /dev/null   Fri Oct 21 19:58:39 2005
+++ b/tools/python/xen/xend/tests/test_uuid.py  Mon Oct 24 15:08:13 2005
@@ -0,0 +1,30 @@
+import unittest
+
+from xen.xend import uuid
+
+
+class test_uuid(unittest.TestCase):
+
+    def testStringRoundtrip(self):
+        def t(inp):
+            self.assertEqual(uuid.fromString(uuid.toString(inp)), inp)
+
+        t(uuid.create())
+        t(uuid.create())
+        t(uuid.create())
+        t(uuid.create())
+        t(uuid.create())
+
+
+    def testToFromString(self):
+        def t(inp, expected):
+            self.assertEqual(uuid.toString(inp), expected)
+            self.assertEqual(uuid.fromString(expected), inp)
+
+        t([0 for i in range(0, 16)], "00000000-0000-0000-0000-000000000000")
+        t([185, 158, 125, 206, 250, 178, 125, 57, 2, 6, 162, 74, 178, 236,
+           196, 5], "b99e7dce-fab2-7d39-0206-a24ab2ecc405")
+
+
+def test_suite():
+    return unittest.makeSuite(test_uuid)
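
The string form exercised by testToFromString above is plain hex grouping
(8-4-4-4-12 digits). The standalone model below assumes only that behaviour
(it is not the xen.xend.uuid module itself) and reproduces the test vector
used above.

# Standalone model of the conversion the test exercises (not the xend module):
# a 16-byte list <-> "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx".
def to_string(u):
    return ("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
            "%02x%02x%02x%02x%02x%02x") % tuple(u)

def from_string(s):
    digits = s.replace('-', '')
    return [int(digits[i:i + 2], 16) for i in range(0, 32, 2)]

if __name__ == '__main__':
    sample = [185, 158, 125, 206, 250, 178, 125, 57, 2, 6,
              162, 74, 178, 236, 196, 5]
    assert to_string(sample) == "b99e7dce-fab2-7d39-0206-a24ab2ecc405"
    assert from_string(to_string(sample)) == sample
    print("roundtrip ok")
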
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/security/get_decision.c
--- /dev/null   Fri Oct 21 19:58:39 2005
+++ b/tools/security/get_decision.c     Mon Oct 24 15:08:13 2005
@@ -0,0 +1,176 @@
+/****************************************************************
+ * get_decision.c
+ *
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Authors:
+ * Reiner Sailer <sailer@xxxxxxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * An example program that shows how to retrieve an access control
+ * decision from the hypervisor ACM based on the currently active policy.
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <string.h>
+#include <netinet/in.h>
+#include <xen/acm.h>
+#include <xen/acm_ops.h>
+#include <xen/linux/privcmd.h>
+
+#define PERROR(_m, _a...) \
+fprintf(stderr, "ERROR: " _m " (%d = %s)\n" , ## _a ,  \
+                errno, strerror(errno))
+
+void usage(char *progname)
+{
+    printf("Use: %s \n", progname);
+    printf(" Test program illustrating the retrieval of\n");
+    printf(" access control decisions from xen. At this time,\n");
+    printf(" only sharing (STE) policy decisions are supported.\n");
+    printf(" parameter options:\n");
+    printf("\t -i domid -i domid\n");
+    printf("\t -i domid -s ssidref\n");
+    printf("\t -s ssidref -s ssidref\n\n");
+    exit(-1);
+}
+
+static inline int do_policycmd(int xc_handle, unsigned int cmd,
+                               unsigned long data)
+{
+    return ioctl(xc_handle, cmd, data);
+}
+
+static inline int do_xen_hypercall(int xc_handle,
+                                   privcmd_hypercall_t * hypercall)
+{
+    return do_policycmd(xc_handle,
+                        IOCTL_PRIVCMD_HYPERCALL,
+                        (unsigned long) hypercall);
+}
+
+static inline int do_acm_op(int xc_handle, struct acm_op *op)
+{
+    int ret = -1;
+    privcmd_hypercall_t hypercall;
+
+    op->interface_version = ACM_INTERFACE_VERSION;
+
+    hypercall.op = __HYPERVISOR_acm_op;
+    hypercall.arg[0] = (unsigned long) op;
+
+    if (mlock(op, sizeof(*op)) != 0) {
+        PERROR("Could not lock memory for Xen policy hypercall");
+        goto out1;
+    }
+
+    if ((ret = do_xen_hypercall(xc_handle, &hypercall)) < 0) {
+        if (errno == EACCES)
+            fprintf(stderr, "ACM operation failed -- need to"
+                    " rebuild the user-space tool set?\n");
+        goto out2;
+    }
+
+  out2:(void) munlock(op, sizeof(*op));
+  out1:return ret;
+}
+
+
+/************************ get decision ******************************/
+
+/* This example uses two domain IDs and retrieves the decision whether these
+ * domains can share information (useful, e.g., to enforce policy on network
+ * traffic in dom0).
+ */
+int acm_get_decision(int xc_handle, int argc, char *const argv[])
+{
+    struct acm_op op;
+    int ret;
+
+    op.cmd = ACM_GETDECISION;
+    op.interface_version = ACM_INTERFACE_VERSION;
+    op.u.getdecision.get_decision_by1 = UNSET;
+    op.u.getdecision.get_decision_by2 = UNSET;
+    op.u.getdecision.hook = SHARING;
+
+    while (1) {
+        int c = getopt(argc, argv, "i:s:");
+        if (c == -1)
+            break;
+
+        if (c == 'i') {
+            if (op.u.getdecision.get_decision_by1 == UNSET) {
+                op.u.getdecision.get_decision_by1 = DOMAINID;
+                op.u.getdecision.id1.domainid = strtoul(optarg, NULL, 0);
+            } else if (op.u.getdecision.get_decision_by2 == UNSET) {
+                op.u.getdecision.get_decision_by2 = DOMAINID;
+                op.u.getdecision.id2.domainid = strtoul(optarg, NULL, 0);
+            } else
+                usage(argv[0]);
+        } else if (c == 's') {
+            if (op.u.getdecision.get_decision_by1 == UNSET) {
+                op.u.getdecision.get_decision_by1 = SSIDREF;
+                op.u.getdecision.id1.ssidref = strtoul(optarg, NULL, 0);
+            } else if (op.u.getdecision.get_decision_by2 == UNSET) {
+                op.u.getdecision.get_decision_by2 = SSIDREF;
+                op.u.getdecision.id2.ssidref = strtoul(optarg, NULL, 0);
+            } else
+                usage(argv[0]);
+        } else
+            usage(argv[0]);
+    }
+    if ((op.u.getdecision.get_decision_by1 == UNSET) ||
+        (op.u.getdecision.get_decision_by2 == UNSET))
+        usage(argv[0]);
+
+    if ((ret = do_acm_op(xc_handle, &op))) {
+        printf("%s: Error getting decision (%d).\n", __func__, ret);
+        printf("%s: decision = %s.\n", __func__,
+               (op.u.getdecision.acm_decision ==
+                ACM_ACCESS_PERMITTED) ? "PERMITTED" : ((op.u.getdecision.
+                                                        acm_decision ==
+                                                        ACM_ACCESS_DENIED)
+                                                       ? "DENIED" :
+                                                       "ERROR"));
+        return ret;
+    }
+    return op.u.getdecision.acm_decision;
+}
+
+/***************************** main **************************************/
+
+int main(int argc, char **argv)
+{
+
+    int acm_cmd_fd, ret = 0;
+
+    if (argc < 5)
+        usage(argv[0]);
+
+    if ((acm_cmd_fd = open("/proc/xen/privcmd", O_RDONLY)) <= 0) {
+        printf("ERROR: Could not open xen privcmd device!\n");
+        exit(-1);
+    }
+
+    ret = acm_get_decision(acm_cmd_fd, argc, argv);
+
+    printf("Decision: %s (%d)\n",
+           (ret == ACM_ACCESS_PERMITTED) ? "PERMITTED" :
+           ((ret == ACM_ACCESS_DENIED) ? "DENIED" : "ERROR"), ret);
+
+    close(acm_cmd_fd);
+    return ret;
+}
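
The option handling above accepts exactly two subjects, each named either by
domain ID (-i) or by ssidref (-s), in any combination. The small Python model
below mirrors only that pairing rule; it is illustrative and performs no
hypercall.

# Illustrative model of the option pairing accepted by get_decision above.
import getopt
import sys

def parse_subjects(argv):
    subjects = []
    opts, _ = getopt.getopt(argv, "i:s:")
    for flag, value in opts:
        kind = 'domid' if flag == '-i' else 'ssidref'
        subjects.append((kind, int(value, 0)))   # base 0 mimics strtoul(.., 0)
    if len(subjects) != 2:
        raise SystemExit("need exactly two of: -i domid / -s ssidref")
    return subjects

if __name__ == '__main__':
    print(parse_subjects(sys.argv[1:] or ['-i', '1', '-s', '0x2']))
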
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/xenstore/xenstored_proc.h
--- /dev/null   Fri Oct 21 19:58:39 2005
+++ b/tools/xenstore/xenstored_proc.h   Mon Oct 24 15:08:13 2005
@@ -0,0 +1,27 @@
+/* 
+    Copyright (C) 2005 XenSource Ltd
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+*/
+
+#ifndef _XENSTORED_PROC_H
+#define _XENSTORED_PROC_H
+
+#define XENSTORED_PROC_MFN  "/proc/xen/xsd_mfn"
+#define XENSTORED_PROC_PORT "/proc/xen/xsd_port"
+
+
+#endif /* _XENSTORED_PROC_H */
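
A minimal reader for the two proc nodes named above (illustrative only; it
assumes a Xen dom0 where each file exposes a single integer as text):

# Illustrative: read the xenstored MFN and event-channel port proc nodes.
XENSTORED_PROC_MFN = "/proc/xen/xsd_mfn"
XENSTORED_PROC_PORT = "/proc/xen/xsd_port"

def read_int(path):
    f = open(path)
    try:
        return int(f.read().strip(), 0)
    finally:
        f.close()

if __name__ == '__main__':
    print("xenstored mfn:  %d" % read_int(XENSTORED_PROC_MFN))
    print("xenstored port: %d" % read_int(XENSTORED_PROC_PORT))
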
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/arch/x86/dm/i8259.c
--- /dev/null   Fri Oct 21 19:58:39 2005
+++ b/xen/arch/x86/dm/i8259.c   Mon Oct 24 15:08:13 2005
@@ -0,0 +1,520 @@
+/*
+ * QEMU 8259 interrupt controller emulation
+ * 
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ * Copyright (c) 2005 Intel Corporation
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/mm.h>
+#include <xen/xmalloc.h>
+#include <xen/lib.h>
+#include <xen/errno.h>
+#include <xen/sched.h>
+#include <public/io/ioreq.h>
+#include <asm/vmx.h>
+#include <public/io/vmx_vpic.h>
+#include <public/io/vmx_vlapic.h>
+#include <asm/current.h>
+
+/* set irq level. If an edge is detected, then the IRR is set to 1 */
+static inline void pic_set_irq1(PicState *s, int irq, int level)
+{
+    int mask;
+    mask = 1 << irq;
+    if (s->elcr & mask) {
+        /* level triggered */
+        if (level) {
+            s->irr |= mask;
+            s->last_irr |= mask;
+        } else {
+            s->irr &= ~mask;
+            s->last_irr &= ~mask;
+        }
+    } else {
+        /* edge triggered */
+        if (level) {
+            if ((s->last_irr & mask) == 0) {
+                s->irr |= mask;
+           }
+            s->last_irr |= mask;
+        } else {
+            s->last_irr &= ~mask;
+        }
+    }
+}
+
+/* return the highest priority found in mask (highest = smallest
+   number). Return 8 if no irq */
+static inline int get_priority(PicState *s, int mask)
+{
+    int priority;
+    if (mask == 0)
+        return 8;
+    priority = 0;
+    while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
+        priority++;
+    return priority;
+}
+
+/* return the pic wanted interrupt. return -1 if none */
+static int pic_get_irq(PicState *s)
+{
+    int mask, cur_priority, priority;
+
+    mask = s->irr & ~s->imr;
+    priority = get_priority(s, mask);
+    if (priority == 8)
+        return -1;
+    /* compute current priority. If special fully nested mode on the
+       master, the IRQ coming from the slave is not taken into account
+       for the priority computation. */
+    mask = s->isr;
+    if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
+        mask &= ~(1 << 2);
+    cur_priority = get_priority(s, mask);
+    if (priority < cur_priority) {
+        /* higher priority found: an irq should be generated */
+        return (priority + s->priority_add) & 7;
+    } else {
+        return -1;
+    }
+}
+
+/* raise irq to CPU if necessary. must be called every time the active
+   irq may change */
+/* XXX: should not export it, but it is needed for an APIC kludge */
+void pic_update_irq(struct vmx_virpic *s)
+{
+    int irq2, irq;
+
+    /* first look at slave pic */
+    irq2 = pic_get_irq(&s->pics[1]);
+    if (irq2 >= 0) {
+        /* if irq request by slave pic, signal master PIC */
+        pic_set_irq1(&s->pics[0], 2, 1);
+        pic_set_irq1(&s->pics[0], 2, 0);
+    }
+    /* look at requested irq */
+    irq = pic_get_irq(&s->pics[0]);
+    if (irq >= 0) {
+        s->irq_request(s->irq_request_opaque, 1);
+    }
+}
+
+void pic_set_irq_new(void *opaque, int irq, int level)
+{
+    struct vmx_virpic *s = opaque;
+
+    pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
+    /* used for IOAPIC irqs */
+    if (s->alt_irq_func)
+        s->alt_irq_func(s->alt_irq_opaque, irq, level);
+    pic_update_irq(s);
+}
+
+void do_pic_irqs (struct vmx_virpic *s, uint16_t irqs)
+{
+    s->pics[1].irr |= (uint8_t)(irqs >> 8);
+    s->pics[0].irr |= (uint8_t) irqs;
+    /* TODO for alt_irq_func */
+    pic_update_irq(s);
+}
+
+void do_pic_irqs_clear (struct vmx_virpic *s, uint16_t irqs)
+{
+    s->pics[1].irr &= ~(uint8_t)(irqs >> 8);
+    s->pics[0].irr &= ~(uint8_t) irqs;
+    pic_update_irq(s);
+}
+
+/* obsolete function */
+void pic_set_irq(struct vmx_virpic *isa_pic, int irq, int level)
+{
+    pic_set_irq_new(isa_pic, irq, level);
+}
+
+/* acknowledge interrupt 'irq' */
+static inline void pic_intack(PicState *s, int irq)
+{
+    if (s->auto_eoi) {
+        if (s->rotate_on_auto_eoi)
+            s->priority_add = (irq + 1) & 7;
+    } else {
+        s->isr |= (1 << irq);
+    }
+    /* We don't clear a level sensitive interrupt here */
+    if (!(s->elcr & (1 << irq)))
+        s->irr &= ~(1 << irq);
+}
+
+int pic_read_irq(struct vmx_virpic *s)
+{
+    int irq, irq2, intno;
+
+    irq = pic_get_irq(&s->pics[0]);
+    if (irq >= 0) {
+        pic_intack(&s->pics[0], irq);
+        if (irq == 2) {
+            irq2 = pic_get_irq(&s->pics[1]);
+            if (irq2 >= 0) {
+                pic_intack(&s->pics[1], irq2);
+            } else {
+                /* spurious IRQ on slave controller */
+                irq2 = 7;
+            }
+            intno = s->pics[1].irq_base + irq2;
+            irq = irq2 + 8;
+        } else {
+            intno = s->pics[0].irq_base + irq;
+        }
+    } else {
+        /* spurious IRQ on host controller */
+        printk("spurious IRQ irq got=%d\n",irq);
+        irq = 7;
+        intno = s->pics[0].irq_base + irq;
+    }
+    pic_update_irq(s);
+        
+    return intno;
+}
+
+static void update_shared_irr(struct vmx_virpic *s, PicState *c)
+{
+    uint8_t *pl, *pe;
+
+    get_sp(current->domain)->sp_global.pic_elcr = 
+               s->pics[0].elcr | ((u16)s->pics[1].elcr << 8);
+    pl =(uint8_t*)&get_sp(current->domain)->sp_global.pic_last_irr;
+    pe =(uint8_t*)&get_sp(current->domain)->sp_global.pic_elcr;
+    if ( c == &s->pics[0] ) {
+         *pl = c->last_irr;
+         *pe = c->elcr;
+    }
+    else {
+         *(pl+1) = c->last_irr;
+         *(pe+1) = c->elcr;
+    }
+}
+
+static void pic_reset(void *opaque)
+{
+    PicState *s = opaque;
+
+    s->last_irr = 0;
+    s->irr = 0;
+    s->imr = 0;
+    s->isr = 0;
+    s->priority_add = 0;
+    s->irq_base = 0;
+    s->read_reg_select = 0;
+    s->poll = 0;
+    s->special_mask = 0;
+    s->init_state = 0;
+    s->auto_eoi = 0;
+    s->rotate_on_auto_eoi = 0;
+    s->special_fully_nested_mode = 0;
+    s->init4 = 0;
+    s->elcr = 0;
+}
+
+static void pic_ioport_write(void *opaque, uint32_t addr, uint32_t val)
+{
+    PicState *s = opaque;
+    int priority, cmd, irq;
+
+    addr &= 1;
+    if (addr == 0) {
+        if (val & 0x10) {
+            /* init */
+            pic_reset(s);
+            update_shared_irr(s->pics_state, s);
+            /* deassert a pending interrupt */
+            s->pics_state->irq_request(s->pics_state->irq_request_opaque, 0);
+            s->init_state = 1;
+            s->init4 = val & 1;
+            if (val & 0x02)
+                hw_error("single mode not supported");
+            if (val & 0x08)
+                hw_error("level sensitive irq not supported");
+        } else if (val & 0x08) {
+            if (val & 0x04)
+                s->poll = 1;
+            if (val & 0x02)
+                s->read_reg_select = val & 1;
+            if (val & 0x40)
+                s->special_mask = (val >> 5) & 1;
+        } else {
+            cmd = val >> 5;
+            switch(cmd) {
+            case 0:
+            case 4:
+                s->rotate_on_auto_eoi = cmd >> 2;
+                break;
+            case 1: /* end of interrupt */
+            case 5:
+                priority = get_priority(s, s->isr);
+                if (priority != 8) {
+                    irq = (priority + s->priority_add) & 7;
+                    s->isr &= ~(1 << irq);
+                    if (cmd == 5)
+                        s->priority_add = (irq + 1) & 7;
+                    pic_update_irq(s->pics_state);
+                }
+                break;
+            case 3:
+                irq = val & 7;
+                s->isr &= ~(1 << irq);
+                pic_update_irq(s->pics_state);
+                break;
+            case 6:
+                s->priority_add = (val + 1) & 7;
+                pic_update_irq(s->pics_state);
+                break;
+            case 7:
+                irq = val & 7;
+                s->isr &= ~(1 << irq);
+                s->priority_add = (irq + 1) & 7;
+                pic_update_irq(s->pics_state);
+                break;
+            default:
+                /* no operation */
+                break;
+            }
+        }
+    } else {
+        switch(s->init_state) {
+        case 0:
+            /* normal mode */
+            s->imr = val;
+            pic_update_irq(s->pics_state);
+            break;
+        case 1:
+            s->irq_base = val & 0xf8;
+            s->init_state = 2;
+            break;
+        case 2:
+            if (s->init4) {
+                s->init_state = 3;
+            } else {
+                s->init_state = 0;
+            }
+            break;
+        case 3:
+            s->special_fully_nested_mode = (val >> 4) & 1;
+            s->auto_eoi = (val >> 1) & 1;
+            s->init_state = 0;
+            break;
+        }
+    }
+}
+
+static uint32_t pic_poll_read (PicState *s, uint32_t addr1)
+{
+    int ret;
+
+    ret = pic_get_irq(s);
+    if (ret >= 0) {
+        if (addr1 >> 7) {
+            s->pics_state->pics[0].isr &= ~(1 << 2);
+            s->pics_state->pics[0].irr &= ~(1 << 2);
+        }
+        s->irr &= ~(1 << ret);
+        s->isr &= ~(1 << ret);
+        if (addr1 >> 7 || ret != 2)
+            pic_update_irq(s->pics_state);
+    } else {
+        ret = 0x07;
+        pic_update_irq(s->pics_state);
+    }
+
+    return ret;
+}
+
+static uint32_t pic_ioport_read(void *opaque, uint32_t addr1)
+{
+    PicState *s = opaque;
+    unsigned int addr;
+    int ret;
+
+    addr = addr1;
+    addr &= 1;
+    if (s->poll) {
+        ret = pic_poll_read(s, addr1);
+        s->poll = 0;
+    } else {
+        if (addr == 0) {
+            if (s->read_reg_select)
+                ret = s->isr;
+            else
+                ret = s->irr;
+        } else {
+            ret = s->imr;
+        }
+    }
+    return ret;
+}
+
+/* memory mapped interrupt status */
+/* XXX: may be the same as pic_read_irq() */
+uint32_t pic_intack_read(struct vmx_virpic *s)
+{
+    int ret;
+
+    ret = pic_poll_read(&s->pics[0], 0x00);
+    if (ret == 2)
+        ret = pic_poll_read(&s->pics[1], 0x80) + 8;
+    /* Prepare for ISR read */
+    s->pics[0].read_reg_select = 1;
+    
+    return ret;
+}
+
+static void elcr_ioport_write(void *opaque, uint32_t addr, uint32_t val)
+{
+    PicState *s = opaque;
+    s->elcr = val & s->elcr_mask;
+}
+
+static uint32_t elcr_ioport_read(void *opaque, uint32_t addr1)
+{
+    PicState *s = opaque;
+    return s->elcr;
+}
+
+/* XXX: add generic master/slave system */
+static void pic_init1(int io_addr, int elcr_addr, PicState *s)
+{
+    pic_reset(s);
+}
+
+void pic_init(struct vmx_virpic *s, void (*irq_request)(), 
+              void *irq_request_opaque)
+{
+    memset(s, 0, sizeof(*s));
+    pic_init1(0x20, 0x4d0, &s->pics[0]);
+    pic_init1(0xa0, 0x4d1, &s->pics[1]);
+    s->pics[0].elcr_mask = 0xf8;
+    s->pics[1].elcr_mask = 0xde;
+    s->irq_request = irq_request;
+    s->irq_request_opaque = irq_request_opaque;
+    s->pics[0].pics_state = s;
+    s->pics[1].pics_state = s;
+    return; 
+}
+
+void pic_set_alt_irq_func(struct vmx_virpic *s, void (*alt_irq_func)(),
+                          void *alt_irq_opaque)
+{
+    s->alt_irq_func = alt_irq_func;
+    s->alt_irq_opaque = alt_irq_opaque;
+}
+
+static int intercept_pic_io(ioreq_t *p)
+{
+    struct vmx_virpic  *pic;
+    struct vcpu *v = current;
+    uint32_t data;
+    
+    if ( p->size != 1 || p->count != 1) {
+        printk("PIC_IO wrong access size %d!\n", (int)p->size);
+        return 1;
+    }
+    pic = &v->domain->arch.vmx_platform.vmx_pic;
+    if ( p->dir == 0 ) {
+        if(p->pdata_valid) 
+            vmx_copy(&data, (unsigned long)p->u.pdata, p->size, VMX_COPY_IN);
+        else
+            data = p->u.data;
+        pic_ioport_write((void*)&pic->pics[p->addr>>7],
+                (uint32_t) p->addr, (uint32_t) (data & 0xff));
+    }
+    else {
+        data = pic_ioport_read(
+            (void*)&pic->pics[p->addr>>7], (uint32_t) p->addr);
+        if(p->pdata_valid) 
+            vmx_copy(&data, (unsigned long)p->u.pdata, p->size, VMX_COPY_OUT);
+        else 
+            p->u.data = (u64)data;
+    }
+    return 1;
+}
+
+static int intercept_elcr_io(ioreq_t *p)
+{
+    struct vmx_virpic  *s;
+    struct vcpu *v = current;
+    uint32_t data;
+    
+    if ( p->size != 1 || p->count != 1 ) {
+        printk("PIC_IO wrong access size %d!\n", (int)p->size);
+        return 1;
+    }
+
+    s = &v->domain->arch.vmx_platform.vmx_pic;
+    if ( p->dir == 0 ) {
+        if(p->pdata_valid) 
+            vmx_copy(&data, (unsigned long)p->u.pdata, p->size, VMX_COPY_IN);
+        else
+            data = p->u.data;
+        elcr_ioport_write((void*)&s->pics[p->addr&1],
+                (uint32_t) p->addr, (uint32_t)( data & 0xff));
+       get_sp(current->domain)->sp_global.pic_elcr = 
+            s->pics[0].elcr | ((u16)s->pics[1].elcr << 8);
+    }
+    else {
+        data = (u64) elcr_ioport_read(
+                (void*)&s->pics[p->addr&1], (uint32_t) p->addr);
+        if(p->pdata_valid) 
+            vmx_copy(&data, (unsigned long)p->u.pdata, p->size, VMX_COPY_OUT);
+        else 
+            p->u.data = (u64)data;
+
+    }
+    return 1;
+}
+void register_pic_io_hook (void)
+{
+    register_portio_handler(0x20, 2, intercept_pic_io); 
+    register_portio_handler(0x4d0, 1, intercept_elcr_io); 
+    register_portio_handler(0xa0, 2, intercept_pic_io); 
+    register_portio_handler(0x4d1, 1, intercept_elcr_io); 
+}
+
+
+/* IRQ handling */
+int cpu_get_pic_interrupt(struct vcpu *v, int *type)
+{
+    int intno;
+    struct vmx_virpic *s = &v->domain->arch.vmx_platform.vmx_pic;
+    
+    /* read the irq from the PIC */
+    intno = pic_read_irq(s);
+    *type = VLAPIC_DELIV_MODE_EXT;
+    return intno;
+}
+
+int is_pit_irq(struct vcpu *v, int irq)
+{
+    int  pit_vec = v->domain->arch.vmx_platform.vmx_pic.pics[0].irq_base;
+
+    return (irq == pit_vec);
+}
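
The priority logic in get_priority() and pic_get_irq() above is the part that
is easiest to misread: priorities rotate with priority_add, and a pending
request is delivered only if it outranks everything already in service. The
standalone Python model below illustrates that resolution; it omits the
special fully nested mode handling and is not the emulator itself.

# Illustrative model of 8259 priority resolution (lower rotated number wins).
def get_priority(mask, priority_add):
    if mask == 0:
        return 8                      # nothing pending
    priority = 0
    while (mask & (1 << ((priority + priority_add) & 7))) == 0:
        priority += 1
    return priority

def pic_get_irq(irr, imr, isr, priority_add):
    pending = get_priority(irr & ~imr, priority_add)
    if pending == 8:
        return -1                     # no unmasked request
    in_service = get_priority(isr, priority_add)
    if pending < in_service:          # must outrank everything in service
        return (pending + priority_add) & 7
    return -1

if __name__ == '__main__':
    # IRQ3 and IRQ5 pending, IRQ5 masked, nothing in service: IRQ3 is delivered.
    print(pic_get_irq(irr=0x28, imr=0x20, isr=0, priority_add=0))
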
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/io/vmx_vpic.h
--- /dev/null   Fri Oct 21 19:58:39 2005
+++ b/xen/include/public/io/vmx_vpic.h  Mon Oct 24 15:08:13 2005
@@ -0,0 +1,84 @@
+/*
+ * QEMU System Emulator header
+ * 
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2005 Intel Corp
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef _VMX_VPIC_H
+#define _VMX_VPIC_H
+
+#define hw_error(x)  do {} while (0);
+
+
+/* i8259.c */
+typedef struct IOAPICState IOAPICState;
+typedef struct PicState {
+    uint8_t last_irr; /* edge detection */
+    uint8_t irr; /* interrupt request register */
+    uint8_t imr; /* interrupt mask register */
+    uint8_t isr; /* interrupt service register */
+    uint8_t priority_add; /* highest irq priority */
+    uint8_t irq_base;
+    uint8_t read_reg_select;
+    uint8_t poll;
+    uint8_t special_mask;
+    uint8_t init_state;
+    uint8_t auto_eoi;
+    uint8_t rotate_on_auto_eoi;
+    uint8_t special_fully_nested_mode;
+    uint8_t init4; /* true if 4 byte init */
+    uint8_t elcr; /* PIIX edge/trigger selection*/
+    uint8_t elcr_mask;
+    struct vmx_virpic *pics_state;
+} PicState;
+
+struct vmx_virpic {
+    /* 0 is master pic, 1 is slave pic */
+    /* XXX: better separation between the two pics */
+    PicState pics[2];
+    void (*irq_request)(int *opaque, int level);
+    void *irq_request_opaque;
+    /* IOAPIC callback support */
+    void (*alt_irq_func)(void *opaque, int irq_num, int level);
+    void *alt_irq_opaque;
+};
+
+
+void pic_set_irq(struct vmx_virpic *s, int irq, int level);
+void pic_set_irq_new(void *opaque, int irq, int level);
+void pic_init(struct vmx_virpic *s, 
+              void (*irq_request)(),
+              void *irq_request_opaque);
+void pic_set_alt_irq_func(struct vmx_virpic *s, 
+                          void(*alt_irq_func)(),
+                          void *alt_irq_opaque);
+int pic_read_irq(struct vmx_virpic *s);
+void pic_update_irq(struct vmx_virpic *s);
+uint32_t pic_intack_read(struct vmx_virpic *s);
+void register_pic_io_hook (void);
+int cpu_get_pic_interrupt(struct vcpu *v, int *type);
+int is_pit_irq(struct vcpu *v, int irq);
+void do_pic_irqs (struct vmx_virpic *s, uint16_t irqs);
+void do_pic_irqs_clear (struct vmx_virpic *s, uint16_t irqs);
+
+/* APIC */
+#endif  /* _VMX_VPIC_H */  
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c       Fri Oct 21 19:58:39 2005
+++ /dev/null   Mon Oct 24 15:08:13 2005
@@ -1,1625 +0,0 @@
-/*
- *     x86 SMP booting functions
- *
- *     (c) 1995 Alan Cox, Building #3 <alan@xxxxxxxxxx>
- *     (c) 1998, 1999, 2000 Ingo Molnar <mingo@xxxxxxxxxx>
- *
- *     Much of the core SMP work is based on previous work by Thomas Radke, to
- *     whom a great many thanks are extended.
- *
- *     Thanks to Intel for making available several different Pentium,
- *     Pentium Pro and Pentium-II/Xeon MP machines.
- *     Original development of Linux SMP code supported by Caldera.
- *
- *     This code is released under the GNU General Public License version 2 or
- *     later.
- *
- *     Fixes
- *             Felix Koop      :       NR_CPUS used properly
- *             Jose Renau      :       Handle single CPU case.
- *             Alan Cox        :       By repeated request 8) - Total BogoMIPS report.
- *             Greg Wright     :       Fix for kernel stacks panic.
- *             Erich Boleyn    :       MP v1.4 and additional changes.
- *     Matthias Sattler        :       Changes for 2.1 kernel map.
- *     Michel Lespinasse       :       Changes for 2.1 kernel map.
- *     Michael Chastain        :       Change trampoline.S to gnu as.
- *             Alan Cox        :       Dumb bug: 'B' step PPro's are fine
- *             Ingo Molnar     :       Added APIC timers, based on code
- *                                     from Jose Renau
- *             Ingo Molnar     :       various cleanups and rewrites
- *             Tigran Aivazian :       fixed "0.00 in /proc/uptime on SMP" bug.
- *     Maciej W. Rozycki       :       Bits for genuine 82489DX APICs
- *             Martin J. Bligh :       Added support for multi-quad systems
- *             Dave Jones      :       Report invalid combinations of Athlon CPUs.
-*              Rusty Russell   :       Hacked into shape for new "hotplug" boot process. */
-
-#include <linux/module.h>
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/smp_lock.h>
-#include <linux/irq.h>
-#include <linux/bootmem.h>
-#include <linux/notifier.h>
-#include <linux/cpu.h>
-#include <linux/percpu.h>
-
-#include <linux/delay.h>
-#include <linux/mc146818rtc.h>
-#include <asm/tlbflush.h>
-#include <asm/desc.h>
-#include <asm/arch_hooks.h>
-
-#include <asm/smp_alt.h>
-
-#ifndef CONFIG_X86_IO_APIC
-#define Dprintk(args...)
-#endif
-#include <mach_wakecpu.h>
-#include <smpboot_hooks.h>
-
-#include <asm-xen/evtchn.h>
-#include <asm-xen/xen-public/vcpu.h>
-
-/* Set if we find a B stepping CPU */
-static int __initdata smp_b_stepping;
-
-/* Number of siblings per CPU package */
-int smp_num_siblings = 1;
-int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
-EXPORT_SYMBOL(phys_proc_id);
-int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
-EXPORT_SYMBOL(cpu_core_id);
-
-/* bitmap of online cpus */
-cpumask_t cpu_online_map;
-
-cpumask_t cpu_callin_map;
-cpumask_t cpu_callout_map;
-static cpumask_t smp_commenced_mask;
-
-/* Per CPU bogomips and other parameters */
-struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-
-u8 x86_cpu_to_apicid[NR_CPUS] =
-                       { [0 ... NR_CPUS-1] = 0xff };
-EXPORT_SYMBOL(x86_cpu_to_apicid);
-
-#if 0
-/*
- * Trampoline 80x86 program as an array.
- */
-
-extern unsigned char trampoline_data [];
-extern unsigned char trampoline_end  [];
-static unsigned char *trampoline_base;
-static int trampoline_exec;
-#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-/* State of each CPU. */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
-#endif
-
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
-static char resched_name[NR_CPUS][15];
-static char callfunc_name[NR_CPUS][15];
-
-#if 0
-/*
- * Currently trivial. Write the real->protected mode
- * bootstrap into the page concerned. The caller
- * has made sure it's suitably aligned.
- */
-
-static unsigned long __init setup_trampoline(void)
-{
-       memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
-       return virt_to_phys(trampoline_base);
-}
-#endif
-
-static void map_cpu_to_logical_apicid(void);
-
-/*
- * We are called very early to get the low memory for the
- * SMP bootup trampoline page.
- */
-void __init smp_alloc_memory(void)
-{
-#if 0
-       trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
-       /*
-        * Has to be in very low memory so we can execute
-        * real-mode AP code.
-        */
-       if (__pa(trampoline_base) >= 0x9F000)
-               BUG();
-       /*
-        * Make the SMP trampoline executable:
-        */
-       trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
-#endif
-}
-
-/*
- * The bootstrap kernel entry code has set these up. Save them for
- * a given CPU
- */
-
-static void __init smp_store_cpu_info(int id)
-{
-       struct cpuinfo_x86 *c = cpu_data + id;
-
-       *c = boot_cpu_data;
-       if (id!=0)
-               identify_cpu(c);
-       /*
-        * Mask B, Pentium, but not Pentium MMX
-        */
-       if (c->x86_vendor == X86_VENDOR_INTEL &&
-           c->x86 == 5 &&
-           c->x86_mask >= 1 && c->x86_mask <= 4 &&
-           c->x86_model <= 3)
-               /*
-                * Remember we have B step Pentia with bugs
-                */
-               smp_b_stepping = 1;
-
-       /*
-        * Certain Athlons might work (for various values of 'work') in SMP
-        * but they are not certified as MP capable.
-        */
-       if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
-
-               /* Athlon 660/661 is valid. */  
-               if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
-                       goto valid_k7;
-
-               /* Duron 670 is valid */
-               if ((c->x86_model==7) && (c->x86_mask==0))
-                       goto valid_k7;
-
-               /*
-                * Athlon 662, Duron 671, and Athlon >model 7 have capability bit.
-                * It's worth noting that the A5 stepping (662) of some Athlon XP's
-                * have the MP bit set.
-                * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
-                */
-               if (((c->x86_model==6) && (c->x86_mask>=2)) ||
-                   ((c->x86_model==7) && (c->x86_mask>=1)) ||
-                    (c->x86_model> 7))
-                       if (cpu_has_mp)
-                               goto valid_k7;
-
-               /* If we get here, it's not a certified SMP capable AMD system. */
-               tainted |= TAINT_UNSAFE_SMP;
-       }
-
-valid_k7:
-       ;
-}
-
-#if 0
-/*
- * TSC synchronization.
- *
- * We first check whether all CPUs have their TSC's synchronized,
- * then we print a warning if not, and always resync.
- */
-
-static atomic_t tsc_start_flag = ATOMIC_INIT(0);
-static atomic_t tsc_count_start = ATOMIC_INIT(0);
-static atomic_t tsc_count_stop = ATOMIC_INIT(0);
-static unsigned long long tsc_values[NR_CPUS];
-
-#define NR_LOOPS 5
-
-static void __init synchronize_tsc_bp (void)
-{
-       int i;
-       unsigned long long t0;
-       unsigned long long sum, avg;
-       long long delta;
-       unsigned long one_usec;
-       int buggy = 0;
-
-       printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());
-
-       /* convert from kcyc/sec to cyc/usec */
-       one_usec = cpu_khz / 1000;
-
-       atomic_set(&tsc_start_flag, 1);
-       wmb();
-
-       /*
-        * We loop a few times to get a primed instruction cache,
-        * then the last pass is more or less synchronized and
-        * the BP and APs set their cycle counters to zero all at
-        * once. This reduces the chance of having random offsets
-        * between the processors, and guarantees that the maximum
-        * delay between the cycle counters is never bigger than
-        * the latency of information-passing (cachelines) between
-        * two CPUs.
-        */
-       for (i = 0; i < NR_LOOPS; i++) {
-               /*
-                * all APs synchronize but they loop on '== num_cpus'
-                */
-               while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
-                       mb();
-               atomic_set(&tsc_count_stop, 0);
-               wmb();
-               /*
-                * this lets the APs save their current TSC:
-                */
-               atomic_inc(&tsc_count_start);
-
-               rdtscll(tsc_values[smp_processor_id()]);
-               /*
-                * We clear the TSC in the last loop:
-                */
-               if (i == NR_LOOPS-1)
-                       write_tsc(0, 0);
-
-               /*
-                * Wait for all APs to leave the synchronization point:
-                */
-               while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
-                       mb();
-               atomic_set(&tsc_count_start, 0);
-               wmb();
-               atomic_inc(&tsc_count_stop);
-       }
-
-       sum = 0;
-       for (i = 0; i < NR_CPUS; i++) {
-               if (cpu_isset(i, cpu_callout_map)) {
-                       t0 = tsc_values[i];
-                       sum += t0;
-               }
-       }
-       avg = sum;
-       do_div(avg, num_booting_cpus());
-
-       sum = 0;
-       for (i = 0; i < NR_CPUS; i++) {
-               if (!cpu_isset(i, cpu_callout_map))
-                       continue;
-               delta = tsc_values[i] - avg;
-               if (delta < 0)
-                       delta = -delta;
-               /*
-                * We report bigger than 2 microseconds clock differences.
-                */
-               if (delta > 2*one_usec) {
-                       long realdelta;
-                       if (!buggy) {
-                               buggy = 1;
-                               printk("\n");
-                       }
-                       realdelta = delta;
-                       do_div(realdelta, one_usec);
-                       if (tsc_values[i] < avg)
-                               realdelta = -realdelta;
-
-                       printk(KERN_INFO "CPU#%d had %ld usecs TSC skew, fixed it up.\n", i, realdelta);
-               }
-
-               sum += delta;
-       }
-       if (!buggy)
-               printk("passed.\n");
-}
-
-static void __init synchronize_tsc_ap (void)
-{
-       int i;
-
-       /*
-        * Not every cpu is online at the time
-        * this gets called, so we first wait for the BP to
-        * finish SMP initialization:
-        */
-       while (!atomic_read(&tsc_start_flag)) mb();
-
-       for (i = 0; i < NR_LOOPS; i++) {
-               atomic_inc(&tsc_count_start);
-               while (atomic_read(&tsc_count_start) != num_booting_cpus())
-                       mb();
-
-               rdtscll(tsc_values[smp_processor_id()]);
-               if (i == NR_LOOPS-1)
-                       write_tsc(0, 0);
-
-               atomic_inc(&tsc_count_stop);
-               while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
-       }
-}
-#undef NR_LOOPS
-#endif
-
-extern void calibrate_delay(void);
-
-static atomic_t init_deasserted;
-
-static void __init smp_callin(void)
-{
-       int cpuid, phys_id;
-       unsigned long timeout;
-
-#if 0
-       /*
-        * If waken up by an INIT in an 82489DX configuration
-        * we may get here before an INIT-deassert IPI reaches
-        * our local APIC.  We have to wait for the IPI or we'll
-        * lock up on an APIC access.
-        */
-       wait_for_init_deassert(&init_deasserted);
-#endif
-
-       /*
-        * (This works even if the APIC is not enabled.)
-        */
-       phys_id = smp_processor_id();
-       cpuid = smp_processor_id();
-       if (cpu_isset(cpuid, cpu_callin_map)) {
-               printk("huh, phys CPU#%d, CPU#%d already present??\n",
-                                       phys_id, cpuid);
-               BUG();
-       }
-       Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
-
-       /*
-        * STARTUP IPIs are fragile beasts as they might sometimes
-        * trigger some glue motherboard logic. Complete APIC bus
-        * silence for 1 second, this overestimates the time the
-        * boot CPU is spending to send the up to 2 STARTUP IPIs
-        * by a factor of two. This should be enough.
-        */
-
-       /*
-        * Waiting 2s total for startup (udelay is not yet working)
-        */
-       timeout = jiffies + 2*HZ;
-       while (time_before(jiffies, timeout)) {
-               /*
-                * Has the boot CPU finished it's STARTUP sequence?
-                */
-               if (cpu_isset(cpuid, cpu_callout_map))
-                       break;
-               rep_nop();
-       }
-
-       if (!time_before(jiffies, timeout)) {
-               printk("BUG: CPU%d started up but did not get a callout!\n",
-                       cpuid);
-               BUG();
-       }
-
-#if 0
-       /*
-        * the boot CPU has finished the init stage and is spinning
-        * on callin_map until we finish. We are free to set up this
-        * CPU, first the APIC. (this is probably redundant on most
-        * boards)
-        */
-
-       Dprintk("CALLIN, before setup_local_APIC().\n");
-       smp_callin_clear_local_apic();
-       setup_local_APIC();
-#endif
-       map_cpu_to_logical_apicid();
-
-       /*
-        * Get our bogomips.
-        */
-       calibrate_delay();
-       Dprintk("Stack at about %p\n",&cpuid);
-
-       /*
-        * Save our processor parameters
-        */
-       smp_store_cpu_info(cpuid);
-
-#if 0
-       disable_APIC_timer();
-#endif
-
-       /*
-        * Allow the master to continue.
-        */
-       cpu_set(cpuid, cpu_callin_map);
-
-#if 0
-       /*
-        *      Synchronize the TSC with the BP
-        */
-       if (cpu_has_tsc && cpu_khz)
-               synchronize_tsc_ap();
-#endif
-}
-
-static int cpucount;
-
-extern void local_setup_timer(void);
-
-/*
- * Activate a secondary processor.
- */
-static void __init start_secondary(void *unused)
-{
-       /*
-        * Dont put anything before smp_callin(), SMP
-        * booting is too fragile that we want to limit the
-        * things done here to the most necessary things.
-        */
-       cpu_init();
-       smp_callin();
-       while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
-               rep_nop();
-       local_setup_timer();
-       smp_intr_init();
-       local_irq_enable();
-       /*
-        * low-memory mappings have been cleared, flush them from
-        * the local TLBs too.
-        */
-       local_flush_tlb();
-       cpu_set(smp_processor_id(), cpu_online_map);
-
-       /* We can take interrupts now: we're officially "up". */
-       local_irq_enable();
-
-       wmb();
-       cpu_idle();
-}
-
-/*
- * Everything has been set up for the secondary
- * CPUs - they just need to reload everything
- * from the task structure
- * This function must not return.
- */
-void __init initialize_secondary(void)
-{
-       /*
-        * We don't actually need to load the full TSS,
-        * basically just the stack pointer and the eip.
-        */
-
-       asm volatile(
-               "movl %0,%%esp\n\t"
-               "jmp *%1"
-               :
-               :"r" (current->thread.esp),"r" (current->thread.eip));
-}
-
-extern struct {
-       void * esp;
-       unsigned short ss;
-} stack_start;
-
-#ifdef CONFIG_NUMA
-
-/* which logical CPUs are on which nodes */
-cpumask_t node_2_cpu_mask[MAX_NUMNODES] =
-                               { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
-/* which node each logical CPU is on */
-int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
-EXPORT_SYMBOL(cpu_2_node);
-
-/* set up a mapping between cpu and node. */
-static inline void map_cpu_to_node(int cpu, int node)
-{
-       printk("Mapping cpu %d to node %d\n", cpu, node);
-       cpu_set(cpu, node_2_cpu_mask[node]);
-       cpu_2_node[cpu] = node;
-}
-
-/* undo a mapping between cpu and node. */
-static inline void unmap_cpu_to_node(int cpu)
-{
-       int node;
-
-       printk("Unmapping cpu %d from all nodes\n", cpu);
-       for (node = 0; node < MAX_NUMNODES; node ++)
-               cpu_clear(cpu, node_2_cpu_mask[node]);
-       cpu_2_node[cpu] = 0;
-}
-#else /* !CONFIG_NUMA */
-
-#define map_cpu_to_node(cpu, node)     ({})
-#define unmap_cpu_to_node(cpu) ({})
-
-#endif /* CONFIG_NUMA */
-
-u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-
-static void map_cpu_to_logical_apicid(void)
-{
-       int cpu = smp_processor_id();
-       int apicid = smp_processor_id();
-
-       cpu_2_logical_apicid[cpu] = apicid;
-       map_cpu_to_node(cpu, apicid_to_node(apicid));
-}
-
-static void unmap_cpu_to_logical_apicid(int cpu)
-{
-       cpu_2_logical_apicid[cpu] = BAD_APICID;
-       unmap_cpu_to_node(cpu);
-}
-
-#if APIC_DEBUG
-static inline void __inquire_remote_apic(int apicid)
-{
-       int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
-       char *names[] = { "ID", "VERSION", "SPIV" };
-       int timeout, status;
-
-       printk("Inquiring remote APIC #%d...\n", apicid);
-
-       for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
-               printk("... APIC #%d %s: ", apicid, names[i]);
-
-               /*
-                * Wait for idle.
-                */
-               apic_wait_icr_idle();
-
-               apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
-               apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
-
-               timeout = 0;
-               do {
-                       udelay(100);
-                       status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
-               } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
-
-               switch (status) {
-               case APIC_ICR_RR_VALID:
-                       status = apic_read(APIC_RRR);
-                       printk("%08x\n", status);
-                       break;
-               default:
-                       printk("failed\n");
-               }
-       }
-}
-#endif
-
-#if 0
-#ifdef WAKE_SECONDARY_VIA_NMI
-/* 
- * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
- * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
- * won't ... remember to clear down the APIC, etc later.
- */
-static int __init
-wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
-{
-       unsigned long send_status = 0, accept_status = 0;
-       int timeout, maxlvt;
-
-       /* Target chip */
-       apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
-
-       /* Boot on the stack */
-       /* Kick the second */
-       apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
-
-       Dprintk("Waiting for send to finish...\n");
-       timeout = 0;
-       do {
-               Dprintk("+");
-               udelay(100);
-               send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
-       } while (send_status && (timeout++ < 1000));
-
-       /*
-        * Give the other CPU some time to accept the IPI.
-        */
-       udelay(200);
-       /*
-        * Due to the Pentium erratum 3AP.
-        */
-       maxlvt = get_maxlvt();
-       if (maxlvt > 3) {
-               apic_read_around(APIC_SPIV);
-               apic_write(APIC_ESR, 0);
-       }
-       accept_status = (apic_read(APIC_ESR) & 0xEF);
-       Dprintk("NMI sent.\n");
-
-       if (send_status)
-               printk("APIC never delivered???\n");
-       if (accept_status)
-               printk("APIC delivery error (%lx).\n", accept_status);
-
-       return (send_status | accept_status);
-}
-#endif /* WAKE_SECONDARY_VIA_NMI */
-
-#ifdef WAKE_SECONDARY_VIA_INIT
-static int __init
-wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
-{
-       unsigned long send_status = 0, accept_status = 0;
-       int maxlvt, timeout, num_starts, j;
-
-       /*
-        * Be paranoid about clearing APIC errors.
-        */
-       if (APIC_INTEGRATED(apic_version[phys_apicid])) {
-               apic_read_around(APIC_SPIV);
-               apic_write(APIC_ESR, 0);
-               apic_read(APIC_ESR);
-       }
-
-       Dprintk("Asserting INIT.\n");
-
-       /*
-        * Turn INIT on target chip
-        */
-       apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
-       /*
-        * Send IPI
-        */
-       apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
-                               | APIC_DM_INIT);
-
-       Dprintk("Waiting for send to finish...\n");
-       timeout = 0;
-       do {
-               Dprintk("+");
-               udelay(100);
-               send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
-       } while (send_status && (timeout++ < 1000));
-
-       mdelay(10);
-
-       Dprintk("Deasserting INIT.\n");
-
-       /* Target chip */
-       apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
-       /* Send IPI */
-       apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
-
-       Dprintk("Waiting for send to finish...\n");
-       timeout = 0;
-       do {
-               Dprintk("+");
-               udelay(100);
-               send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
-       } while (send_status && (timeout++ < 1000));
-
-       atomic_set(&init_deasserted, 1);
-
-       /*
-        * Should we send STARTUP IPIs ?
-        *
-        * Determine this based on the APIC version.
-        * If we don't have an integrated APIC, don't send the STARTUP IPIs.
-        */
-       if (APIC_INTEGRATED(apic_version[phys_apicid]))
-               num_starts = 2;
-       else
-               num_starts = 0;
-
-       /*
-        * Run STARTUP IPI loop.
-        */
-       Dprintk("#startup loops: %d.\n", num_starts);
-
-       maxlvt = get_maxlvt();
-
-       for (j = 1; j <= num_starts; j++) {
-               Dprintk("Sending STARTUP #%d.\n",j);
-               apic_read_around(APIC_SPIV);
-               apic_write(APIC_ESR, 0);
-               apic_read(APIC_ESR);
-               Dprintk("After apic_write.\n");
-
-               /*
-                * STARTUP IPI
-                */
-
-               /* Target chip */
-               apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
-               /* Boot on the stack */
-               /* Kick the second */
-               apic_write_around(APIC_ICR, APIC_DM_STARTUP
-                                       | (start_eip >> 12));
-
-               /*
-                * Give the other CPU some time to accept the IPI.
-                */
-               udelay(300);
-
-               Dprintk("Startup point 1.\n");
-
-               Dprintk("Waiting for send to finish...\n");
-               timeout = 0;
-               do {
-                       Dprintk("+");
-                       udelay(100);
-                       send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
-               } while (send_status && (timeout++ < 1000));
-
-               /*
-                * Give the other CPU some time to accept the IPI.
-                */
-               udelay(200);
-               /*
-                * Due to the Pentium erratum 3AP.
-                */
-               if (maxlvt > 3) {
-                       apic_read_around(APIC_SPIV);
-                       apic_write(APIC_ESR, 0);
-               }
-               accept_status = (apic_read(APIC_ESR) & 0xEF);
-               if (send_status || accept_status)
-                       break;
-       }
-       Dprintk("After Startup.\n");
-
-       if (send_status)
-               printk("APIC never delivered???\n");
-       if (accept_status)
-               printk("APIC delivery error (%lx).\n", accept_status);
-
-       return (send_status | accept_status);
-}
-#endif /* WAKE_SECONDARY_VIA_INIT */
-#endif
-
-extern cpumask_t cpu_initialized;
-
-static int __init do_boot_cpu(int apicid)
-/*
- * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
- * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
- * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
- */
-{
-       struct task_struct *idle;
-       unsigned long boot_error;
-       int timeout, cpu;
-       unsigned long start_eip;
-#if 0
-       unsigned short nmi_high = 0, nmi_low = 0;
-#endif
-       vcpu_guest_context_t ctxt;
-       extern void startup_32_smp(void);
-       extern void hypervisor_callback(void);
-       extern void failsafe_callback(void);
-       extern void smp_trap_init(trap_info_t *);
-
-       cpu = ++cpucount;
-       /*
-        * We can't use kernel_thread since we must avoid to
-        * reschedule the child.
-        */
-       idle = fork_idle(cpu);
-       if (IS_ERR(idle))
-               panic("failed fork for CPU %d", cpu);
-       idle->thread.eip = (unsigned long) start_secondary;
-       /* start_eip had better be page-aligned! */
-       start_eip = (unsigned long)startup_32_smp;
-
-       /* So we see what's up   */
-       printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
-       /* Stack for startup_32 can be just as for start_secondary onwards */
-       stack_start.esp = (void *) idle->thread.esp;
-
-       irq_ctx_init(cpu);
-
-       /*
-        * This grunge runs the startup process for
-        * the targeted processor.
-        */
-
-       atomic_set(&init_deasserted, 0);
-
-#if 1
-       cpu_gdt_descr[cpu].address = __get_free_page(GFP_KERNEL|__GFP_ZERO);
-       BUG_ON(cpu_gdt_descr[0].size > PAGE_SIZE);
-       cpu_gdt_descr[cpu].size = cpu_gdt_descr[0].size;
-       memcpy((void *)cpu_gdt_descr[cpu].address,
-              (void *)cpu_gdt_descr[0].address, cpu_gdt_descr[0].size);
-
-       memset(&ctxt, 0, sizeof(ctxt));
-
-       ctxt.user_regs.ds = __USER_DS;
-       ctxt.user_regs.es = __USER_DS;
-       ctxt.user_regs.fs = 0;
-       ctxt.user_regs.gs = 0;
-       ctxt.user_regs.ss = __KERNEL_DS;
-       ctxt.user_regs.cs = __KERNEL_CS;
-       ctxt.user_regs.eip = start_eip;
-       ctxt.user_regs.esp = idle->thread.esp;
-#define X86_EFLAGS_IOPL_RING1 0x1000
-       ctxt.user_regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_IOPL_RING1;
-
-       /* FPU is set up to default initial state. */
-       memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
-
-       smp_trap_init(ctxt.trap_ctxt);
-
-       /* No LDT. */
-       ctxt.ldt_ents = 0;
-
-       {
-               unsigned long va;
-               int f;
-
-               for (va = cpu_gdt_descr[cpu].address, f = 0;
-                    va < cpu_gdt_descr[cpu].address + cpu_gdt_descr[cpu].size;
-                    va += PAGE_SIZE, f++) {
-                       ctxt.gdt_frames[f] = virt_to_mfn(va);
-                       make_page_readonly((void *)va);
-               }
-               ctxt.gdt_ents = cpu_gdt_descr[cpu].size / 8;
-       }
-
-       /* Ring 1 stack is the initial stack. */
-       ctxt.kernel_ss = __KERNEL_DS;
-       ctxt.kernel_sp = idle->thread.esp;
-
-       /* Callback handlers. */
-       ctxt.event_callback_cs     = __KERNEL_CS;
-       ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
-       ctxt.failsafe_callback_cs  = __KERNEL_CS;
-       ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
-
-       ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
-
-       boot_error = HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt);
-       if (boot_error)
-               printk("boot error: %ld\n", boot_error);
-
-       if (!boot_error) {
-               HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
-
-               /*
-                * allow APs to start initializing.
-                */
-               Dprintk("Before Callout %d.\n", cpu);
-               cpu_set(cpu, cpu_callout_map);
-               Dprintk("After Callout %d.\n", cpu);
-
-               /*
-                * Wait 5s total for a response
-                */
-               for (timeout = 0; timeout < 50000; timeout++) {
-                       if (cpu_isset(cpu, cpu_callin_map))
-                               break;  /* It has booted */
-                       udelay(100);
-               }
-
-               if (cpu_isset(cpu, cpu_callin_map)) {
-                       /* number CPUs logically, starting from 1 (BSP is 0) */
-                       Dprintk("OK.\n");
-                       printk("CPU%d: ", cpu);
-                       print_cpu_info(&cpu_data[cpu]);
-                       Dprintk("CPU has booted.\n");
-               } else {
-                       boot_error= 1;
-               }
-       }
-       x86_cpu_to_apicid[cpu] = apicid;
-       if (boot_error) {
-               /* Try to put things back the way they were before ... */
-               unmap_cpu_to_logical_apicid(cpu);
-               cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
-               cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
-               cpucount--;
-       }
-
-#else
-       Dprintk("Setting warm reset code and vector.\n");
-
-       store_NMI_vector(&nmi_high, &nmi_low);
-
-       smpboot_setup_warm_reset_vector(start_eip);
-
-       /*
-        * Starting actual IPI sequence...
-        */
-       boot_error = wakeup_secondary_cpu(apicid, start_eip);
-
-       if (!boot_error) {
-               /*
-                * allow APs to start initializing.
-                */
-               Dprintk("Before Callout %d.\n", cpu);
-               cpu_set(cpu, cpu_callout_map);
-               Dprintk("After Callout %d.\n", cpu);
-
-               /*
-                * Wait 5s total for a response
-                */
-               for (timeout = 0; timeout < 50000; timeout++) {
-                       if (cpu_isset(cpu, cpu_callin_map))
-                               break;  /* It has booted */
-                       udelay(100);
-               }
-
-               if (cpu_isset(cpu, cpu_callin_map)) {
-                       /* number CPUs logically, starting from 1 (BSP is 0) */
-                       Dprintk("OK.\n");
-                       printk("CPU%d: ", cpu);
-                       print_cpu_info(&cpu_data[cpu]);
-                       Dprintk("CPU has booted.\n");
-               } else {
-                       boot_error= 1;
-                       if (*((volatile unsigned char *)trampoline_base)
-                                       == 0xA5)
-                               /* trampoline started but...? */
-                               printk("Stuck ??\n");
-                       else
-                               /* trampoline code not run */
-                               printk("Not responding.\n");
-                       inquire_remote_apic(apicid);
-               }
-       }
-       x86_cpu_to_apicid[cpu] = apicid;
-       if (boot_error) {
-               /* Try to put things back the way they were before ... */
-               unmap_cpu_to_logical_apicid(cpu);
-       cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
-               cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
-               cpucount--;
-       }
-
-       /* mark "stuck" area as not stuck */
-       *((volatile unsigned long *)trampoline_base) = 0;
-#endif
-
-       return boot_error;
-}
-
-static void smp_tune_scheduling (void)
-{
-       unsigned long cachesize;       /* kB   */
-       unsigned long bandwidth = 350; /* MB/s */
-       /*
-        * Rough estimation for SMP scheduling, this is the number of
-        * cycles it takes for a fully memory-limited process to flush
-        * the SMP-local cache.
-        *
-        * (For a P5 this pretty much means we will choose another idle
-        *  CPU almost always at wakeup time (this is due to the small
-        *  L1 cache), on PIIs it's around 50-100 usecs, depending on
-        *  the cache size)
-        */
-
-       if (!cpu_khz) {
-               /*
-                * this basically disables processor-affinity
-                * scheduling on SMP without a TSC.
-                */
-               return;
-       } else {
-               cachesize = boot_cpu_data.x86_cache_size;
-               if (cachesize == -1) {
-                       cachesize = 16; /* Pentiums, 2x8kB cache */
-                       bandwidth = 100;
-               }
-       }
-}
-
-/*
- * Cycle through the processors sending APIC IPIs to boot each.
- */
-
-#if 0
-static int boot_cpu_logical_apicid;
-#endif
-/* Where the IO area was mapped on multiquad, always 0 otherwise */
-void *xquad_portio;
-
-cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
-cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_core_map);
-
-static void __init smp_boot_cpus(unsigned int max_cpus)
-{
-       int cpu, kicked;
-       unsigned long bogosum = 0;
-#if 0
-       int apicid, bit;
-#endif
-
-       /*
-        * Setup boot CPU information
-        */
-       smp_store_cpu_info(0); /* Final full version of the data */
-       printk("CPU%d: ", 0);
-       print_cpu_info(&cpu_data[0]);
-
-#if 0
-       boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
-       boot_cpu_logical_apicid = logical_smp_processor_id();
-       x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
-#else
-       // boot_cpu_physical_apicid = 0;
-       // boot_cpu_logical_apicid = 0;
-       x86_cpu_to_apicid[0] = 0;
-#endif
-
-       current_thread_info()->cpu = 0;
-       smp_tune_scheduling();
-       cpus_clear(cpu_sibling_map[0]);
-       cpu_set(0, cpu_sibling_map[0]);
-
-       cpus_clear(cpu_core_map[0]);
-       cpu_set(0, cpu_core_map[0]);
-
-#ifdef CONFIG_X86_IO_APIC
-       /*
-        * If we couldn't find an SMP configuration at boot time,
-        * get out of here now!
-        */
-       if (!smp_found_config && !acpi_lapic) {
-               printk(KERN_NOTICE "SMP motherboard not detected.\n");
-               smpboot_clear_io_apic_irqs();
-#if 0
-               phys_cpu_present_map = physid_mask_of_physid(0);
-#endif
-#ifdef CONFIG_X86_LOCAL_APIC
-               if (APIC_init_uniprocessor())
-                       printk(KERN_NOTICE "Local APIC not detected."
-                                          " Using dummy APIC emulation.\n");
-#endif
-               map_cpu_to_logical_apicid();
-               cpu_set(0, cpu_sibling_map[0]);
-               cpu_set(0, cpu_core_map[0]);
-               return;
-       }
-#endif
-
-#if 0
-       /*
-        * Should not be necessary because the MP table should list the boot
-        * CPU too, but we do it for the sake of robustness anyway.
-        * Makes no sense to do this check in clustered apic mode, so skip it
-        */
-       if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
-               printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
-                               boot_cpu_physical_apicid);
-               physid_set(hard_smp_processor_id(), phys_cpu_present_map);
-       }
-
-       /*
-        * If we couldn't find a local APIC, then get out of here now!
-        */
-       if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
-               printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
-                       boot_cpu_physical_apicid);
-               printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
-               smpboot_clear_io_apic_irqs();
-               phys_cpu_present_map = physid_mask_of_physid(0);
-               cpu_set(0, cpu_sibling_map[0]);
-               cpu_set(0, cpu_core_map[0]);
-               cpu_set(0, cpu_sibling_map[0]);
-               cpu_set(0, cpu_core_map[0]);
-               return;
-       }
-
-       verify_local_APIC();
-#endif
-
-       /*
-        * If SMP should be disabled, then really disable it!
-        */
-       if (!max_cpus) {
-               HYPERVISOR_shared_info->n_vcpu = 1;
-               printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
-               smpboot_clear_io_apic_irqs();
-#if 0
-               phys_cpu_present_map = physid_mask_of_physid(0);
-#endif
-               return;
-       }
-
-       smp_intr_init();
-
-#if 0
-       connect_bsp_APIC();
-       setup_local_APIC();
-#endif
-       map_cpu_to_logical_apicid();
-#if 0
-
-
-       setup_portio_remap();
-
-       /*
-        * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
-        *
-        * In clustered apic mode, phys_cpu_present_map is a constructed thus:
-        * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the 
-        * clustered apic ID.
-        */
-       Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
-#endif
-       Dprintk("CPU present map: %lx\n",
-               (1UL << HYPERVISOR_shared_info->n_vcpu) - 1);
-
-       kicked = 1;
-       for (cpu = 1; kicked < NR_CPUS &&
-                    cpu < HYPERVISOR_shared_info->n_vcpu; cpu++) {
-               if (max_cpus <= cpucount+1)
-                       continue;
-
-#ifdef CONFIG_SMP_ALTERNATIVES
-               if (kicked == 1)
-                       prepare_for_smp();
-#endif
-               if (do_boot_cpu(cpu))
-                       printk("CPU #%d not responding - cannot use it.\n",
-                                                               cpu);
-               else
-                       ++kicked;
-       }
-
-#if 0
-       /*
-        * Cleanup possible dangling ends...
-        */
-       smpboot_restore_warm_reset_vector();
-#endif
-
-       /*
-        * Allow the user to impress friends.
-        */
-       Dprintk("Before bogomips.\n");
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
-               if (cpu_isset(cpu, cpu_callout_map))
-                       bogosum += cpu_data[cpu].loops_per_jiffy;
-       printk(KERN_INFO
-               "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
-               cpucount+1,
-               bogosum/(500000/HZ),
-               (bogosum/(5000/HZ))%100);
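/*
 * Editorial worked example of the BogoMIPS arithmetic in the printk
 * above (the numbers are hypothetical, chosen only to show the integer
 * math): with HZ = 100 and a summed loops_per_jiffy (bogosum) of
 * 4,980,000, bogosum/(500000/HZ) = 4980000/5000 = 996 gives the whole
 * part, and (bogosum/(5000/HZ)) % 100 = (4980000/50) % 100 = 0 gives
 * the fractional part, so the kernel would report "996.00 BogoMIPS".
 */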
-       
-       Dprintk("Before bogocount - setting activated=1.\n");
-
-       if (smp_b_stepping)
-               printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");
-
-       /*
-        * Don't taint if we are running SMP kernel on a single non-MP
-        * approved Athlon
-        */
-       if (tainted & TAINT_UNSAFE_SMP) {
-               if (cpucount)
-                       printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
-               else
-                       tainted &= ~TAINT_UNSAFE_SMP;
-       }
-
-       Dprintk("Boot done.\n");
-
-       /*
-        * construct cpu_sibling_map[], so that we can tell sibling CPUs
-        * efficiently.
-        */
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
-               cpus_clear(cpu_sibling_map[cpu]);
-               cpus_clear(cpu_core_map[cpu]);
-       }
-
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
-               struct cpuinfo_x86 *c = cpu_data + cpu;
-               int siblings = 0;
-               int i;
-               if (!cpu_isset(cpu, cpu_callout_map))
-                       continue;
-
-               if (smp_num_siblings > 1) {
-                       for (i = 0; i < NR_CPUS; i++) {
-                               if (!cpu_isset(i, cpu_callout_map))
-                                       continue;
-                               if (cpu_core_id[cpu] == cpu_core_id[i]) {
-                                       siblings++;
-                                       cpu_set(i, cpu_sibling_map[cpu]);
-                               }
-                       }
-               } else {
-                       siblings++;
-                       cpu_set(cpu, cpu_sibling_map[cpu]);
-               }
-
-               if (siblings != smp_num_siblings) {
-                       printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
-                       smp_num_siblings = siblings;
-               }
-
-               if (c->x86_num_cores > 1) {
-                       for (i = 0; i < NR_CPUS; i++) {
-                               if (!cpu_isset(i, cpu_callout_map))
-                                       continue;
-                               if (phys_proc_id[cpu] == phys_proc_id[i]) {
-                                       cpu_set(i, cpu_core_map[cpu]);
-                               }
-                       }
-               } else {
-                       cpu_core_map[cpu] = cpu_sibling_map[cpu];
-               }
-       }
-
-       smpboot_setup_io_apic();
-
-#if 0
-       setup_boot_APIC_clock();
-
-       /*
-        * Synchronize the TSC with the AP
-        */
-       if (cpu_has_tsc && cpucount && cpu_khz)
-               synchronize_tsc_bp();
-#endif
-}
-
-/* These are wrappers to interface to the new boot process.  Someone
-   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
-void __init smp_prepare_cpus(unsigned int max_cpus)
-{
-       smp_commenced_mask = cpumask_of_cpu(0);
-       cpu_callin_map = cpumask_of_cpu(0);
-       mb();
-       smp_boot_cpus(max_cpus);
-}
-
-void __devinit smp_prepare_boot_cpu(void)
-{
-       cpu_set(smp_processor_id(), cpu_online_map);
-       cpu_set(smp_processor_id(), cpu_callout_map);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-#include <asm-xen/xenbus.h>
-/* hotplug down/up function pointer and target vcpu */
-struct vcpu_hotplug_handler_t {
-       void (*fn) (int vcpu);
-       u32 vcpu;
-};
-static struct vcpu_hotplug_handler_t vcpu_hotplug_handler;
-
-static int vcpu_hotplug_cpu_process(void *unused)
-{
-       struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler;
-
-       if (handler->fn) {
-               (*(handler->fn)) (handler->vcpu);
-               handler->fn = NULL;
-       }
-       return 0;
-}
-
-static void __vcpu_hotplug_handler(void *unused)
-{
-       int err;
-
-       err = kernel_thread(vcpu_hotplug_cpu_process,
-                           NULL, CLONE_FS | CLONE_FILES);
-       if (err < 0)
-               printk(KERN_ALERT "Error creating hotplug_cpu process!\n");
-}
-
-static void handle_vcpu_hotplug_event(struct xenbus_watch *, const char *);
-static struct notifier_block xsn_cpu;
-
-/* xenbus watch struct */
-static struct xenbus_watch cpu_watch = {
-       .node = "cpu",
-       .callback = handle_vcpu_hotplug_event
-};
-
-static int setup_cpu_watcher(struct notifier_block *notifier,
-                             unsigned long event, void *data)
-{
-       int err;
-
-       err = register_xenbus_watch(&cpu_watch);
-       if (err)
-               printk("Failed to register watch on /cpu\n");
-
-       return NOTIFY_DONE;
-}
-
-static void handle_vcpu_hotplug_event(struct xenbus_watch *watch, const char *node)
-{
-       static DECLARE_WORK(vcpu_hotplug_work, __vcpu_hotplug_handler, NULL);
-       struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler;
-       ssize_t ret;
-       int err, cpu;
-       char state[8];
-       char dir[32];
-       char *cpustr;
-
-       /* get a pointer to start of cpu string */
-       if ((cpustr = strstr(node, "cpu/")) != NULL) {
-
-               /* find which cpu state changed, note vcpu for handler */
-               sscanf(cpustr, "cpu/%d", &cpu);
-               handler->vcpu = cpu;
-
-               /* calc the dir for xenbus read */
-               sprintf(dir, "cpu/%d", cpu);
-
-               /* make sure the watch that was triggered is a change to the correct key */
-               if ((strcmp(node + strlen(dir), "/availability")) != 0)
-                       return;
-
-               /* get the state value */
-               err = xenbus_scanf(NULL, dir, "availability", "%s", state);
-
-               if (err != 1) {
-                       printk(KERN_ERR
-                              "XENBUS: Unable to read cpu state\n");
-                       return;
-               }
-
-               /* if we detect a state change, take action */
-               if (strcmp(state, "online") == 0) {
-                       /* offline -> online */
-                       if (!cpu_isset(cpu, cpu_online_map)) {
-                               handler->fn = (void *)&cpu_up;
-                               ret = schedule_work(&vcpu_hotplug_work);
-                       } 
-               } else if (strcmp(state, "offline") == 0) {
-                       /* online -> offline */
-                       if (cpu_isset(cpu, cpu_online_map)) {
-                               handler->fn = (void *)&cpu_down;
-                               ret = schedule_work(&vcpu_hotplug_work);
-                       } 
-               } else {
-                       printk(KERN_ERR
-                              "XENBUS: unknown state(%s) on node(%s)\n", state,
-                              node);
-               }
-       }
-       return;
-}
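/*
 * Editorial worked example, not part of the patch: if the watch above
 * fires with node == "cpu/1/availability" and xenbus_scanf() returns
 * the value "offline" while vcpu 1 is set in cpu_online_map, the
 * handler records cpu_down as handler->fn with handler->vcpu = 1 and
 * schedules vcpu_hotplug_work, whose kernel thread then calls
 * cpu_down(1).  The value "online" for a currently offline vcpu takes
 * the cpu_up() path instead; any other value only logs an error.
 */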
-
-static int __init setup_vcpu_hotplug_event(void)
-{
-       xsn_cpu.notifier_call = setup_cpu_watcher;
-
-       register_xenstore_notifier(&xsn_cpu);
-
-       return 0;
-}
-
-subsys_initcall(setup_vcpu_hotplug_event);
-
-/* must be called with the cpucontrol mutex held */
-static int __devinit cpu_enable(unsigned int cpu)
-{
-#ifdef CONFIG_SMP_ALTERNATIVES
-       if (num_online_cpus() == 1)
-               prepare_for_smp();
-#endif
-
-       /* get the target out of its holding state */
-       per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-       wmb();
-
-       /* wait for the processor to ack it. timeout? */
-       while (!cpu_online(cpu))
-               cpu_relax();
-
-       fixup_irqs(cpu_online_map);
-
-       /* counter the disable in fixup_irqs() */
-       local_irq_enable();
-       return 0;
-}
-
-int __cpu_disable(void)
-{
-       cpumask_t map = cpu_online_map;
-       int cpu = smp_processor_id();
-
-       /*
-        * Perhaps use cpufreq to drop frequency, but that could go
-        * into generic code.
-        *
-        * We won't take down the boot processor on i386 due to some
-        * interrupts only being able to be serviced by the BSP.
-        * Especially so if we're not using an IOAPIC   -zwane
-        */
-       if (cpu == 0)
-               return -EBUSY;
-
-       cpu_clear(cpu, map);
-       fixup_irqs(map);
-
-       /* It's now safe to remove this processor from the online map */
-       cpu_clear(cpu, cpu_online_map);
-
-#ifdef CONFIG_SMP_ALTERNATIVES
-       if (num_online_cpus() == 1)
-               unprepare_for_smp();
-#endif
-
-       return 0;
-}
-
-void __cpu_die(unsigned int cpu)
-{
-       /* We don't do anything here: idle task is faking death itself. */
-       unsigned int i;
-
-       for (i = 0; i < 10; i++) {
-               /* They ack this in play_dead by setting CPU_DEAD */
-               if (per_cpu(cpu_state, cpu) == CPU_DEAD)
-                       return;
-               current->state = TASK_UNINTERRUPTIBLE;
-               schedule_timeout(HZ/10);
-       }
-       printk(KERN_ERR "CPU %u didn't die...\n", cpu);
-}
-
-#else /* ... !CONFIG_HOTPLUG_CPU */
-int __cpu_disable(void)
-{
-       return -ENOSYS;
-}
-
-void __cpu_die(unsigned int cpu)
-{
-       /* We said "no" in __cpu_disable */
-       BUG();
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-int __devinit __cpu_up(unsigned int cpu)
-{
-       /* In case one didn't come up */
-       if (!cpu_isset(cpu, cpu_callin_map)) {
-               printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
-               local_irq_enable();
-               return -EIO;
-       }
-
-#ifdef CONFIG_HOTPLUG_CPU
-#ifdef CONFIG_XEN
-       /* Tell hypervisor to bring vcpu up. */
-       HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
-#endif
-       /* Already up, and in cpu_quiescent now? */
-       if (cpu_isset(cpu, smp_commenced_mask)) {
-               cpu_enable(cpu);
-               return 0;
-       }
-#endif
-
-       local_irq_enable();
-       /* Unleash the CPU! */
-       cpu_set(cpu, smp_commenced_mask);
-       while (!cpu_isset(cpu, cpu_online_map))
-               mb();
-       return 0;
-}
-
-void __init smp_cpus_done(unsigned int max_cpus)
-{
-#if 1
-#else
-#ifdef CONFIG_X86_IO_APIC
-       setup_ioapic_dest();
-#endif
-       zap_low_mappings();
-       /*
-        * Disable executability of the SMP trampoline:
-        */
-       set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
-#endif
-}
-
-extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
-extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
-
-void smp_intr_init(void)
-{
-       int cpu = smp_processor_id();
-
-       per_cpu(resched_irq, cpu) =
-               bind_ipi_to_irq(RESCHEDULE_VECTOR);
-       sprintf(resched_name[cpu], "resched%d", cpu);
-       BUG_ON(request_irq(per_cpu(resched_irq, cpu), smp_reschedule_interrupt,
-                          SA_INTERRUPT, resched_name[cpu], NULL));
-
-       per_cpu(callfunc_irq, cpu) =
-               bind_ipi_to_irq(CALL_FUNCTION_VECTOR);
-       sprintf(callfunc_name[cpu], "callfunc%d", cpu);
-       BUG_ON(request_irq(per_cpu(callfunc_irq, cpu),
-                          smp_call_function_interrupt,
-                          SA_INTERRUPT, callfunc_name[cpu], NULL));
-}
-
-static void smp_intr_exit(void)
-{
-       int cpu = smp_processor_id();
-
-       free_irq(per_cpu(resched_irq, cpu), NULL);
-       unbind_ipi_from_irq(RESCHEDULE_VECTOR);
-
-       free_irq(per_cpu(callfunc_irq, cpu), NULL);
-       unbind_ipi_from_irq(CALL_FUNCTION_VECTOR);
-}
-
-extern void local_setup_timer_irq(void);
-extern void local_teardown_timer_irq(void);
-
-void smp_suspend(void)
-{
-       local_teardown_timer_irq();
-       smp_intr_exit();
-}
-
-void smp_resume(void)
-{
-       smp_intr_init();
-       local_setup_timer();
-}
-
-void vcpu_prepare(int vcpu)
-{
-       extern void hypervisor_callback(void);
-       extern void failsafe_callback(void);
-       extern void smp_trap_init(trap_info_t *);
-       extern void cpu_restore(void);
-       vcpu_guest_context_t ctxt;
-       struct task_struct *idle = idle_task(vcpu);
-
-       if (vcpu == 0)
-               return;
-
-       memset(&ctxt, 0, sizeof(ctxt));
-
-       ctxt.user_regs.ds = __USER_DS;
-       ctxt.user_regs.es = __USER_DS;
-       ctxt.user_regs.fs = 0;
-       ctxt.user_regs.gs = 0;
-       ctxt.user_regs.ss = __KERNEL_DS;
-       ctxt.user_regs.cs = __KERNEL_CS;
-       ctxt.user_regs.eip = (unsigned long)cpu_restore;
-       ctxt.user_regs.esp = idle->thread.esp;
-       ctxt.user_regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_IOPL_RING1;
-
-       memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
-
-       smp_trap_init(ctxt.trap_ctxt);
-
-       ctxt.ldt_ents = 0;
-
-       ctxt.gdt_frames[0] = virt_to_mfn(cpu_gdt_descr[vcpu].address);
-       ctxt.gdt_ents      = cpu_gdt_descr[vcpu].size / 8;
-
-       ctxt.kernel_ss = __KERNEL_DS;
-       ctxt.kernel_sp = idle->thread.esp0;
-
-       ctxt.event_callback_cs     = __KERNEL_CS;
-       ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
-       ctxt.failsafe_callback_cs  = __KERNEL_CS;
-       ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
-
-       ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
-
-       (void)HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, &ctxt);
-       (void)HYPERVISOR_vcpu_op(VCPUOP_up, vcpu, NULL);
-}
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c     Fri Oct 21 19:58:39 2005
+++ /dev/null   Mon Oct 24 15:08:13 2005
@@ -1,1270 +0,0 @@
-/*
- *     x86 SMP booting functions
- *
- *     (c) 1995 Alan Cox, Building #3 <alan@xxxxxxxxxx>
- *     (c) 1998, 1999, 2000 Ingo Molnar <mingo@xxxxxxxxxx>
- *     Copyright 2001 Andi Kleen, SuSE Labs.
- *
- *     Much of the core SMP work is based on previous work by Thomas Radke, to
- *     whom a great many thanks are extended.
- *
- *     Thanks to Intel for making available several different Pentium,
- *     Pentium Pro and Pentium-II/Xeon MP machines.
- *     Original development of Linux SMP code supported by Caldera.
- *
- *     This code is released under the GNU General Public License version 2
- *
- *     Fixes
- *             Felix Koop      :       NR_CPUS used properly
- *             Jose Renau      :       Handle single CPU case.
- *             Alan Cox        :       By repeated request 8) - Total BogoMIP report.
- *             Greg Wright     :       Fix for kernel stacks panic.
- *             Erich Boleyn    :       MP v1.4 and additional changes.
- *     Matthias Sattler        :       Changes for 2.1 kernel map.
- *     Michel Lespinasse       :       Changes for 2.1 kernel map.
- *     Michael Chastain        :       Change trampoline.S to gnu as.
- *             Alan Cox        :       Dumb bug: 'B' step PPro's are fine
- *             Ingo Molnar     :       Added APIC timers, based on code
- *                                     from Jose Renau
- *             Ingo Molnar     :       various cleanups and rewrites
- *             Tigran Aivazian :       fixed "0.00 in /proc/uptime on SMP" bug.
- *     Maciej W. Rozycki       :       Bits for genuine 82489DX APICs
- *     Andi Kleen              :       Changed for SMP boot into long mode.
- *             Rusty Russell   :       Hacked into shape for new "hotplug" boot process.
- *      Andi Kleen              :       Converted to new state machine.
- *                                     Various cleanups.
- *                                     Probably mostly hotplug CPU ready now.
- */
-
-
-#include <linux/config.h>
-#include <linux/init.h>
-
-#include <linux/mm.h>
-#include <linux/kernel_stat.h>
-#include <linux/smp_lock.h>
-#include <linux/irq.h>
-#include <linux/bootmem.h>
-#include <linux/thread_info.h>
-#include <linux/module.h>
-#ifdef CONFIG_XEN
-#include <linux/interrupt.h>
-#endif
-
-#include <linux/delay.h>
-#include <linux/mc146818rtc.h>
-#include <asm/mtrr.h>
-#include <asm/pgalloc.h>
-#include <asm/desc.h>
-#include <asm/kdebug.h>
-#include <asm/tlbflush.h>
-#include <asm/proto.h>
-#include <asm/nmi.h>
-#ifdef CONFIG_XEN
-#include <asm/arch_hooks.h>
-#include <asm-xen/evtchn.h>
-#include <asm-xen/xen-public/vcpu.h>
-#endif
-
-/* Change for real CPU hotplug. Note other files need to be fixed
-   first too. */
-#define __cpuinit __init
-#define __cpuinitdata __initdata
-
-#if defined(CONFIG_XEN) && !defined(CONFIG_XEN_PRIVILEGED_GUEST)
-       unsigned int maxcpus = NR_CPUS;
-#endif
-
-/* Number of siblings per CPU package */
-int smp_num_siblings = 1;
-/* Package ID of each logical CPU */
-u8 phys_proc_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-u8 cpu_core_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-EXPORT_SYMBOL(phys_proc_id);
-EXPORT_SYMBOL(cpu_core_id);
-
-/* Bitmask of currently online CPUs */
-cpumask_t cpu_online_map;
-
-EXPORT_SYMBOL(cpu_online_map);
-
-/*
- * Private maps to synchronize booting between AP and BP.
- * Probably not needed anymore, but it makes for easier debugging. -AK
- */
-cpumask_t cpu_callin_map;
-cpumask_t cpu_callout_map;
-
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-
-/* Per CPU bogomips and other parameters */
-struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-
-/* Set when the idlers are all forked */
-int smp_threads_ready;
-
-cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
-cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_core_map);
-
-#ifndef CONFIG_XEN
-/*
- * Trampoline 80x86 program as an array.
- */
-
-extern unsigned char trampoline_data[];
-extern unsigned char trampoline_end[];
-
-/*
- * Currently trivial. Write the real->protected mode
- * bootstrap into the page concerned. The caller
- * has made sure it's suitably aligned.
- */
-
-static unsigned long __cpuinit setup_trampoline(void)
-{
-       void *tramp = __va(SMP_TRAMPOLINE_BASE); 
-       memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
-       return virt_to_phys(tramp);
-}
-#endif
-
-/*
- * The bootstrap kernel entry code has set these up. Save them for
- * a given CPU
- */
-
-static void __cpuinit smp_store_cpu_info(int id)
-{
-       struct cpuinfo_x86 *c = cpu_data + id;
-
-       *c = boot_cpu_data;
-       identify_cpu(c);
-       print_cpu_info(c);
-}
-
-#ifndef CONFIG_XEN
-/*
- * New Funky TSC sync algorithm borrowed from IA64.
- * Main advantage is that it doesn't reset the TSCs fully and
- * in general looks more robust and it works better than my earlier
- * attempts. I believe it was written by David Mosberger. Some minor
- * adjustments for x86-64 by me -AK
- *
- * Original comment reproduced below.
- *
- * Synchronize TSC of the current (slave) CPU with the TSC of the
- * MASTER CPU (normally the time-keeper CPU).  We use a closed loop to
- * eliminate the possibility of unaccounted-for errors (such as
- * getting a machine check in the middle of a calibration step).  The
- * basic idea is for the slave to ask the master what itc value it has
- * and to read its own itc before and after the master responds.  Each
- * iteration gives us three timestamps:
- *
- *     slave           master
- *
- *     t0 ---\
- *             ---\
- *                --->
- *                     tm
- *                /---
- *            /---
- *     t1 <---
- *
- *
- * The goal is to adjust the slave's TSC such that tm falls exactly
- * half-way between t0 and t1.  If we achieve this, the clocks are
- * synchronized provided the interconnect between the slave and the
- * master is symmetric.  Even if the interconnect were asymmetric, we
- * would still know that the synchronization error is smaller than the
- * roundtrip latency (t0 - t1).
- *
- * When the interconnect is quiet and symmetric, this lets us
- * synchronize the TSC to within one or two cycles.  However, we can
- * only *guarantee* that the synchronization is accurate to within a
- * round-trip time, which is typically in the range of several hundred
- * cycles (e.g., ~500 cycles).  In practice, this means that the TSCs
- * are usually almost perfectly synchronized, but we shouldn't assume
- * that the accuracy is much better than half a micro second or so.
- *
- * [there are other errors like the latency of RDTSC and of the
- * WRMSR. These can also account to hundreds of cycles. So it's
- * probably worse. It claims 153 cycles error on a dual Opteron,
- * but I suspect the numbers are actually somewhat worse -AK]
- */
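/*
 * Editorial worked example of the scheme above (cycle counts are made
 * up): say the slave reads t0 = 1000, the master replies tm = 1600,
 * and the slave then reads t1 = 1100.  The midpoint of t0 and t1 is
 * 1050, so get_delta() below returns 1050 - 1600 = -550, i.e. the
 * slave's TSC appears 550 cycles behind the master's; sync_tsc() then
 * applies adj = -delta and writes the slave's TSC forward by roughly
 * 550 cycles, with the roundtrip t1 - t0 = 100 bounding the residual
 * error.
 */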
-
-#define MASTER 0
-#define SLAVE  (SMP_CACHE_BYTES/8)
-
-/* Intentionally don't use cpu_relax() during TSC synchronization
-   because we don't want to go into funky power save modi or cause
-   hypervisors to schedule us away.  Going to sleep would likely affect
-   latency and low latency is the primary objective here. -AK */
-#define no_cpu_relax() barrier()
-
-static __cpuinitdata DEFINE_SPINLOCK(tsc_sync_lock);
-static volatile __cpuinitdata unsigned long go[SLAVE + 1];
-static int notscsync __cpuinitdata;
-
-#undef DEBUG_TSC_SYNC
-
-#define NUM_ROUNDS     64      /* magic value */
-#define NUM_ITERS      5       /* likewise */
-
-/* Callback on boot CPU */
-static __cpuinit void sync_master(void *arg)
-{
-       unsigned long flags, i;
-
-       if (smp_processor_id() != boot_cpu_id)
-               return;
-
-       go[MASTER] = 0;
-
-       local_irq_save(flags);
-       {
-               for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
-                       while (!go[MASTER])
-                               no_cpu_relax();
-                       go[MASTER] = 0;
-                       rdtscll(go[SLAVE]);
-               }
-       }
-       local_irq_restore(flags);
-}
-
-/*
- * Return the number of cycles by which our tsc differs from the tsc
- * on the master (time-keeper) CPU.  A positive number indicates our
- * tsc is ahead of the master, negative that it is behind.
- */
-static inline long
-get_delta(long *rt, long *master)
-{
-       unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
-       unsigned long tcenter, t0, t1, tm;
-       int i;
-
-       for (i = 0; i < NUM_ITERS; ++i) {
-               rdtscll(t0);
-               go[MASTER] = 1;
-               while (!(tm = go[SLAVE]))
-                       no_cpu_relax();
-               go[SLAVE] = 0;
-               rdtscll(t1);
-
-               if (t1 - t0 < best_t1 - best_t0)
-                       best_t0 = t0, best_t1 = t1, best_tm = tm;
-       }
-
-       *rt = best_t1 - best_t0;
-       *master = best_tm - best_t0;
-
-       /* average best_t0 and best_t1 without overflow: */
-       tcenter = (best_t0/2 + best_t1/2);
-       if (best_t0 % 2 + best_t1 % 2 == 2)
-               ++tcenter;
-       return tcenter - best_tm;
-}
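/*
 * Editorial note with hypothetical values: tcenter above averages
 * best_t0 and best_t1 without risking unsigned overflow.  For
 * best_t0 = 7 and best_t1 = 9: 7/2 + 9/2 = 3 + 4 = 7, and because both
 * halves were rounded down (both values odd) the correction adds 1,
 * giving 8 == (7 + 9)/2, whereas the naive (best_t0 + best_t1)/2 could
 * wrap for TSC values near ULONG_MAX.
 */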
-
-static __cpuinit void sync_tsc(void)
-{
-       int i, done = 0;
-       long delta, adj, adjust_latency = 0;
-       unsigned long flags, rt, master_time_stamp, bound;
-#if DEBUG_TSC_SYNC
-       static struct syncdebug {
-               long rt;        /* roundtrip time */
-               long master;    /* master's timestamp */
-               long diff;      /* difference between midpoint and master's timestamp */
-               long lat;       /* estimate of tsc adjustment latency */
-       } t[NUM_ROUNDS] __cpuinitdata;
-#endif
-
-       go[MASTER] = 1;
-
-       smp_call_function(sync_master, NULL, 1, 0);
-
-       while (go[MASTER])      /* wait for master to be ready */
-               no_cpu_relax();
-
-       spin_lock_irqsave(&tsc_sync_lock, flags);
-       {
-               for (i = 0; i < NUM_ROUNDS; ++i) {
-                       delta = get_delta(&rt, &master_time_stamp);
-                       if (delta == 0) {
-                               done = 1;       /* let's lock on to this... */
-                               bound = rt;
-                       }
-
-                       if (!done) {
-                               unsigned long t;
-                               if (i > 0) {
-                                       adjust_latency += -delta;
-                                       adj = -delta + adjust_latency/4;
-                               } else
-                                       adj = -delta;
-
-                               rdtscll(t);
-                               wrmsrl(MSR_IA32_TSC, t + adj);
-                       }
-#if DEBUG_TSC_SYNC
-                       t[i].rt = rt;
-                       t[i].master = master_time_stamp;
-                       t[i].diff = delta;
-                       t[i].lat = adjust_latency/4;
-#endif
-               }
-       }
-       spin_unlock_irqrestore(&tsc_sync_lock, flags);
-
-#if DEBUG_TSC_SYNC
-       for (i = 0; i < NUM_ROUNDS; ++i)
-               printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
-                      t[i].rt, t[i].master, t[i].diff, t[i].lat);
-#endif
-
-       printk(KERN_INFO
-              "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
-              "maxerr %lu cycles)\n",
-              smp_processor_id(), boot_cpu_id, delta, rt);
-}
-
-static void __cpuinit tsc_sync_wait(void)
-{
-       if (notscsync || !cpu_has_tsc)
-               return;
-       printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
-                       boot_cpu_id);
-       sync_tsc();
-}
-
-static __init int notscsync_setup(char *s)
-{
-       notscsync = 1;
-       return 0;
-}
-__setup("notscsync", notscsync_setup);
-#endif
-
-static atomic_t init_deasserted __cpuinitdata;
-
-/*
- * Report back to the Boot Processor.
- * Running on AP.
- */
-void __cpuinit smp_callin(void)
-{
-       int cpuid, phys_id;
-       unsigned long timeout;
-
-#ifndef CONFIG_XEN
-       /*
-        * If woken up by an INIT in an 82489DX configuration
-        * we may get here before an INIT-deassert IPI reaches
-        * our local APIC.  We have to wait for the IPI or we'll
-        * lock up on an APIC access.
-        */
-       while (!atomic_read(&init_deasserted))
-               cpu_relax();
-
-#endif
-       /*
-        * (This works even if the APIC is not enabled.)
-        */
-#ifndef CONFIG_XEN
-       phys_id = GET_APIC_ID(apic_read(APIC_ID));
-#else
-       phys_id = smp_processor_id();
-#endif
-       cpuid = smp_processor_id();
-       if (cpu_isset(cpuid, cpu_callin_map)) {
-               panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
-                                       phys_id, cpuid);
-       }
-       Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
-
-       /*
-        * STARTUP IPIs are fragile beasts as they might sometimes
-        * trigger some glue motherboard logic. Complete APIC bus
-        * silence for 1 second, this overestimates the time the
-        * boot CPU is spending to send the up to 2 STARTUP IPIs
-        * by a factor of two. This should be enough.
-        */
-
-       /*
-        * Waiting 2s total for startup (udelay is not yet working)
-        */
-       timeout = jiffies + 2*HZ;
-       while (time_before(jiffies, timeout)) {
-               /*
-                * Has the boot CPU finished its STARTUP sequence?
-                */
-               if (cpu_isset(cpuid, cpu_callout_map))
-                       break;
-               cpu_relax();
-       }
-
-       if (!time_before(jiffies, timeout)) {
-               panic("smp_callin: CPU%d started up but did not get a callout!\n",
-                       cpuid);
-       }
-
-#ifndef CONFIG_XEN
-       /*
-        * the boot CPU has finished the init stage and is spinning
-        * on callin_map until we finish. We are free to set up this
-        * CPU, first the APIC. (this is probably redundant on most
-        * boards)
-        */
-
-       Dprintk("CALLIN, before setup_local_APIC().\n");
-       setup_local_APIC();
-#endif
-
-       /*
-        * Get our bogomips.
-        */
-       calibrate_delay();
-       Dprintk("Stack at about %p\n",&cpuid);
-
-#ifndef CONFIG_XEN
-       disable_APIC_timer();
-#endif
-
-       /*
-        * Save our processor parameters
-        */
-       smp_store_cpu_info(cpuid);
-
-       /*
-        * Allow the master to continue.
-        */
-       cpu_set(cpuid, cpu_callin_map);
-}
-
-#ifdef CONFIG_XEN
-extern void local_setup_timer(void);
-#endif
-
-/*
- * Setup code on secondary processor (after coming out of the trampoline)
- */
-void __cpuinit start_secondary(void)
-{
-       /*
-        * Dont put anything before smp_callin(), SMP
-        * Don't put anything before smp_callin(); SMP
-        * booting is so fragile that we want to limit the
-        * things done here to the bare minimum.
-       cpu_init();
-       smp_callin();
-
-       /* otherwise gcc will move up the smp_processor_id before the cpu_init */
-       barrier();
-
-#ifndef CONFIG_XEN
-       Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());         
-       setup_secondary_APIC_clock();
-
-       Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());
-
-       if (nmi_watchdog == NMI_IO_APIC) {
-               disable_8259A_irq(0);
-               enable_NMI_through_LVT0(NULL);
-               enable_8259A_irq(0);
-       }
-
-       enable_APIC_timer();
-#else
-       local_setup_timer();
-       smp_intr_init();
-       local_irq_enable();
-#endif
-
-       /*
-        * Allow the master to continue.
-        */
-       cpu_set(smp_processor_id(), cpu_online_map);
-       mb();
-
-#ifndef CONFIG_XEN
-       /* Wait for TSC sync to not schedule things before.
-          We still process interrupts, which could see an inconsistent
-          time in that window unfortunately. */
-       tsc_sync_wait();
-#endif
-
-       cpu_idle();
-}
-
-extern volatile unsigned long init_rsp;
-extern void (*initial_code)(void);
-
-#ifndef CONFIG_XEN
-#if APIC_DEBUG
-static void inquire_remote_apic(int apicid)
-{
-       unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
-       char *names[] = { "ID", "VERSION", "SPIV" };
-       int timeout, status;
-
-       printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);
-
-       for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
-               printk("... APIC #%d %s: ", apicid, names[i]);
-
-               /*
-                * Wait for idle.
-                */
-               apic_wait_icr_idle();
-
-               apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
-               apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
-
-               timeout = 0;
-               do {
-                       udelay(100);
-                       status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
-               } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
-
-               switch (status) {
-               case APIC_ICR_RR_VALID:
-                       status = apic_read(APIC_RRR);
-                       printk("%08x\n", status);
-                       break;
-               default:
-                       printk("failed\n");
-               }
-       }
-}
-#endif
-
-/*
- * Kick the secondary to wake up.
- */
-static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
-{
-       unsigned long send_status = 0, accept_status = 0;
-       int maxlvt, timeout, num_starts, j;
-
-       Dprintk("Asserting INIT.\n");
-
-       /*
-        * Turn INIT on target chip
-        */
-       apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
-       /*
-        * Send IPI
-        */
-       apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
-                               | APIC_DM_INIT);
-
-       Dprintk("Waiting for send to finish...\n");
-       timeout = 0;
-       do {
-               Dprintk("+");
-               udelay(100);
-               send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
-       } while (send_status && (timeout++ < 1000));
-
-       mdelay(10);
-
-       Dprintk("Deasserting INIT.\n");
-
-       /* Target chip */
-       apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
-       /* Send IPI */
-       apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
-
-       Dprintk("Waiting for send to finish...\n");
-       timeout = 0;
-       do {
-               Dprintk("+");
-               udelay(100);
-               send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
-       } while (send_status && (timeout++ < 1000));
-
-       atomic_set(&init_deasserted, 1);
-
-       /*
-        * Should we send STARTUP IPIs ?
-        *
-        * Determine this based on the APIC version.
-        * If we don't have an integrated APIC, don't send the STARTUP IPIs.
-        */
-       if (APIC_INTEGRATED(apic_version[phys_apicid]))
-               num_starts = 2;
-       else
-               num_starts = 0;
-
-       /*
-        * Run STARTUP IPI loop.
-        */
-       Dprintk("#startup loops: %d.\n", num_starts);
-
-       maxlvt = get_maxlvt();
-
-       for (j = 1; j <= num_starts; j++) {
-               Dprintk("Sending STARTUP #%d.\n",j);
-               apic_read_around(APIC_SPIV);
-               apic_write(APIC_ESR, 0);
-               apic_read(APIC_ESR);
-               Dprintk("After apic_write.\n");
-
-               /*
-                * STARTUP IPI
-                */
-
-               /* Target chip */
-               apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
-               /* Boot on the stack */
-               /* Kick the second */
-               apic_write_around(APIC_ICR, APIC_DM_STARTUP
-                                       | (start_rip >> 12));
-
-               /*
-                * Give the other CPU some time to accept the IPI.
-                */
-               udelay(300);
-
-               Dprintk("Startup point 1.\n");
-
-               Dprintk("Waiting for send to finish...\n");
-               timeout = 0;
-               do {
-                       Dprintk("+");
-                       udelay(100);
-                       send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
-               } while (send_status && (timeout++ < 1000));
-
-               /*
-                * Give the other CPU some time to accept the IPI.
-                */
-               udelay(200);
-               /*
-                * Due to the Pentium erratum 3AP.
-                */
-               if (maxlvt > 3) {
-                       apic_read_around(APIC_SPIV);
-                       apic_write(APIC_ESR, 0);
-               }
-               accept_status = (apic_read(APIC_ESR) & 0xEF);
-               if (send_status || accept_status)
-                       break;
-       }
-       Dprintk("After Startup.\n");
-
-       if (send_status)
-               printk(KERN_ERR "APIC never delivered???\n");
-       if (accept_status)
-               printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);
-
-       return (send_status | accept_status);
-}
-#endif
-
-/*
- * Boot one CPU.
- */
-static int __cpuinit do_boot_cpu(int cpu, int apicid)
-{
-       struct task_struct *idle;
-       unsigned long boot_error;
-       int timeout;
-       unsigned long start_rip;
-#ifdef CONFIG_XEN
-       vcpu_guest_context_t ctxt;
-       extern void startup_64_smp(void);
-       extern void hypervisor_callback(void);
-       extern void failsafe_callback(void);
-       extern void smp_trap_init(trap_info_t *);
-       int i;
-#endif
-       /*
-        * We can't use kernel_thread since we must avoid to
-        * reschedule the child.
-        */
-       idle = fork_idle(cpu);
-       if (IS_ERR(idle)) {
-               printk("failed fork for CPU %d\n", cpu);
-               return PTR_ERR(idle);
-       }
-
-       cpu_pda[cpu].pcurrent = idle;
-
-#ifndef CONFIG_XEN
-       start_rip = setup_trampoline();
-#else
-       start_rip = (unsigned long)startup_64_smp;
-#endif
-
-       init_rsp = idle->thread.rsp;
-       per_cpu(init_tss,cpu).rsp0 = init_rsp;
-       initial_code = start_secondary;
-       clear_ti_thread_flag(idle->thread_info, TIF_FORK);
-
-       printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid,
-              start_rip, init_rsp);
-
-       /*
-        * This grunge runs the startup process for
-        * the targeted processor.
-        */
-
-       atomic_set(&init_deasserted, 0);
-
-#ifdef CONFIG_XEN
-       cpu_gdt_descr[cpu].address = __get_free_page(GFP_KERNEL|__GFP_ZERO);
-       BUG_ON(cpu_gdt_descr[0].size > PAGE_SIZE);
-       cpu_gdt_descr[cpu].size = cpu_gdt_descr[0].size;
-       memcpy((void *)cpu_gdt_descr[cpu].address,
-               (void *)cpu_gdt_descr[0].address, cpu_gdt_descr[0].size);
-
-       memset(&ctxt, 0, sizeof(ctxt));
-
-       ctxt.flags = VGCF_IN_KERNEL;
-       ctxt.user_regs.ds = __USER_DS;
-       ctxt.user_regs.es = __USER_DS;
-       ctxt.user_regs.fs = 0;
-       ctxt.user_regs.gs = 0;
-       ctxt.user_regs.ss = __KERNEL_DS|0x3;
-       ctxt.user_regs.cs = __KERNEL_CS|0x3;
-       ctxt.user_regs.rip = start_rip;
-       ctxt.user_regs.rsp = idle->thread.rsp;
-#define X86_EFLAGS_IOPL_RING3 0x3000
-       ctxt.user_regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_IOPL_RING3;
-
-       /* FPU is set up to default initial state. */
-       memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
-
-       smp_trap_init(ctxt.trap_ctxt);
-
-       /* No LDT. */
-       ctxt.ldt_ents = 0;
-
-       {
-               unsigned long va;
-               int f;
-
-               for (va = cpu_gdt_descr[cpu].address, f = 0;
-                    va < cpu_gdt_descr[cpu].address + cpu_gdt_descr[cpu].size;
-                    va += PAGE_SIZE, f++) {
-                       ctxt.gdt_frames[f] = virt_to_mfn(va);
-                       make_page_readonly((void *)va);
-               }
-               ctxt.gdt_ents = GDT_ENTRIES;
-       }
-
-       /* Ring 1 stack is the initial stack. */
-       ctxt.kernel_ss = __KERNEL_DS;
-       ctxt.kernel_sp = idle->thread.rsp;
-
-       /* Callback handlers. */
-       ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
-       ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
-       ctxt.syscall_callback_eip  = (unsigned long)system_call;
-
-       ctxt.ctrlreg[3] = virt_to_mfn(init_level4_pgt) << PAGE_SHIFT;
-
-       boot_error  = HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt);
-       if (boot_error)
-               printk("boot error: %ld\n", boot_error);
-
-       if (!boot_error) {
-               HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
-
-               /*
-                * allow APs to start initializing.
-                */
-               Dprintk("Before Callout %d.\n", cpu);
-               cpu_set(cpu, cpu_callout_map);
-               Dprintk("After Callout %d.\n", cpu);
-
-               /*
-                * Wait 5s total for a response
-                */
-               for (timeout = 0; timeout < 50000; timeout++) {
-                       if (cpu_isset(cpu, cpu_callin_map))
-                               break;  /* It has booted */
-                       udelay(100);
-               }
-
-               if (cpu_isset(cpu, cpu_callin_map)) {
-                       /* number CPUs logically, starting from 1 (BSP is 0) */
-                       Dprintk("CPU has booted.\n");
-               } else {
-                       boot_error= 1;
-               }
-       }
-       x86_cpu_to_apicid[cpu] = apicid;
-#else
-       Dprintk("Setting warm reset code and vector.\n");
-
-       CMOS_WRITE(0xa, 0xf);
-       local_flush_tlb();
-       Dprintk("1.\n");
-       *((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
-       Dprintk("2.\n");
-       *((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
-       Dprintk("3.\n");
-
-       /*
-        * Be paranoid about clearing APIC errors.
-        */
-       if (APIC_INTEGRATED(apic_version[apicid])) {
-               apic_read_around(APIC_SPIV);
-               apic_write(APIC_ESR, 0);
-               apic_read(APIC_ESR);
-       }
-
-       /*
-        * Status is now clean
-        */
-       boot_error = 0;
-
-       /*
-        * Starting actual IPI sequence...
-        */
-       boot_error = wakeup_secondary_via_INIT(apicid, start_rip);
-
-       if (!boot_error) {
-               /*
-                * allow APs to start initializing.
-                */
-               Dprintk("Before Callout %d.\n", cpu);
-               cpu_set(cpu, cpu_callout_map);
-               Dprintk("After Callout %d.\n", cpu);
-
-               /*
-                * Wait 5s total for a response
-                */
-               for (timeout = 0; timeout < 50000; timeout++) {
-                       if (cpu_isset(cpu, cpu_callin_map))
-                               break;  /* It has booted */
-                       udelay(100);
-               }
-
-               if (cpu_isset(cpu, cpu_callin_map)) {
-                       /* number CPUs logically, starting from 1 (BSP is 0) */
-                       Dprintk("CPU has booted.\n");
-               } else {
-                       boot_error = 1;
-                       if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
-                                       == 0xA5)
-                               /* trampoline started but...? */
-                               printk("Stuck ??\n");
-                       else
-                               /* trampoline code not run */
-                               printk("Not responding.\n");
-#if APIC_DEBUG
-                       inquire_remote_apic(apicid);
-#endif
-               }
-       }
-#endif
-       if (boot_error) {
-               cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
-               clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
-               cpu_clear(cpu, cpu_present_map);
-               cpu_clear(cpu, cpu_possible_map);
-               x86_cpu_to_apicid[cpu] = BAD_APICID;
-               x86_cpu_to_log_apicid[cpu] = BAD_APICID;
-               return -EIO;
-       }
-
-       return 0;
-}
-
-cycles_t cacheflush_time;
-unsigned long cache_decay_ticks;
-
-/*
- * Construct cpu_sibling_map[], so that we can tell the sibling CPU
- * on SMT systems efficiently.
- */
-static __cpuinit void detect_siblings(void)
-{
-       int cpu;
-
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
-               cpus_clear(cpu_sibling_map[cpu]);
-               cpus_clear(cpu_core_map[cpu]);
-       }
-
-       for_each_online_cpu (cpu) {
-               struct cpuinfo_x86 *c = cpu_data + cpu;
-               int siblings = 0;
-               int i;
-               if (smp_num_siblings > 1) {
-                       for_each_online_cpu (i) {
-                               if (cpu_core_id[cpu] == cpu_core_id[i]) {
-                                       siblings++;
-                                       cpu_set(i, cpu_sibling_map[cpu]);
-                               }
-                       }
-               } else {
-                       siblings++;
-                       cpu_set(cpu, cpu_sibling_map[cpu]);
-               }
-
-               if (siblings != smp_num_siblings) {
-                       printk(KERN_WARNING
-              "WARNING: %d siblings found for CPU%d, should be %d\n",
-                              siblings, cpu, smp_num_siblings);
-                       smp_num_siblings = siblings;
-               }
-               if (c->x86_num_cores > 1) {
-                       for_each_online_cpu(i) {
-                               if (phys_proc_id[cpu] == phys_proc_id[i])
-                                       cpu_set(i, cpu_core_map[cpu]);
-                       }
-               } else
-                       cpu_core_map[cpu] = cpu_sibling_map[cpu];
-       }
-}
-
-#ifndef CONFIG_XEN
-/*
- * Cleanup possible dangling ends...
- */
-static __cpuinit void smp_cleanup_boot(void)
-{
-       /*
-        * Paranoid:  Set warm reset code and vector here back
-        * to default values.
-        */
-       CMOS_WRITE(0, 0xf);
-
-       /*
-        * Reset trampoline flag
-        */
-       *((volatile int *) phys_to_virt(0x467)) = 0;
-
-#ifndef CONFIG_HOTPLUG_CPU
-       /*
-        * Free pages reserved for SMP bootup.
-        * When you add hotplug CPU support later remove this
-        * Note there is more work to be done for later CPU bootup.
-        */
-
-       free_page((unsigned long) __va(PAGE_SIZE));
-       free_page((unsigned long) __va(SMP_TRAMPOLINE_BASE));
-#endif
-}
-#endif
-
-/*
- * Fall back to non SMP mode after errors.
- *
- * RED-PEN audit/test this more. I bet there is more state messed up here.
- */
-static __cpuinit void disable_smp(void)
-{
-       cpu_present_map = cpumask_of_cpu(0);
-       cpu_possible_map = cpumask_of_cpu(0);
-#ifndef CONFIG_XEN
-       if (smp_found_config)
-               phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
-       else
-               phys_cpu_present_map = physid_mask_of_physid(0);
-#endif
-       cpu_set(0, cpu_sibling_map[0]);
-       cpu_set(0, cpu_core_map[0]);
-}
-
-/*
- * Handle user cpus=... parameter.
- */
-static __cpuinit void enforce_max_cpus(unsigned max_cpus)
-{
-       int i, k;
-       k = 0;
-       for (i = 0; i < NR_CPUS; i++) {
-               if (!cpu_possible(i))
-                       continue;
-               if (++k > max_cpus) {
-                       cpu_clear(i, cpu_possible_map);
-                       cpu_clear(i, cpu_present_map);
-               }
-       }
-}
-
-/*
- * Various sanity checks.
- */
-static int __cpuinit smp_sanity_check(unsigned max_cpus)
-{
-#ifndef CONFIG_XEN
-       if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
-               printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
-                      hard_smp_processor_id());
-               physid_set(hard_smp_processor_id(), phys_cpu_present_map);
-       }
-
-       /*
-        * If we couldn't find an SMP configuration at boot time,
-        * get out of here now!
-        */
-       if (!smp_found_config) {
-               printk(KERN_NOTICE "SMP motherboard not detected.\n");
-               disable_smp();
-               if (APIC_init_uniprocessor())
-                       printk(KERN_NOTICE "Local APIC not detected."
-                                          " Using dummy APIC emulation.\n");
-               return -1;
-       }
-
-       /*
-        * Should not be necessary because the MP table should list the boot
-        * CPU too, but we do it for the sake of robustness anyway.
-        */
-       if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
-               printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
-                                                                boot_cpu_id);
-               physid_set(hard_smp_processor_id(), phys_cpu_present_map);
-       }
-
-       /*
-        * If we couldn't find a local APIC, then get out of here now!
-        */
-       if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && !cpu_has_apic) {
-               printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
-                       boot_cpu_id);
-               printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
-               nr_ioapics = 0;
-               return -1;
-       }
-#endif
-
-       /*
-        * If SMP should be disabled, then really disable it!
-        */
-       if (!max_cpus) {
-#ifdef CONFIG_XEN
-               HYPERVISOR_shared_info->n_vcpu = 1;
-#endif
-               printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
-#ifndef CONFIG_XEN
-               nr_ioapics = 0;
-#endif
-               return -1;
-       }
-
-       return 0;
-}
-
-/*
- * Prepare for SMP bootup.  The MP table or ACPI has been read
- * earlier.  Just do some sanity checking here and enable APIC mode.
- */
-void __cpuinit smp_prepare_cpus(unsigned int max_cpus)
-{
-       int i;
-
-#if defined(CONFIG_XEN) && !defined(CONFIG_XEN_PRIVILEGED_GUEST)
-#else
-       nmi_watchdog_default();
-#endif
-       current_cpu_data = boot_cpu_data;
-       current_thread_info()->cpu = 0;  /* needed? */
-
-       enforce_max_cpus(max_cpus);
-
-       /*
-        * Fill in cpu_present_mask
-        */
-       for (i = 0; i < NR_CPUS; i++) {
-#ifndef CONFIG_XEN
-               int apicid = cpu_present_to_apicid(i);
-               if (physid_isset(apicid, phys_cpu_present_map)) {
-#else
-               if (i < HYPERVISOR_shared_info->n_vcpu) {
-#endif
-                       cpu_set(i, cpu_present_map);
-                       /* possible map would be different if we supported real
-                          CPU hotplug. */
-                       cpu_set(i, cpu_possible_map);
-               }
-       }
-
-       if (smp_sanity_check(max_cpus) < 0) {
-               printk(KERN_INFO "SMP disabled\n");
-               disable_smp();
-               return;
-       }
-
-#ifdef CONFIG_XEN
-       smp_intr_init();
-#else
-
-       /*
-        * Switch from PIC to APIC mode.
-        */
-       connect_bsp_APIC();
-       setup_local_APIC();
-
-       if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
-               panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
-                     GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
-               /* Or can we switch back to PIC here? */
-       }
-#endif
-
-       /*
-        * Now start the IO-APICs
-        */
-#if defined(CONFIG_XEN) && !defined(CONFIG_XEN_PRIVILEGED_GUEST)
-#else
-       if (!skip_ioapic_setup && nr_ioapics)
-               setup_IO_APIC();
-       else
-               nr_ioapics = 0;
-#endif
-
-       /*
-        * Set up local APIC timer on boot CPU.
-        */
-
-#ifndef CONFIG_XEN
-       setup_boot_APIC_clock();
-#endif
-}
-
-/*
- * Early setup to make printk work.
- */
-void __init smp_prepare_boot_cpu(void)
-{
-       int me = smp_processor_id();
-       cpu_set(me, cpu_online_map);
-       cpu_set(me, cpu_callout_map);
-}
-
-/*
- * Entry point to boot a CPU.
- *
- * This is all __cpuinit, not __devinit for now because we don't support
- * CPU hotplug (yet).
- */
-int __cpuinit __cpu_up(unsigned int cpu)
-{
-       int err;
-#ifndef CONFIG_XEN
-       int apicid = cpu_present_to_apicid(cpu);
-#else
-       int apicid = cpu;
-#endif
-
-       WARN_ON(irqs_disabled());
-
-       Dprintk("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
-
-#ifndef CONFIG_XEN
-       if (apicid == BAD_APICID || apicid == boot_cpu_id ||
-           !physid_isset(apicid, phys_cpu_present_map)) {
-               printk("__cpu_up: bad cpu %d\n", cpu);
-               return -EINVAL;
-       }
-#endif
-
-       /* Boot it! */
-       err = do_boot_cpu(cpu, apicid);
-       if (err < 0) {
-               Dprintk("do_boot_cpu failed %d\n", err);
-               return err;
-       }
-
-       /* Unleash the CPU! */
-       Dprintk("waiting for cpu %d\n", cpu);
-
-       while (!cpu_isset(cpu, cpu_online_map))
-               cpu_relax();
-       return 0;
-}
-
-/*
- * Finish the SMP boot.
- */
-void __cpuinit smp_cpus_done(unsigned int max_cpus)
-{
-#ifndef CONFIG_XEN
-       zap_low_mappings();
-       smp_cleanup_boot();
-
-#ifdef CONFIG_X86_IO_APIC
-       setup_ioapic_dest();
-#endif
-#endif
-
-       detect_siblings();
-#ifndef CONFIG_XEN
-       time_init_gtod();
-
-       check_nmi_watchdog();
-#endif
-}
-
-#ifdef CONFIG_XEN
-extern int bind_ipi_to_irq(int ipi);
-extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
-extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
-
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
-static char resched_name[NR_CPUS][15];
-static char callfunc_name[NR_CPUS][15];
-
-void smp_intr_init(void)
-{
-       int cpu = smp_processor_id();
-
-       per_cpu(resched_irq, cpu) =
-               bind_ipi_to_irq(RESCHEDULE_VECTOR);
-       sprintf(resched_name[cpu], "resched%d", cpu);
-       BUG_ON(request_irq(per_cpu(resched_irq, cpu), smp_reschedule_interrupt,
-                          SA_INTERRUPT, resched_name[cpu], NULL));
-
-       per_cpu(callfunc_irq, cpu) =
-               bind_ipi_to_irq(CALL_FUNCTION_VECTOR);
-       sprintf(callfunc_name[cpu], "callfunc%d", cpu);
-       BUG_ON(request_irq(per_cpu(callfunc_irq, cpu),
-                          smp_call_function_interrupt,
-                          SA_INTERRUPT, callfunc_name[cpu], NULL));
-}
-
-static void smp_intr_exit(void)
-{
-       int cpu = smp_processor_id();
-
-       free_irq(per_cpu(resched_irq, cpu), NULL);
-       unbind_ipi_from_irq(RESCHEDULE_VECTOR);
-
-       free_irq(per_cpu(callfunc_irq, cpu), NULL);
-       unbind_ipi_from_irq(CALL_FUNCTION_VECTOR);
-}
-
-extern void local_setup_timer_irq(void);
-extern void local_teardown_timer_irq(void);
-
-void smp_suspend(void)
-{
-       local_teardown_timer_irq();
-       smp_intr_exit();
-}
-
-void smp_resume(void)
-{
-       smp_intr_init();
-       local_setup_timer_irq();
-}
-
-void vcpu_prepare(int vcpu)
-{
-}
-
-#endif
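
The Xen-specific block above routes IPIs through event channels rather than the local APIC: smp_intr_init() binds RESCHEDULE_VECTOR and CALL_FUNCTION_VECTOR to dynamically allocated irqs with bind_ipi_to_irq() and installs ordinary interrupt handlers on them, and smp_intr_exit() reverses both steps for smp_suspend(). A minimal sketch of that bind/teardown pairing, using only the helpers declared in the removed code (the wrapper names are illustrative, not part of the patch):

/* Sketch: bind one IPI vector to an event-channel irq and install a handler,
 * mirroring the smp_intr_init()/smp_intr_exit() pairing shown above. */
static int bind_one_ipi(int vector,
                        irqreturn_t (*handler)(int, void *, struct pt_regs *),
                        char *name)
{
        int irq = bind_ipi_to_irq(vector);       /* event channel -> virtual irq */
        BUG_ON(request_irq(irq, handler, SA_INTERRUPT, name, NULL));
        return irq;                              /* caller keeps it for teardown */
}

static void unbind_one_ipi(int vector, int irq)
{
        free_irq(irq, NULL);                     /* reverse order of setup */
        unbind_ipi_from_irq(vector);
}
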
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/smpboot_hooks.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/smpboot_hooks.h    Fri Oct 21 19:58:39 2005
+++ /dev/null   Mon Oct 24 15:08:13 2005
@@ -1,55 +0,0 @@
-/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws
- * which needs to alter them. */
-
-static inline void smpboot_clear_io_apic_irqs(void)
-{
-#ifdef CONFIG_X86_IO_APIC
-       io_apic_irqs = 0;
-#endif
-}
-
-static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
-{
-#if 1
-       printk("smpboot_setup_warm_reset_vector\n");
-#else
-       CMOS_WRITE(0xa, 0xf);
-       local_flush_tlb();
-       Dprintk("1.\n");
-       *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
-       Dprintk("2.\n");
-       *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
-       Dprintk("3.\n");
-#endif
-}
-
-static inline void smpboot_restore_warm_reset_vector(void)
-{
-       /*
-        * Install writable page 0 entry to set BIOS data area.
-        */
-       local_flush_tlb();
-
-       /*
-        * Paranoid:  Set warm reset code and vector here back
-        * to default values.
-        */
-       CMOS_WRITE(0, 0xf);
-
-       *((volatile long *) phys_to_virt(0x467)) = 0;
-}
-
-static inline void smpboot_setup_io_apic(void)
-{
-#ifdef CONFIG_X86_IO_APIC
-       /*
-        * Here we can be sure that there is an IO-APIC in the system. Let's
-        * go and set it up:
-        */
-       if (!skip_ioapic_setup && nr_ioapics)
-               setup_IO_APIC();
-#endif
-}
-
-
-#define        smp_found_config        (HYPERVISOR_shared_info->n_vcpu > 1)
diff -r ff7c5a791ed5 -r fdea4a967bc7 linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/smpboot_hooks.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/smpboot_hooks.h  Fri Oct 21 19:58:39 2005
+++ /dev/null   Mon Oct 24 15:08:13 2005
@@ -1,55 +0,0 @@
-/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws
- * which needs to alter them. */
-
-static inline void smpboot_clear_io_apic_irqs(void)
-{
-#ifdef CONFIG_X86_IO_APIC
-       io_apic_irqs = 0;
-#endif
-}
-
-static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
-{
-#if 1
-       printk("smpboot_setup_warm_reset_vector\n");
-#else
-       CMOS_WRITE(0xa, 0xf);
-       local_flush_tlb();
-       Dprintk("1.\n");
-       *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
-       Dprintk("2.\n");
-       *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
-       Dprintk("3.\n");
-#endif
-}
-
-static inline void smpboot_restore_warm_reset_vector(void)
-{
-       /*
-        * Install writable page 0 entry to set BIOS data area.
-        */
-       local_flush_tlb();
-
-       /*
-        * Paranoid:  Set warm reset code and vector here back
-        * to default values.
-        */
-       CMOS_WRITE(0, 0xf);
-
-       *((volatile long *) phys_to_virt(0x467)) = 0;
-}
-
-static inline void smpboot_setup_io_apic(void)
-{
-#ifdef CONFIG_X86_IO_APIC
-       /*
-        * Here we can be sure that there is an IO-APIC in the system. Let's
-        * go and set it up:
-        */
-       if (!skip_ioapic_setup && nr_ioapics)
-               setup_IO_APIC();
-#endif
-}
-
-
-#define        smp_found_config        (HYPERVISOR_shared_info->n_vcpu > 1)
diff -r ff7c5a791ed5 -r fdea4a967bc7 patches/linux-2.6.12/patch-2.6.12.5
--- a/patches/linux-2.6.12/patch-2.6.12.5       Fri Oct 21 19:58:39 2005
+++ /dev/null   Mon Oct 24 15:08:13 2005
@@ -1,1614 +0,0 @@
-diff --git a/Makefile b/Makefile
---- a/Makefile
-+++ b/Makefile
-@@ -1,7 +1,7 @@
- VERSION = 2
- PATCHLEVEL = 6
- SUBLEVEL = 12
--EXTRAVERSION =
-+EXTRAVERSION = .5
- NAME=Woozy Numbat
- 
- # *DOCUMENTATION*
-@@ -1149,7 +1149,7 @@ endif # KBUILD_EXTMOD
- #(which is the most common case IMHO) to avoid unneeded clutter in the big tags file.
- #Adding $(srctree) adds about 20M on i386 to the size of the output file!
- 
--ifeq ($(KBUILD_OUTPUT),)
-+ifeq ($(src),$(obj))
- __srctree =
- else
- __srctree = $(srctree)/
-diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
---- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
-+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
-@@ -44,7 +44,7 @@
- 
- #define PFX "powernow-k8: "
- #define BFX PFX "BIOS error: "
--#define VERSION "version 1.40.2"
-+#define VERSION "version 1.40.4"
- #include "powernow-k8.h"
- 
- /* serialize freq changes  */
-@@ -978,7 +978,7 @@ static int __init powernowk8_cpu_init(st
- {
-       struct powernow_k8_data *data;
-       cpumask_t oldmask = CPU_MASK_ALL;
--      int rc;
-+      int rc, i;
- 
-       if (!check_supported_cpu(pol->cpu))
-               return -ENODEV;
-@@ -1064,7 +1064,9 @@ static int __init powernowk8_cpu_init(st
-       printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
-              data->currfid, data->currvid);
- 
--      powernow_data[pol->cpu] = data;
-+      for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
-+              powernow_data[i] = data;
-+      }
- 
-       return 0;
- 
-diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
---- a/arch/i386/kernel/process.c
-+++ b/arch/i386/kernel/process.c
-@@ -827,6 +827,8 @@ asmlinkage int sys_get_thread_area(struc
-       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-               return -EINVAL;
- 
-+      memset(&info, 0, sizeof(info));
-+
-       desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
- 
-       info.entry_number = idx;
-diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
---- a/arch/ia64/kernel/ptrace.c
-+++ b/arch/ia64/kernel/ptrace.c
-@@ -945,6 +945,13 @@ access_uarea (struct task_struct *child,
-                               *data = (pt->cr_ipsr & IPSR_MASK);
-                       return 0;
- 
-+                    case PT_AR_RSC:
-+                      if (write_access)
-+                              pt->ar_rsc = *data | (3 << 2); /* force PL3 */
-+                      else
-+                              *data = pt->ar_rsc;
-+                      return 0;
-+
-                     case PT_AR_RNAT:
-                       urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
-                       rnat_addr = (long) ia64_rse_rnat_addr((long *)
-@@ -996,9 +1003,6 @@ access_uarea (struct task_struct *child,
-                     case PT_AR_BSPSTORE:
-                       ptr = pt_reg_addr(pt, ar_bspstore);
-                       break;
--                    case PT_AR_RSC:
--                      ptr = pt_reg_addr(pt, ar_rsc);
--                      break;
-                     case PT_AR_UNAT:
-                       ptr = pt_reg_addr(pt, ar_unat);
-                       break;
-@@ -1234,7 +1238,7 @@ ptrace_getregs (struct task_struct *chil
- static long
- ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
- {
--      unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
-+      unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
-       struct unw_frame_info info;
-       struct switch_stack *sw;
-       struct ia64_fpreg fpval;
-@@ -1267,7 +1271,7 @@ ptrace_setregs (struct task_struct *chil
-       /* app regs */
- 
-       retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
--      retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
-+      retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
-       retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
-       retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
-       retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
-@@ -1365,6 +1369,7 @@ ptrace_setregs (struct task_struct *chil
-       retval |= __get_user(nat_bits, &ppr->nat);
- 
-       retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
-+      retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
-       retval |= access_uarea(child, PT_AR_EC, &ec, 1);
-       retval |= access_uarea(child, PT_AR_LC, &lc, 1);
-       retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
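
The PT_AR_RSC write path above ORs in 3 << 2 before storing a tracer-supplied value, and the restore_sigcontext() hunk below applies the same mask. On ia64 the privilege-level field of ar.rsc occupies bits 2-3, so forcing it to 3 keeps the register backing store running at user privilege no matter what the debugger or signal frame supplied. A small illustration of the bit manipulation (the helper name is hypothetical):

/* ar.rsc, relevant low bits: mode in bits 0-1, privilege level in bits 2-3.
 * Forcing pl to 3 (user) is an OR with 3 << 2 == 0xc. */
static inline unsigned long force_rsc_pl3(unsigned long rsc)
{
        return rsc | (3UL << 2);        /* e.g. 0x00 -> 0x0c, 0x1f stays 0x1f */
}
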
-diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
---- a/arch/ia64/kernel/signal.c
-+++ b/arch/ia64/kernel/signal.c
-@@ -94,7 +94,7 @@ sys_sigaltstack (const stack_t __user *u
- static long
- restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
- {
--      unsigned long ip, flags, nat, um, cfm;
-+      unsigned long ip, flags, nat, um, cfm, rsc;
-       long err;
- 
-       /* Always make any pending restarted system calls return -EINTR */
-@@ -106,7 +106,7 @@ restore_sigcontext (struct sigcontext __
-       err |= __get_user(ip, &sc->sc_ip);                      /* instruction pointer */
-       err |= __get_user(cfm, &sc->sc_cfm);
-       err |= __get_user(um, &sc->sc_um);                      /* user mask */
--      err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
-+      err |= __get_user(rsc, &sc->sc_ar_rsc);
-       err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
-       err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
-       err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
-@@ -119,6 +119,7 @@ restore_sigcontext (struct sigcontext __
-       err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);       /* r15 */
- 
-       scr->pt.cr_ifs = cfm | (1UL << 63);
-+      scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */
- 
-       /* establish new instruction pointer: */
-       scr->pt.cr_iip = ip & ~0x3UL;
-diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
---- a/arch/ppc/kernel/time.c
-+++ b/arch/ppc/kernel/time.c
-@@ -89,6 +89,9 @@ unsigned long tb_to_ns_scale;
- 
- extern unsigned long wall_jiffies;
- 
-+/* used for timezone offset */
-+static long timezone_offset;
-+
- DEFINE_SPINLOCK(rtc_lock);
- 
- EXPORT_SYMBOL(rtc_lock);
-@@ -170,7 +173,7 @@ void timer_interrupt(struct pt_regs * re
-                    xtime.tv_sec - last_rtc_update >= 659 &&
-                    abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
-                    jiffies - wall_jiffies == 1) {
--                      if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0)
-+                      if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
-                               last_rtc_update = xtime.tv_sec+1;
-                       else
-                               /* Try again one minute later */
-@@ -286,7 +289,7 @@ void __init time_init(void)
-       unsigned old_stamp, stamp, elapsed;
- 
-         if (ppc_md.time_init != NULL)
--                time_offset = ppc_md.time_init();
-+                timezone_offset = ppc_md.time_init();
- 
-       if (__USE_RTC()) {
-               /* 601 processor: dec counts down by 128 every 128ns */
-@@ -331,10 +334,10 @@ void __init time_init(void)
-       set_dec(tb_ticks_per_jiffy);
- 
-       /* If platform provided a timezone (pmac), we correct the time */
--        if (time_offset) {
--              sys_tz.tz_minuteswest = -time_offset / 60;
-+        if (timezone_offset) {
-+              sys_tz.tz_minuteswest = -timezone_offset / 60;
-               sys_tz.tz_dsttime = 0;
--              xtime.tv_sec -= time_offset;
-+              xtime.tv_sec -= timezone_offset;
-         }
-         set_normalized_timespec(&wall_to_monotonic,
-                                 -xtime.tv_sec, -xtime.tv_nsec);
-diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
---- a/arch/ppc64/boot/zlib.c
-+++ b/arch/ppc64/boot/zlib.c
-@@ -1307,7 +1307,7 @@ local int huft_build(
-   {
-     *t = (inflate_huft *)Z_NULL;
-     *m = 0;
--    return Z_OK;
-+    return Z_DATA_ERROR;
-   }
- 
- 
-@@ -1351,6 +1351,7 @@ local int huft_build(
-     if ((j = *p++) != 0)
-       v[x[j]++] = i;
-   } while (++i < n);
-+  n = x[g];                   /* set n to length of v */
- 
- 
-   /* Generate the Huffman codes and for each, make the table entries */
-diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
---- a/arch/um/kernel/process.c
-+++ b/arch/um/kernel/process.c
-@@ -130,7 +130,7 @@ int start_fork_tramp(void *thread_arg, u
-       return(arg.pid);
- }
- 
--static int ptrace_child(void)
-+static int ptrace_child(void *arg)
- {
-       int ret;
-       int pid = os_getpid(), ppid = getppid();
-@@ -159,16 +159,20 @@ static int ptrace_child(void)
-       _exit(ret);
- }
- 
--static int start_ptraced_child(void)
-+static int start_ptraced_child(void **stack_out)
- {
-+      void *stack;
-+      unsigned long sp;
-       int pid, n, status;
-       
--      pid = fork();
--      if(pid == 0)
--              ptrace_child();
--
-+      stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
-+                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-+      if(stack == MAP_FAILED)
-+              panic("check_ptrace : mmap failed, errno = %d", errno);
-+      sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
-+      pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL);
-       if(pid < 0)
--              panic("check_ptrace : fork failed, errno = %d", errno);
-+              panic("check_ptrace : clone failed, errno = %d", errno);
-       CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
-       if(n < 0)
-               panic("check_ptrace : wait failed, errno = %d", errno);
-@@ -176,6 +180,7 @@ static int start_ptraced_child(void)
-               panic("check_ptrace : expected SIGSTOP, got status = %d",
-                     status);
- 
-+      *stack_out = stack;
-       return(pid);
- }
- 
-@@ -183,12 +188,12 @@ static int start_ptraced_child(void)
-  * just avoid using sysemu, not panic, but only if SYSEMU features are broken.
-  * So only for SYSEMU features we test mustpanic, while normal host features
-  * must work anyway!*/
--static int stop_ptraced_child(int pid, int exitcode, int mustexit)
-+static int stop_ptraced_child(int pid, void *stack, int exitcode, int mustpanic)
- {
-       int status, n, ret = 0;
- 
-       if(ptrace(PTRACE_CONT, pid, 0, 0) < 0)
--              panic("stop_ptraced_child : ptrace failed, errno = %d", errno);
-+              panic("check_ptrace : ptrace failed, errno = %d", errno);
-       CATCH_EINTR(n = waitpid(pid, &status, 0));
-       if(!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
-               int exit_with = WEXITSTATUS(status);
-@@ -199,13 +204,15 @@ static int stop_ptraced_child(int pid, i
-               printk("check_ptrace : child exited with exitcode %d, while "
-                     "expecting %d; status 0x%x", exit_with,
-                     exitcode, status);
--              if (mustexit)
-+              if (mustpanic)
-                       panic("\n");
-               else
-                       printk("\n");
-               ret = -1;
-       }
- 
-+      if(munmap(stack, PAGE_SIZE) < 0)
-+              panic("check_ptrace : munmap failed, errno = %d", errno);
-       return ret;
- }
- 
-@@ -227,11 +234,12 @@ __uml_setup("nosysemu", nosysemu_cmd_par
- 
- static void __init check_sysemu(void)
- {
-+      void *stack;
-       int pid, syscall, n, status, count=0;
- 
-       printk("Checking syscall emulation patch for ptrace...");
-       sysemu_supported = 0;
--      pid = start_ptraced_child();
-+      pid = start_ptraced_child(&stack);
- 
-       if(ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
-               goto fail;
-@@ -249,7 +257,7 @@ static void __init check_sysemu(void)
-               panic("check_sysemu : failed to modify system "
-                     "call return, errno = %d", errno);
- 
--      if (stop_ptraced_child(pid, 0, 0) < 0)
-+      if (stop_ptraced_child(pid, stack, 0, 0) < 0)
-               goto fail_stopped;
- 
-       sysemu_supported = 1;
-@@ -257,7 +265,7 @@ static void __init check_sysemu(void)
-       set_using_sysemu(!force_sysemu_disabled);
- 
-       printk("Checking advanced syscall emulation patch for ptrace...");
--      pid = start_ptraced_child();
-+      pid = start_ptraced_child(&stack);
-       while(1){
-               count++;
-               if(ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0)
-@@ -282,7 +290,7 @@ static void __init check_sysemu(void)
-                       break;
-               }
-       }
--      if (stop_ptraced_child(pid, 0, 0) < 0)
-+      if (stop_ptraced_child(pid, stack, 0, 0) < 0)
-               goto fail_stopped;
- 
-       sysemu_supported = 2;
-@@ -293,17 +301,18 @@ static void __init check_sysemu(void)
-       return;
- 
- fail:
--      stop_ptraced_child(pid, 1, 0);
-+      stop_ptraced_child(pid, stack, 1, 0);
- fail_stopped:
-       printk("missing\n");
- }
- 
- void __init check_ptrace(void)
- {
-+      void *stack;
-       int pid, syscall, n, status;
- 
-       printk("Checking that ptrace can change system call numbers...");
--      pid = start_ptraced_child();
-+      pid = start_ptraced_child(&stack);
- 
-       if (ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *)PTRACE_O_TRACESYSGOOD) < 0)
-               panic("check_ptrace: PTRACE_SETOPTIONS failed, errno = %d", errno);
-@@ -330,7 +339,7 @@ void __init check_ptrace(void)
-                       break;
-               }
-       }
--      stop_ptraced_child(pid, 0, 1);
-+      stop_ptraced_child(pid, stack, 0, 1);
-       printk("OK\n");
-       check_sysemu();
- }
-@@ -362,10 +371,11 @@ void forward_pending_sigio(int target)
- static inline int check_skas3_ptrace_support(void)
- {
-       struct ptrace_faultinfo fi;
-+      void *stack;
-       int pid, n, ret = 1;
- 
-       printf("Checking for the skas3 patch in the host...");
--      pid = start_ptraced_child();
-+      pid = start_ptraced_child(&stack);
- 
-       n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
-       if (n < 0) {
-@@ -380,7 +390,7 @@ static inline int check_skas3_ptrace_sup
-       }
- 
-       init_registers(pid);
--      stop_ptraced_child(pid, 1, 1);
-+      stop_ptraced_child(pid, stack, 1, 1);
- 
-       return(ret);
- }
-diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
---- a/arch/x86_64/ia32/syscall32.c
-+++ b/arch/x86_64/ia32/syscall32.c
-@@ -57,6 +57,7 @@ int syscall32_setup_pages(struct linux_b
-       int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
-       struct vm_area_struct *vma;
-       struct mm_struct *mm = current->mm;
-+      int ret;
- 
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-       if (!vma)
-@@ -78,7 +79,11 @@ int syscall32_setup_pages(struct linux_b
-       vma->vm_mm = mm;
- 
-       down_write(&mm->mmap_sem);
--      insert_vm_struct(mm, vma);
-+      if ((ret = insert_vm_struct(mm, vma))) {
-+              up_write(&mm->mmap_sem);
-+              kmem_cache_free(vm_area_cachep, vma);
-+              return ret;
-+      }
-       mm->total_vm += npages;
-       up_write(&mm->mmap_sem);
-       return 0;
-diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
---- a/arch/x86_64/kernel/setup.c
-+++ b/arch/x86_64/kernel/setup.c
-@@ -729,8 +729,6 @@ static void __init amd_detect_cmp(struct
-       int cpu = smp_processor_id();
-       int node = 0;
-       unsigned bits;
--      if (c->x86_num_cores == 1)
--              return;
- 
-       bits = 0;
-       while ((1 << bits) < c->x86_num_cores)
-diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
---- a/arch/x86_64/kernel/smp.c
-+++ b/arch/x86_64/kernel/smp.c
-@@ -284,6 +284,71 @@ struct call_data_struct {
- static struct call_data_struct * call_data;
- 
- /*
-+ * this function sends a 'generic call function' IPI to one other CPU
-+ * in the system.
-+ */
-+static void __smp_call_function_single (int cpu, void (*func) (void *info), void *info,
-+                              int nonatomic, int wait)
-+{
-+      struct call_data_struct data;
-+      int cpus = 1;
-+
-+      data.func = func;
-+      data.info = info;
-+      atomic_set(&data.started, 0);
-+      data.wait = wait;
-+      if (wait)
-+              atomic_set(&data.finished, 0);
-+
-+      call_data = &data;
-+      wmb();
-+      /* Send a message to all other CPUs and wait for them to respond */
-+      send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
-+
-+      /* Wait for response */
-+      while (atomic_read(&data.started) != cpus)
-+              cpu_relax();
-+
-+      if (!wait)
-+              return;
-+
-+      while (atomic_read(&data.finished) != cpus)
-+              cpu_relax();
-+}
-+
-+/*
-+ * Run a function on another CPU
-+ *  <func>    The function to run. This must be fast and non-blocking.
-+ *  <info>    An arbitrary pointer to pass to the function.
-+ *  <nonatomic>       Currently unused.
-+ *  <wait>    If true, wait until function has completed on other CPUs.
-+ *  [RETURNS]   0 on success, else a negative status code.
-+ *
-+ * Does not return until the remote CPU is nearly ready to execute <func>
-+ * or is or has executed.
-+ */
-+
-+int smp_call_function_single (int cpu, void (*func) (void *info), void *info, 
-+      int nonatomic, int wait)
-+{
-+      
-+      int me = get_cpu(); /* prevent preemption and reschedule on another processor */
-+
-+      if (cpu == me) {
-+              printk("%s: trying to call self\n", __func__);
-+              put_cpu();
-+              return -EBUSY;
-+      }
-+      spin_lock_bh(&call_lock);
-+
-+      __smp_call_function_single(cpu, func,info,nonatomic,wait);      
-+
-+      spin_unlock_bh(&call_lock);
-+      put_cpu();
-+      return 0;
-+}
-+
-+/*
-  * this function sends a 'generic call function' IPI to all other CPUs
-  * in the system.
-  */
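
The hunk above adds smp_call_function_single(), which reuses the call_data/IPI machinery of smp_call_function() but sends CALL_FUNCTION_VECTOR to exactly one CPU via send_IPI_mask(cpumask_of_cpu(cpu), ...). A hedged usage sketch; the callback and caller are invented for illustration, and per the comment in the hunk the callback must be fast and non-blocking:

/* Run a trivial callback on CPU `target` and wait for it to finish.
 * nonatomic is unused by the implementation above; wait=1 blocks until done. */
static void remote_ping(void *info)
{
        (*(int *)info)++;               /* executes on the target CPU, in interrupt context */
}

static int ping_cpu(int target)
{
        int count = 0;

        if (smp_call_function_single(target, remote_ping, &count, 1, 1))
                return -1;              /* -EBUSY when target is the current CPU */
        return count;                   /* 1 once the remote handler has run */
}
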
-diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
---- a/arch/x86_64/kernel/smpboot.c
-+++ b/arch/x86_64/kernel/smpboot.c
-@@ -202,9 +202,6 @@ static __cpuinit void sync_master(void *
- {
-       unsigned long flags, i;
- 
--      if (smp_processor_id() != boot_cpu_id)
--              return;
--
-       go[MASTER] = 0;
- 
-       local_irq_save(flags);
-@@ -253,7 +250,7 @@ get_delta(long *rt, long *master)
-       return tcenter - best_tm;
- }
- 
--static __cpuinit void sync_tsc(void)
-+static __cpuinit void sync_tsc(unsigned int master)
- {
-       int i, done = 0;
-       long delta, adj, adjust_latency = 0;
-@@ -267,9 +264,17 @@ static __cpuinit void sync_tsc(void)
-       } t[NUM_ROUNDS] __cpuinitdata;
- #endif
- 
-+      printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
-+              smp_processor_id(), master);
-+
-       go[MASTER] = 1;
- 
--      smp_call_function(sync_master, NULL, 1, 0);
-+      /* It is dangerous to broadcast IPI as cpus are coming up,
-+       * as they may not be ready to accept them.  So since
-+       * we only need to send the ipi to the boot cpu direct
-+       * the message, and avoid the race.
-+       */
-+      smp_call_function_single(master, sync_master, NULL, 1, 0);
- 
-       while (go[MASTER])      /* wait for master to be ready */
-               no_cpu_relax();
-@@ -313,16 +318,14 @@ static __cpuinit void sync_tsc(void)
-       printk(KERN_INFO
-              "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
-              "maxerr %lu cycles)\n",
--             smp_processor_id(), boot_cpu_id, delta, rt);
-+             smp_processor_id(), master, delta, rt);
- }
- 
- static void __cpuinit tsc_sync_wait(void)
- {
-       if (notscsync || !cpu_has_tsc)
-               return;
--      printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
--                      boot_cpu_id);
--      sync_tsc();
-+      sync_tsc(0);
- }
- 
- static __init int notscsync_setup(char *s)
-diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
---- a/drivers/acpi/pci_irq.c
-+++ b/drivers/acpi/pci_irq.c
-@@ -433,8 +433,9 @@ acpi_pci_irq_enable (
-               printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI",
-                       pci_name(dev), ('A' + pin));
-               /* Interrupt Line values above 0xF are forbidden */
--              if (dev->irq >= 0 && (dev->irq <= 0xF)) {
-+              if (dev->irq > 0 && (dev->irq <= 0xF)) {
-                       printk(" - using IRQ %d\n", dev->irq);
-+                      acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
-                       return_VALUE(0);
-               }
-               else {
-diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
---- a/drivers/char/rocket.c
-+++ b/drivers/char/rocket.c
-@@ -277,7 +277,7 @@ static void rp_do_receive(struct r_port 
-               ToRecv = space;
- 
-       if (ToRecv <= 0)
--              return;
-+              goto done;
- 
-       /*
-        * if status indicates there are errored characters in the
-@@ -359,6 +359,7 @@ static void rp_do_receive(struct r_port 
-       }
-       /*  Push the data up to the tty layer */
-       ld->receive_buf(tty, tty->flip.char_buf, tty->flip.flag_buf, count);
-+done:
-       tty_ldisc_deref(ld);
- }
- 
-diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
---- a/drivers/char/tpm/tpm.c
-+++ b/drivers/char/tpm/tpm.c
-@@ -32,12 +32,6 @@
- 
- #define       TPM_BUFSIZE                     2048
- 
--/* PCI configuration addresses */
--#define       PCI_GEN_PMCON_1                 0xA0
--#define       PCI_GEN1_DEC                    0xE4
--#define       PCI_LPC_EN                      0xE6
--#define       PCI_GEN2_DEC                    0xEC
--
- static LIST_HEAD(tpm_chip_list);
- static DEFINE_SPINLOCK(driver_lock);
- static int dev_mask[32];
-@@ -61,72 +55,6 @@ void tpm_time_expired(unsigned long ptr)
- EXPORT_SYMBOL_GPL(tpm_time_expired);
- 
- /*
-- * Initialize the LPC bus and enable the TPM ports
-- */
--int tpm_lpc_bus_init(struct pci_dev *pci_dev, u16 base)
--{
--      u32 lpcenable, tmp;
--      int is_lpcm = 0;
--
--      switch (pci_dev->vendor) {
--      case PCI_VENDOR_ID_INTEL:
--              switch (pci_dev->device) {
--              case PCI_DEVICE_ID_INTEL_82801CA_12:
--              case PCI_DEVICE_ID_INTEL_82801DB_12:
--                      is_lpcm = 1;
--                      break;
--              }
--              /* init ICH (enable LPC) */
--              pci_read_config_dword(pci_dev, PCI_GEN1_DEC, &lpcenable);
--              lpcenable |= 0x20000000;
--              pci_write_config_dword(pci_dev, PCI_GEN1_DEC, lpcenable);
--
--              if (is_lpcm) {
--                      pci_read_config_dword(pci_dev, PCI_GEN1_DEC,
--                                            &lpcenable);
--                      if ((lpcenable & 0x20000000) == 0) {
--                              dev_err(&pci_dev->dev,
--                                      "cannot enable LPC\n");
--                              return -ENODEV;
--                      }
--              }
--
--              /* initialize TPM registers */
--              pci_read_config_dword(pci_dev, PCI_GEN2_DEC, &tmp);
--
--              if (!is_lpcm)
--                      tmp = (tmp & 0xFFFF0000) | (base & 0xFFF0);
--              else
--                      tmp =
--                          (tmp & 0xFFFF0000) | (base & 0xFFF0) |
--                          0x00000001;
--
--              pci_write_config_dword(pci_dev, PCI_GEN2_DEC, tmp);
--
--              if (is_lpcm) {
--                      pci_read_config_dword(pci_dev, PCI_GEN_PMCON_1,
--                                            &tmp);
--                      tmp |= 0x00000004;      /* enable CLKRUN */
--                      pci_write_config_dword(pci_dev, PCI_GEN_PMCON_1,
--                                             tmp);
--              }
--              tpm_write_index(0x0D, 0x55);    /* unlock 4F */
--              tpm_write_index(0x0A, 0x00);    /* int disable */
--              tpm_write_index(0x08, base);    /* base addr lo */
--              tpm_write_index(0x09, (base & 0xFF00) >> 8);    /* base addr hi */
--              tpm_write_index(0x0D, 0xAA);    /* lock 4F */
--              break;
--      case PCI_VENDOR_ID_AMD:
--              /* nothing yet */
--              break;
--      }
--
--      return 0;
--}
--
--EXPORT_SYMBOL_GPL(tpm_lpc_bus_init);
--
--/*
-  * Internal kernel interface to transmit TPM commands
-  */
- static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
-@@ -590,10 +518,6 @@ int tpm_pm_resume(struct pci_dev *pci_de
-       if (chip == NULL)
-               return -ENODEV;
- 
--      spin_lock(&driver_lock);
--      tpm_lpc_bus_init(pci_dev, chip->vendor->base);
--      spin_unlock(&driver_lock);
--
-       return 0;
- }
- 
-diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
---- a/drivers/char/tpm/tpm.h
-+++ b/drivers/char/tpm/tpm.h
-@@ -79,8 +79,6 @@ static inline void tpm_write_index(int i
- }
- 
- extern void tpm_time_expired(unsigned long);
--extern int tpm_lpc_bus_init(struct pci_dev *, u16);
--
- extern int tpm_register_hardware(struct pci_dev *,
-                                struct tpm_vendor_specific *);
- extern int tpm_open(struct inode *, struct file *);
-diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
---- a/drivers/char/tpm/tpm_atmel.c
-+++ b/drivers/char/tpm/tpm_atmel.c
-@@ -22,7 +22,10 @@
- #include "tpm.h"
- 
- /* Atmel definitions */
--#define       TPM_ATML_BASE                   0x400
-+enum tpm_atmel_addr {
-+      TPM_ATMEL_BASE_ADDR_LO = 0x08,
-+      TPM_ATMEL_BASE_ADDR_HI = 0x09
-+};
- 
- /* write status bits */
- #define       ATML_STATUS_ABORT               0x01
-@@ -127,7 +130,6 @@ static struct tpm_vendor_specific tpm_at
-       .cancel = tpm_atml_cancel,
-       .req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
-       .req_complete_val = ATML_STATUS_DATA_AVAIL,
--      .base = TPM_ATML_BASE,
-       .miscdev = { .fops = &atmel_ops, },
- };
- 
-@@ -136,14 +138,16 @@ static int __devinit tpm_atml_init(struc
- {
-       u8 version[4];
-       int rc = 0;
-+      int lo, hi;
- 
-       if (pci_enable_device(pci_dev))
-               return -EIO;
- 
--      if (tpm_lpc_bus_init(pci_dev, TPM_ATML_BASE)) {
--              rc = -ENODEV;
--              goto out_err;
--      }
-+      lo = tpm_read_index( TPM_ATMEL_BASE_ADDR_LO );
-+      hi = tpm_read_index( TPM_ATMEL_BASE_ADDR_HI );
-+
-+      tpm_atmel.base = (hi<<8)|lo;
-+      dev_dbg( &pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base);
- 
-       /* verify that it is an Atmel part */
-       if (tpm_read_index(4) != 'A' || tpm_read_index(5) != 'T'
-diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
---- a/drivers/char/tpm/tpm_nsc.c
-+++ b/drivers/char/tpm/tpm_nsc.c
-@@ -24,6 +24,10 @@
- /* National definitions */
- #define       TPM_NSC_BASE                    0x360
- #define       TPM_NSC_IRQ                     0x07
-+#define       TPM_NSC_BASE0_HI                0x60
-+#define       TPM_NSC_BASE0_LO                0x61
-+#define       TPM_NSC_BASE1_HI                0x62
-+#define       TPM_NSC_BASE1_LO                0x63
- 
- #define       NSC_LDN_INDEX                   0x07
- #define       NSC_SID_INDEX                   0x20
-@@ -234,7 +238,6 @@ static struct tpm_vendor_specific tpm_ns
-       .cancel = tpm_nsc_cancel,
-       .req_complete_mask = NSC_STATUS_OBF,
-       .req_complete_val = NSC_STATUS_OBF,
--      .base = TPM_NSC_BASE,
-       .miscdev = { .fops = &nsc_ops, },
-       
- };
-@@ -243,15 +246,16 @@ static int __devinit tpm_nsc_init(struct
-                                 const struct pci_device_id *pci_id)
- {
-       int rc = 0;
-+      int lo, hi;
-+
-+      hi = tpm_read_index(TPM_NSC_BASE0_HI);
-+      lo = tpm_read_index(TPM_NSC_BASE0_LO);
-+
-+      tpm_nsc.base = (hi<<8) | lo;
- 
-       if (pci_enable_device(pci_dev))
-               return -EIO;
- 
--      if (tpm_lpc_bus_init(pci_dev, TPM_NSC_BASE)) {
--              rc = -ENODEV;
--              goto out_err;
--      }
--
-       /* verify that it is a National part (SID) */
-       if (tpm_read_index(NSC_SID_INDEX) != 0xEF) {
-               rc = -ENODEV;
-diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
---- a/drivers/char/tty_ioctl.c
-+++ b/drivers/char/tty_ioctl.c
-@@ -476,11 +476,11 @@ int n_tty_ioctl(struct tty_struct * tty,
-                       ld = tty_ldisc_ref(tty);
-                       switch (arg) {
-                       case TCIFLUSH:
--                              if (ld->flush_buffer)
-+                              if (ld && ld->flush_buffer)
-                                       ld->flush_buffer(tty);
-                               break;
-                       case TCIOFLUSH:
--                              if (ld->flush_buffer)
-+                              if (ld && ld->flush_buffer)
-                                       ld->flush_buffer(tty);
-                               /* fall through */
-                       case TCOFLUSH:
-diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
---- a/drivers/media/video/cx88/cx88-video.c
-+++ b/drivers/media/video/cx88/cx88-video.c
-@@ -261,7 +261,7 @@ static struct cx88_ctrl cx8800_ctls[] = 
-                       .default_value = 0,
-                       .type          = V4L2_CTRL_TYPE_INTEGER,
-               },
--              .off                   = 0,
-+              .off                   = 128,
-               .reg                   = MO_HUE,
-               .mask                  = 0x00ff,
-               .shift                 = 0,
-diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
---- a/drivers/net/e1000/e1000_main.c
-+++ b/drivers/net/e1000/e1000_main.c
-@@ -2307,6 +2307,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
-       tso = e1000_tso(adapter, skb);
-       if (tso < 0) {
-               dev_kfree_skb_any(skb);
-+              spin_unlock_irqrestore(&adapter->tx_lock, flags);
-               return NETDEV_TX_OK;
-       }
- 
-diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
---- a/drivers/net/hamradio/Kconfig
-+++ b/drivers/net/hamradio/Kconfig
-@@ -17,7 +17,7 @@ config MKISS
- 
- config 6PACK
-       tristate "Serial port 6PACK driver"
--      depends on AX25 && BROKEN_ON_SMP
-+      depends on AX25
-       ---help---
-         6pack is a transmission protocol for the data exchange between your
-         PC and your TNC (the Terminal Node Controller acts as a kind of
-diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
---- a/drivers/net/shaper.c
-+++ b/drivers/net/shaper.c
-@@ -135,10 +135,8 @@ static int shaper_start_xmit(struct sk_b
- {
-       struct shaper *shaper = dev->priv;
-       struct sk_buff *ptr;
--   
--      if (down_trylock(&shaper->sem))
--              return -1;
- 
-+      spin_lock(&shaper->lock);
-       ptr=shaper->sendq.prev;
-       
-       /*
-@@ -232,7 +230,7 @@ static int shaper_start_xmit(struct sk_b
-                 shaper->stats.collisions++;
-       }
-       shaper_kick(shaper);
--      up(&shaper->sem);
-+      spin_unlock(&shaper->lock);
-       return 0;
- }
- 
-@@ -271,11 +269,9 @@ static void shaper_timer(unsigned long d
- {
-       struct shaper *shaper = (struct shaper *)data;
- 
--      if (!down_trylock(&shaper->sem)) {
--              shaper_kick(shaper);
--              up(&shaper->sem);
--      } else
--              mod_timer(&shaper->timer, jiffies);
-+      spin_lock(&shaper->lock);
-+      shaper_kick(shaper);
-+      spin_unlock(&shaper->lock);
- }
- 
- /*
-@@ -332,21 +328,6 @@ static void shaper_kick(struct shaper *s
- 
- 
- /*
-- *    Flush the shaper queues on a closedown
-- */
-- 
--static void shaper_flush(struct shaper *shaper)
--{
--      struct sk_buff *skb;
--
--      down(&shaper->sem);
--      while((skb=skb_dequeue(&shaper->sendq))!=NULL)
--              dev_kfree_skb(skb);
--      shaper_kick(shaper);
--      up(&shaper->sem);
--}
--
--/*
-  *    Bring the interface up. We just disallow this until a 
-  *    bind.
-  */
-@@ -375,7 +356,15 @@ static int shaper_open(struct net_device
- static int shaper_close(struct net_device *dev)
- {
-       struct shaper *shaper=dev->priv;
--      shaper_flush(shaper);
-+      struct sk_buff *skb;
-+
-+      while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
-+              dev_kfree_skb(skb);
-+
-+      spin_lock_bh(&shaper->lock);
-+      shaper_kick(shaper);
-+      spin_unlock_bh(&shaper->lock);
-+
-       del_timer_sync(&shaper->timer);
-       return 0;
- }
-@@ -576,6 +565,7 @@ static void shaper_init_priv(struct net_
-       init_timer(&sh->timer);
-       sh->timer.function=shaper_timer;
-       sh->timer.data=(unsigned long)sh;
-+      spin_lock_init(&sh->lock);
- }
- 
- /*
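
The shaper hunks above trade the driver's semaphore for a spinlock: shaper_start_xmit() and shaper_timer() take shaper->lock directly, shaper_close(), which runs in process context, uses the _bh variant so the timer cannot run underneath it, and the old down_trylock()-and-retry dance in the timer goes away. A minimal sketch of that pattern with illustrative names (DEFINE_SPINLOCK is already used elsewhere in this patch):

static DEFINE_SPINLOCK(example_lock);

/* Timer (softirq) side: a plain spin_lock suffices, as in the patched shaper_timer(). */
static void example_timer_fn(unsigned long data)
{
        spin_lock(&example_lock);
        /* ... kick the queue ... */
        spin_unlock(&example_lock);
}

/* Process-context side: disable bottom halves so the timer cannot run
 * on this CPU while the lock is held, as in the patched shaper_close(). */
static void example_teardown(void)
{
        spin_lock_bh(&example_lock);
        /* ... flush and kick ... */
        spin_unlock_bh(&example_lock);
}
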
-diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
---- a/drivers/pci/pci-driver.c
-+++ b/drivers/pci/pci-driver.c
-@@ -396,7 +396,7 @@ int pci_register_driver(struct pci_drive
-       /* FIXME, once all of the existing PCI drivers have been fixed to set
-        * the pci shutdown function, this test can go away. */
-       if (!drv->driver.shutdown)
--              drv->driver.shutdown = pci_device_shutdown,
-+              drv->driver.shutdown = pci_device_shutdown;
-       drv->driver.owner = drv->owner;
-       drv->driver.kobj.ktype = &pci_driver_kobj_type;
-       pci_init_dynids(&drv->dynids);
-diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
---- a/drivers/scsi/qla2xxx/qla_init.c
-+++ b/drivers/scsi/qla2xxx/qla_init.c
-@@ -1914,9 +1914,11 @@ qla2x00_reg_remote_port(scsi_qla_host_t 
-               rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
- 
-       fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
--      if (!rport)
-+      if (!rport) {
-               qla_printk(KERN_WARNING, ha,
-                   "Unable to allocate fc remote port!\n");
-+              return;
-+      }
- 
-       if (rport->scsi_target_id != -1 && rport->scsi_target_id < MAX_TARGETS)
-               fcport->os_target_id = rport->scsi_target_id;
-diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
---- a/drivers/scsi/qla2xxx/qla_os.c
-+++ b/drivers/scsi/qla2xxx/qla_os.c
-@@ -1150,7 +1150,7 @@ iospace_error_exit:
-  */
- int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
- {
--      int     ret;
-+      int     ret = -ENODEV;
-       device_reg_t __iomem *reg;
-       struct Scsi_Host *host;
-       scsi_qla_host_t *ha;
-@@ -1161,7 +1161,7 @@ int qla2x00_probe_one(struct pci_dev *pd
-       fc_port_t *fcport;
- 
-       if (pci_enable_device(pdev))
--              return -1;
-+              goto probe_out;
- 
-       host = scsi_host_alloc(&qla2x00_driver_template,
-           sizeof(scsi_qla_host_t));
-@@ -1183,9 +1183,8 @@ int qla2x00_probe_one(struct pci_dev *pd
- 
-       /* Configure PCI I/O space */
-       ret = qla2x00_iospace_config(ha);
--      if (ret != 0) {
--              goto probe_alloc_failed;
--      }
-+      if (ret)
-+              goto probe_failed;
- 
-       /* Sanitize the information from PCI BIOS. */
-       host->irq = pdev->irq;
-@@ -1258,23 +1257,10 @@ int qla2x00_probe_one(struct pci_dev *pd
-               qla_printk(KERN_WARNING, ha,
-                   "[ERROR] Failed to allocate memory for adapter\n");
- 
--              goto probe_alloc_failed;
-+              ret = -ENOMEM;
-+              goto probe_failed;
-       }
- 
--      pci_set_drvdata(pdev, ha);
--      host->this_id = 255;
--      host->cmd_per_lun = 3;
--      host->unique_id = ha->instance;
--      host->max_cmd_len = MAX_CMDSZ;
--      host->max_channel = ha->ports - 1;
--      host->max_id = ha->max_targets;
--      host->max_lun = ha->max_luns;
--      host->transportt = qla2xxx_transport_template;
--      if (scsi_add_host(host, &pdev->dev))
--              goto probe_alloc_failed;
--
--      qla2x00_alloc_sysfs_attr(ha);
--
-       if (qla2x00_initialize_adapter(ha) &&
-           !(ha->device_flags & DFLG_NO_CABLE)) {
- 
-@@ -1285,11 +1271,10 @@ int qla2x00_probe_one(struct pci_dev *pd
-                   "Adapter flags %x.\n",
-                   ha->host_no, ha->device_flags));
- 
-+              ret = -ENODEV;
-               goto probe_failed;
-       }
- 
--      qla2x00_init_host_attr(ha);
--
-       /*
-        * Startup the kernel thread for this host adapter
-        */
-@@ -1299,17 +1284,26 @@ int qla2x00_probe_one(struct pci_dev *pd
-               qla_printk(KERN_WARNING, ha,
-                   "Unable to start DPC thread!\n");
- 
-+              ret = -ENODEV;
-               goto probe_failed;
-       }
-       wait_for_completion(&ha->dpc_inited);
- 
-+      host->this_id = 255;
-+      host->cmd_per_lun = 3;
-+      host->unique_id = ha->instance;
-+      host->max_cmd_len = MAX_CMDSZ;
-+      host->max_channel = ha->ports - 1;
-+      host->max_lun = MAX_LUNS;
-+      host->transportt = qla2xxx_transport_template;
-+
-       if (IS_QLA2100(ha) || IS_QLA2200(ha))
-               ret = request_irq(host->irq, qla2100_intr_handler,
-                   SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
-       else
-               ret = request_irq(host->irq, qla2300_intr_handler,
-                   SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
--      if (ret != 0) {
-+      if (ret) {
-               qla_printk(KERN_WARNING, ha,
-                   "Failed to reserve interrupt %d already in use.\n",
-                   host->irq);
-@@ -1363,9 +1357,18 @@ int qla2x00_probe_one(struct pci_dev *pd
-               msleep(10);
-       }
- 
-+      pci_set_drvdata(pdev, ha);
-       ha->flags.init_done = 1;
-       num_hosts++;
- 
-+      ret = scsi_add_host(host, &pdev->dev);
-+      if (ret)
-+              goto probe_failed;
-+
-+      qla2x00_alloc_sysfs_attr(ha);
-+
-+      qla2x00_init_host_attr(ha);
-+
-       qla_printk(KERN_INFO, ha, "\n"
-           " QLogic Fibre Channel HBA Driver: %s\n"
-           "  QLogic %s - %s\n"
-@@ -1384,9 +1387,6 @@ int qla2x00_probe_one(struct pci_dev *pd
- probe_failed:
-       fc_remove_host(ha->host);
- 
--      scsi_remove_host(host);
--
--probe_alloc_failed:
-       qla2x00_free_device(ha);
- 
-       scsi_host_put(host);
-@@ -1394,7 +1394,8 @@ probe_alloc_failed:
- probe_disable_device:
-       pci_disable_device(pdev);
- 
--      return -1;
-+probe_out:
-+      return ret;
- }
- EXPORT_SYMBOL_GPL(qla2x00_probe_one);
- 
-diff --git a/fs/bio.c b/fs/bio.c
---- a/fs/bio.c
-+++ b/fs/bio.c
-@@ -261,6 +261,7 @@ inline void __bio_clone(struct bio *bio,
-        */
-       bio->bi_vcnt = bio_src->bi_vcnt;
-       bio->bi_size = bio_src->bi_size;
-+      bio->bi_idx = bio_src->bi_idx;
-       bio_phys_segments(q, bio);
-       bio_hw_segments(q, bio);
- }
-diff --git a/fs/char_dev.c b/fs/char_dev.c
---- a/fs/char_dev.c
-+++ b/fs/char_dev.c
-@@ -139,7 +139,7 @@ __unregister_chrdev_region(unsigned majo
-       struct char_device_struct *cd = NULL, **cp;
-       int i = major_to_index(major);
- 
--      up(&chrdevs_lock);
-+      down(&chrdevs_lock);
-       for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
-               if ((*cp)->major == major &&
-                   (*cp)->baseminor == baseminor &&
-diff --git a/fs/exec.c b/fs/exec.c
---- a/fs/exec.c
-+++ b/fs/exec.c
-@@ -649,6 +649,7 @@ static inline int de_thread(struct task_
-       }
-       sig->group_exit_task = NULL;
-       sig->notify_count = 0;
-+      sig->real_timer.data = (unsigned long)current;
-       spin_unlock_irq(lock);
- 
-       /*
-diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
---- a/fs/isofs/compress.c
-+++ b/fs/isofs/compress.c
-@@ -129,8 +129,14 @@ static int zisofs_readpage(struct file *
-       cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
-       brelse(bh);
- 
-+      if (cstart > cend)
-+              goto eio;
-+              
-       csize = cend-cstart;
- 
-+      if (csize > deflateBound(1UL << zisofs_block_shift))
-+              goto eio;
-+
-       /* Now page[] contains an array of pages, any of which can be NULL,
-          and the locks on which we hold.  We should now read the data and
-          release the pages.  If the pages are NULL the decompressed data
-diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
---- a/include/asm-i386/string.h
-+++ b/include/asm-i386/string.h
-@@ -116,7 +116,8 @@ __asm__ __volatile__(
-       "orb $1,%%al\n"
-       "3:"
-       :"=a" (__res), "=&S" (d0), "=&D" (d1)
--                   :"1" (cs),"2" (ct));
-+      :"1" (cs),"2" (ct)
-+      :"memory");
- return __res;
- }
- 
-@@ -138,8 +139,9 @@ __asm__ __volatile__(
-       "3:\tsbbl %%eax,%%eax\n\t"
-       "orb $1,%%al\n"
-       "4:"
--                   :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
--                   :"1" (cs),"2" (ct),"3" (count));
-+      :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
-+      :"1" (cs),"2" (ct),"3" (count)
-+      :"memory");
- return __res;
- }
- 
-@@ -158,7 +160,9 @@ __asm__ __volatile__(
-       "movl $1,%1\n"
-       "2:\tmovl %1,%0\n\t"
-       "decl %0"
--      :"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
-+      :"=a" (__res), "=&S" (d0)
-+      :"1" (s),"0" (c)
-+      :"memory");
- return __res;
- }
- 
-@@ -175,7 +179,9 @@ __asm__ __volatile__(
-       "leal -1(%%esi),%0\n"
-       "2:\ttestb %%al,%%al\n\t"
-       "jne 1b"
--      :"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
-+      :"=g" (__res), "=&S" (d0), "=&a" (d1)
-+      :"0" (0),"1" (s),"2" (c)
-+      :"memory");
- return __res;
- }
- 
-@@ -189,7 +195,9 @@ __asm__ __volatile__(
-       "scasb\n\t"
-       "notl %0\n\t"
-       "decl %0"
--      :"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu));
-+      :"=c" (__res), "=&D" (d0)
-+      :"1" (s),"a" (0), "0" (0xffffffffu)
-+      :"memory");
- return __res;
- }
- 
-@@ -333,7 +341,9 @@ __asm__ __volatile__(
-       "je 1f\n\t"
-       "movl $1,%0\n"
-       "1:\tdecl %0"
--      :"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
-+      :"=D" (__res), "=&c" (d0)
-+      :"a" (c),"0" (cs),"1" (count)
-+      :"memory");
- return __res;
- }
- 
-@@ -369,7 +379,7 @@ __asm__ __volatile__(
-       "je 2f\n\t"
-       "stosb\n"
-       "2:"
--      : "=&c" (d0), "=&D" (d1)
-+      :"=&c" (d0), "=&D" (d1)
-       :"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
-       :"memory");
- return (s);   
-@@ -392,7 +402,8 @@ __asm__ __volatile__(
-       "jne 1b\n"
-       "3:\tsubl %2,%0"
-       :"=a" (__res), "=&d" (d0)
--      :"c" (s),"1" (count));
-+      :"c" (s),"1" (count)
-+      :"memory");
- return __res;
- }
- /* end of additional stuff */
-@@ -473,7 +484,8 @@ static inline void * memscan(void * addr
-               "dec %%edi\n"
-               "1:"
-               : "=D" (addr), "=c" (size)
--              : "0" (addr), "1" (size), "a" (c));
-+              : "0" (addr), "1" (size), "a" (c)
-+              : "memory");
-       return addr;
- }
- 
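
Every string.h hunk above adds a "memory" clobber to the inline assembly. Without it, GCC may assume the asm touches nothing beyond its listed operands and can keep values cached in registers or reorder surrounding loads and stores across a strcmp()/strlen()/memscan() call. A tiny self-contained example of the constraint syntax, deliberately simpler than the real string routines:

/* The third colon section lists clobbers; "memory" tells the compiler the
 * asm may read or write memory it cannot see through the operands. */
static inline void zero_first_byte(char *p)
{
        __asm__ __volatile__("movb $0, (%0)"
                             :                  /* no outputs */
                             : "r" (p)          /* input: pointer kept in a register */
                             : "memory");       /* forces *p to be reloaded afterwards */
}
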
-diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
---- a/include/asm-x86_64/smp.h
-+++ b/include/asm-x86_64/smp.h
-@@ -46,6 +46,8 @@ extern int pic_mode;
- extern int smp_num_siblings;
- extern void smp_flush_tlb(void);
- extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
-+extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
-+                                   int retry, int wait);
- extern void smp_send_reschedule(int cpu);
- extern void smp_invalidate_rcv(void);         /* Process an NMI */
- extern void zap_low_mappings(void);
-diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h
---- a/include/linux/if_shaper.h
-+++ b/include/linux/if_shaper.h
-@@ -23,7 +23,7 @@ struct shaper
-       __u32 shapeclock;
-       unsigned long recovery; /* Time we can next clock a packet out on
-                                  an empty queue */
--      struct semaphore sem;
-+      spinlock_t lock;
-         struct net_device_stats stats;
-       struct net_device *dev;
-       int  (*hard_start_xmit) (struct sk_buff *skb,
-diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
---- a/include/linux/skbuff.h
-+++ b/include/linux/skbuff.h
-@@ -1192,7 +1192,7 @@ static inline void *skb_header_pointer(c
- {
-       int hlen = skb_headlen(skb);
- 
--      if (offset + len <= hlen)
-+      if (hlen - offset >= len)
-               return skb->data + offset;
- 
-       if (skb_copy_bits(skb, offset, buffer, len) < 0)
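
The skb_header_pointer() change above swaps "offset + len <= hlen" for "hlen - offset >= len". With int arithmetic the old form can overflow when a caller passes a huge length, wrapping negative and slipping past the bounds check; the rewritten test never computes offset + len. A user-space illustration with made-up values:

#include <assert.h>
#include <limits.h>

/* Mirrors the rewritten kernel test: true when len bytes starting at
 * offset fit inside the first hlen bytes, without adding offset + len. */
static int fits_in_header(int hlen, int offset, int len)
{
        return hlen - offset >= len;
}

int main(void)
{
        assert(fits_in_header(64, 20, 16));             /* ordinary case: 44 >= 16 */
        assert(!fits_in_header(64, 20, INT_MAX));       /* huge len rejected, no overflow */
        return 0;
}
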
-diff --git a/include/linux/zlib.h b/include/linux/zlib.h
---- a/include/linux/zlib.h
-+++ b/include/linux/zlib.h
-@@ -506,6 +506,11 @@ extern int zlib_deflateReset (z_streamp 
-    stream state was inconsistent (such as zalloc or state being NULL).
- */
- 
-+static inline unsigned long deflateBound(unsigned long s)
-+{
-+      return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
-+}
-+
- extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
- /*
-      Dynamically update the compression level and compression strategy.  The
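
The new deflateBound() helper above gives a conservative upper bound on how large deflate output can get for s input bytes, and the isofs hunk earlier in this patch uses it to reject a compressed extent whose recorded size exceeds that bound. Worked through for a hypothetical 32 KiB zisofs block (zisofs_block_shift == 15):

#include <assert.h>

/* Same expression as the deflateBound() added above, evaluated once. */
static unsigned long bound(unsigned long s)
{
        return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
}

int main(void)
{
        /* 32768 + 4096 + 512 + 11 == 37387: any larger csize is impossible. */
        assert(bound(32768) == 37387);
        return 0;
}
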
-diff --git a/kernel/module.c b/kernel/module.c
---- a/kernel/module.c
-+++ b/kernel/module.c
-@@ -249,13 +249,18 @@ static inline unsigned int block_size(in
- /* Created by linker magic */
- extern char __per_cpu_start[], __per_cpu_end[];
- 
--static void *percpu_modalloc(unsigned long size, unsigned long align)
-+static void *percpu_modalloc(unsigned long size, unsigned long align,
-+                           const char *name)
- {
-       unsigned long extra;
-       unsigned int i;
-       void *ptr;
- 
--      BUG_ON(align > SMP_CACHE_BYTES);
-+      if (align > SMP_CACHE_BYTES) {
-+              printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
-+                     name, align, SMP_CACHE_BYTES);
-+              align = SMP_CACHE_BYTES;
-+      }
- 
-       ptr = __per_cpu_start;
-       for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
-@@ -347,7 +352,8 @@ static int percpu_modinit(void)
- }     
- __initcall(percpu_modinit);
- #else /* ... !CONFIG_SMP */
--static inline void *percpu_modalloc(unsigned long size, unsigned long align)
-+static inline void *percpu_modalloc(unsigned long size, unsigned long align,
-+                                  const char *name)
- {
-       return NULL;
- }
-@@ -1554,7 +1560,8 @@ static struct module *load_module(void _
-       if (pcpuindex) {
-               /* We have a special allocation for this section. */
-               percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
--                                       sechdrs[pcpuindex].sh_addralign);
-+                                       sechdrs[pcpuindex].sh_addralign,
-+                                       mod->name);
-               if (!percpu) {
-                       err = -ENOMEM;
-                       goto free_mod;
-diff --git a/lib/inflate.c b/lib/inflate.c
---- a/lib/inflate.c
-+++ b/lib/inflate.c
-@@ -326,7 +326,7 @@ DEBG("huft1 ");
-   {
-     *t = (struct huft *)NULL;
-     *m = 0;
--    return 0;
-+    return 2;
-   }
- 
- DEBG("huft2 ");
-@@ -374,6 +374,7 @@ DEBG("huft5 ");
-     if ((j = *p++) != 0)
-       v[x[j]++] = i;
-   } while (++i < n);
-+  n = x[g];                   /* set n to length of v */
- 
- DEBG("h6 ");
- 
-@@ -410,12 +411,13 @@ DEBG1("1 ");
- DEBG1("2 ");
-           f -= a + 1;           /* deduct codes from patterns left */
-           xp = c + k;
--          while (++j < z)       /* try smaller tables up to z bits */
--          {
--            if ((f <<= 1) <= *++xp)
--              break;            /* enough codes to use up j bits */
--            f -= *xp;           /* else deduct codes from patterns */
--          }
-+          if (j < z)
-+            while (++j < z)       /* try smaller tables up to z bits */
-+            {
-+              if ((f <<= 1) <= *++xp)
-+                break;            /* enough codes to use up j bits */
-+              f -= *xp;           /* else deduct codes from patterns */
-+            }
-         }
- DEBG1("3 ");
-         z = 1 << j;             /* table entries for j-bit table */
-diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c
---- a/lib/zlib_inflate/inftrees.c
-+++ b/lib/zlib_inflate/inftrees.c
-@@ -141,7 +141,7 @@ static int huft_build(
-   {
-     *t = NULL;
-     *m = 0;
--    return Z_OK;
-+    return Z_DATA_ERROR;
-   }
- 
- 
-diff --git a/mm/memory.c b/mm/memory.c
---- a/mm/memory.c
-+++ b/mm/memory.c
-@@ -1164,7 +1164,7 @@ int remap_pfn_range(struct vm_area_struc
- {
-       pgd_t *pgd;
-       unsigned long next;
--      unsigned long end = addr + size;
-+      unsigned long end = addr + PAGE_ALIGN(size);
-       struct mm_struct *mm = vma->vm_mm;
-       int err;
- 
-diff --git a/mm/mempolicy.c b/mm/mempolicy.c
---- a/mm/mempolicy.c
-+++ b/mm/mempolicy.c
-@@ -409,7 +409,7 @@ asmlinkage long sys_set_mempolicy(int mo
-       struct mempolicy *new;
-       DECLARE_BITMAP(nodes, MAX_NUMNODES);
- 
--      if (mode > MPOL_MAX)
-+      if (mode < 0 || mode > MPOL_MAX)
-               return -EINVAL;
-       err = get_nodes(nodes, nmask, maxnode, mode);
-       if (err)
-diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
---- a/net/8021q/vlan.c
-+++ b/net/8021q/vlan.c
-@@ -578,6 +578,14 @@ static int vlan_device_event(struct noti
-                       if (!vlandev)
-                               continue;
- 
-+                      if (netif_carrier_ok(dev)) {
-+                              if (!netif_carrier_ok(vlandev))
-+                                      netif_carrier_on(vlandev);
-+                      } else {
-+                              if (netif_carrier_ok(vlandev))
-+                                      netif_carrier_off(vlandev);
-+                      }
-+
-                       if ((vlandev->state & VLAN_LINK_STATE_MASK) != flgs) {
-                               vlandev->state = (vlandev->state &~ 
VLAN_LINK_STATE_MASK) 
-                                       | flgs;
-diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
---- a/net/ipv4/ip_output.c
-+++ b/net/ipv4/ip_output.c
-@@ -111,7 +111,6 @@ static int ip_dev_loopback_xmit(struct s
- #ifdef CONFIG_NETFILTER_DEBUG
-       nf_debug_ip_loopback_xmit(newskb);
- #endif
--      nf_reset(newskb);
-       netif_rx(newskb);
-       return 0;
- }
-@@ -196,8 +195,6 @@ static inline int ip_finish_output2(stru
-       nf_debug_ip_finish_output2(skb);
- #endif /*CONFIG_NETFILTER_DEBUG*/
- 
--      nf_reset(skb);
--
-       if (hh) {
-               int hh_alen;
- 
-diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
---- a/net/ipv4/netfilter/ip_conntrack_core.c
-+++ b/net/ipv4/netfilter/ip_conntrack_core.c
-@@ -1124,6 +1124,9 @@ void ip_conntrack_cleanup(void)
-               schedule();
-               goto i_see_dead_people;
-       }
-+      /* wait until all references to ip_conntrack_untracked are dropped */
-+      while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
-+              schedule();
- 
-       kmem_cache_destroy(ip_conntrack_cachep);
-       kmem_cache_destroy(ip_conntrack_expect_cachep);
-diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
---- a/net/ipv4/netfilter/ip_conntrack_standalone.c
-+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
-@@ -432,6 +432,13 @@ static unsigned int ip_conntrack_defrag(
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
- {
-+#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
-+      /* Previously seen (loopback)?  Ignore.  Do this before
-+           fragment check. */
-+      if ((*pskb)->nfct)
-+              return NF_ACCEPT;
-+#endif
-+
-       /* Gather fragments. */
-       if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
-               *pskb = ip_ct_gather_frags(*pskb,
-diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
---- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
-+++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
-@@ -40,7 +40,8 @@ tcp_unique_tuple(struct ip_conntrack_tup
-                enum ip_nat_manip_type maniptype,
-                const struct ip_conntrack *conntrack)
- {
--      static u_int16_t port, *portptr;
-+      static u_int16_t port;
-+      u_int16_t *portptr;
-       unsigned int range_size, min, i;
- 
-       if (maniptype == IP_NAT_MANIP_SRC)
-diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
---- a/net/ipv4/netfilter/ip_nat_proto_udp.c
-+++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
-@@ -41,7 +41,8 @@ udp_unique_tuple(struct ip_conntrack_tup
-                enum ip_nat_manip_type maniptype,
-                const struct ip_conntrack *conntrack)
- {
--      static u_int16_t port, *portptr;
-+      static u_int16_t port;
-+      u_int16_t *portptr;
-       unsigned int range_size, min, i;
- 
-       if (maniptype == IP_NAT_MANIP_SRC)
-diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
---- a/net/ipv6/netfilter/ip6_queue.c
-+++ b/net/ipv6/netfilter/ip6_queue.c
-@@ -76,7 +76,9 @@ static DECLARE_MUTEX(ipqnl_sem);
- static void
- ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
- {
-+      local_bh_disable();
-       nf_reinject(entry->skb, entry->info, verdict);
-+      local_bh_enable();
-       kfree(entry);
- }
- 
-diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
---- a/net/netlink/af_netlink.c
-+++ b/net/netlink/af_netlink.c
-@@ -315,8 +315,8 @@ err:
- static void netlink_remove(struct sock *sk)
- {
-       netlink_table_grab();
--      nl_table[sk->sk_protocol].hash.entries--;
--      sk_del_node_init(sk);
-+      if (sk_del_node_init(sk))
-+              nl_table[sk->sk_protocol].hash.entries--;
-       if (nlk_sk(sk)->groups)
-               __sk_del_bind_node(sk);
-       netlink_table_ungrab();
-@@ -429,7 +429,12 @@ retry:
-       err = netlink_insert(sk, pid);
-       if (err == -EADDRINUSE)
-               goto retry;
--      return 0;
-+
-+      /* If 2 threads race to autobind, that is fine.  */
-+      if (err == -EBUSY)
-+              err = 0;
-+
-+      return err;
- }
- 
- static inline int netlink_capable(struct socket *sock, unsigned int flag) 
-diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
---- a/net/packet/af_packet.c
-+++ b/net/packet/af_packet.c
-@@ -274,6 +274,9 @@ static int packet_rcv_spkt(struct sk_buf
-       dst_release(skb->dst);
-       skb->dst = NULL;
- 
-+      /* drop conntrack reference */
-+      nf_reset(skb);
-+
-       spkt = (struct sockaddr_pkt*)skb->cb;
- 
-       skb_push(skb, skb->data-skb->mac.raw);
-@@ -517,6 +520,9 @@ static int packet_rcv(struct sk_buff *sk
-       dst_release(skb->dst);
-       skb->dst = NULL;
- 
-+      /* drop conntrack reference */
-+      nf_reset(skb);
-+
-       spin_lock(&sk->sk_receive_queue.lock);
-       po->stats.tp_packets++;
-       __skb_queue_tail(&sk->sk_receive_queue, skb);
-diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
---- a/net/xfrm/xfrm_user.c
-+++ b/net/xfrm/xfrm_user.c
-@@ -1180,6 +1180,9 @@ static struct xfrm_policy *xfrm_compile_
-       if (nr > XFRM_MAX_DEPTH)
-               return NULL;
- 
-+      if (p->dir > XFRM_POLICY_OUT)
-+              return NULL;
-+
-       xp = xfrm_policy_alloc(GFP_KERNEL);
-       if (xp == NULL) {
-               *dir = -ENOBUFS;
-diff --git a/security/keys/keyring.c b/security/keys/keyring.c
---- a/security/keys/keyring.c
-+++ b/security/keys/keyring.c
-@@ -188,7 +188,11 @@ static void keyring_destroy(struct key *
- 
-       if (keyring->description) {
-               write_lock(&keyring_name_lock);
--              list_del(&keyring->type_data.link);
-+
-+              if (keyring->type_data.link.next != NULL &&
-+                  !list_empty(&keyring->type_data.link))
-+                      list_del(&keyring->type_data.link);
-+
-               write_unlock(&keyring_name_lock);
-       }
- 
-diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
---- a/security/keys/process_keys.c
-+++ b/security/keys/process_keys.c
-@@ -641,7 +641,7 @@ long join_session_keyring(const char *na
-               keyring = keyring_alloc(name, tsk->uid, tsk->gid, 0, NULL);
-               if (IS_ERR(keyring)) {
-                       ret = PTR_ERR(keyring);
--                      goto error;
-+                      goto error2;
-               }
-       }
-       else if (IS_ERR(keyring)) {
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/python/xen/util/process.py
--- a/tools/python/xen/util/process.py  Fri Oct 21 19:58:39 2005
+++ /dev/null   Mon Oct 24 15:08:13 2005
@@ -1,39 +0,0 @@
-# Copyright (C) 2005 Christian Limpach <Christian.Limpach@xxxxxxxxxxxx>
-
-# os.system() replacement which outputs through the logger
-
-import popen2
-import select
-import string
-
-from xen.xend.XendLogging import log
-
-def runscript(cmd):
-    # split after first space, then grab last component of path
-    cmdname = "[%s] " % cmd.split()[0].split('/')[-1]
-    # run command and grab stdin, stdout and stderr
-    cout, cin, cerr = popen2.popen3(cmd)
-    # close stdin to get command to terminate if it waits for input
-    cin.close()
-    # wait for output and process
-    p = select.poll()
-    p.register(cout)
-    p.register(cerr)
-    stdout = ""
-    while True:
-        r = p.poll()
-        for (fd, event) in r:
-            if event == select.POLLHUP:
-                cout.close()
-                cerr.close()
-                return stdout
-            if fd == cout.fileno():
-                stdout = stdout + cout.readline()
-            if fd == cerr.fileno():
-                l = cerr.readline()
-                if l[0] == '-':
-                    log.debug(cmdname + l[1:].rstrip())
-                elif l[0] == '*':
-                    log.info(cmdname + l[1:].rstrip())
-                else:
-                    log.error(cmdname + l.rstrip())
diff -r ff7c5a791ed5 -r fdea4a967bc7 tools/security/secpol_compat.h
--- a/tools/security/secpol_compat.h    Fri Oct 21 19:58:39 2005
+++ /dev/null   Mon Oct 24 15:08:13 2005
@@ -1,14 +0,0 @@
-/* secpol_compat.h
- *     'translates' data types necessary to
- *     include <xen/acm.h>
- */
-#include <stdint.h>
-
-typedef uint8_t  u8;
-typedef uint16_t u16;
-typedef uint32_t u32;
-typedef uint64_t u64;
-typedef int8_t   s8;
-typedef int16_t  s16;
-typedef int32_t  s32;
-typedef int64_t  s64;
diff -r ff7c5a791ed5 -r fdea4a967bc7 xen/include/public/io/usbif.h
--- a/xen/include/public/io/usbif.h     Fri Oct 21 19:58:39 2005
+++ /dev/null   Mon Oct 24 15:08:13 2005
@@ -1,66 +0,0 @@
-/******************************************************************************
- * usbif.h
- * 
- * Unified block-device I/O interface for Xen guest OSes.
- * 
- * Copyright (c) 2003-2004, Keir Fraser
- */
-
-#ifndef __SHARED_USBIF_H__
-#define __SHARED_USBIF_H__
-
-#define usbif_vdev_t   u16
-#define usbif_sector_t u64
-
-#define USBIF_OP_IO      0 /* Request IO to a device */
-#define USBIF_OP_PROBE   1 /* Is there a device on this port? */
-#define USBIF_OP_RESET   2 /* Reset a virtual USB port.       */
-
-typedef struct {
-    unsigned long  id;           /* private guest value, echoed in resp  */
-    u8             operation;    /* USBIF_OP_???                         */
-    u8  __pad1;
-    usbif_vdev_t   port;         /* guest virtual USB port               */
-    unsigned long  devnum :7;    /* Device address, as seen by the guest.*/
-    unsigned long  endpoint :4;  /* Device endpoint.                         */
-    unsigned long  direction :1; /* Pipe direction.                          */
-    unsigned long  speed :1;     /* Pipe speed.                              */
-    unsigned long  pipe_type :2; /* Pipe type (iso, bulk, int, ctrl)         */
-    unsigned long  __pad2 :18;
-    unsigned long  transfer_buffer; /* Machine address */
-    unsigned long  length;          /* Buffer length */
-    unsigned long  transfer_flags;  /* For now just pass Linux transfer
-                                     * flags - this may change. */
-    unsigned char setup[8];         /* Embed setup packets directly. */
-    unsigned long  iso_schedule;    /* Machine address of transfer sched (iso
-                                     * only) */
-    unsigned long num_iso;        /* length of iso schedule */
-    unsigned long timeout;        /* timeout in ms */
-} usbif_request_t;
-
-/* Data we need to pass:
- * - Transparently handle short packets or complain at us?
- */
-
-typedef struct {
-    unsigned long   id;              /* copied from request         */
-    u8              operation;       /* copied from request         */
-    u8              data;            /* Small chunk of in-band data */
-    s16             status;          /* USBIF_RSP_???               */
-    unsigned long   transfer_mutex;  /* Used for cancelling requests atomically. */
-    unsigned long    length;         /* How much data we really got */
-} usbif_response_t;
-
-#define USBIF_RSP_ERROR  -1 /* non-specific 'error' */
-#define USBIF_RSP_OKAY    0 /* non-specific 'okay'  */
-
-DEFINE_RING_TYPES(usbif, usbif_request_t, usbif_response_t);
-
-typedef struct {
-    unsigned long length; /* IN = expected, OUT = actual */
-    unsigned long buffer_offset;  /* IN offset in buffer specified in main
-                                     packet */
-    unsigned long status; /* OUT Status for this packet. */
-} usbif_iso_t;
-
-#endif /* __SHARED_USBIF_H__ */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
