
To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [XEN] Indentation cleanups (remove hard tabs).
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Sat, 19 Aug 2006 02:41:13 +0000
Delivery-date: Fri, 18 Aug 2006 19:46:40 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 043a4aa247815157ed0c048585fb36b13a4cef6a
# Parent  7ecfd9b1f64101025ae86931a157c2318e4eb32c
[XEN] Indentation cleanups (remove hard tabs).
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/apic.c                 |   12 -
 xen/arch/x86/boot/x86_32.S          |   28 +-
 xen/arch/x86/delay.c                |   14 -
 xen/arch/x86/domain_build.c         |    2 
 xen/arch/x86/extable.c              |    4 
 xen/arch/x86/hvm/hvm.c              |    6 
 xen/arch/x86/hvm/i8259.c            |    6 
 xen/arch/x86/hvm/intercept.c        |    2 
 xen/arch/x86/hvm/svm/instrlen.c     |    2 
 xen/arch/x86/hvm/svm/svm.c          |  403 ++++++++++++++++++------------------
 xen/arch/x86/hvm/svm/vmcb.c         |   20 -
 xen/arch/x86/hvm/svm/x86_32/exits.S |   12 -
 xen/arch/x86/hvm/svm/x86_64/exits.S |   24 +-
 xen/arch/x86/hvm/vioapic.c          |    6 
 xen/arch/x86/hvm/vmx/vmx.c          |    6 
 xen/arch/x86/hvm/vmx/x86_32/exits.S |    2 
 xen/arch/x86/hvm/vmx/x86_64/exits.S |    2 
 xen/arch/x86/i387.c                 |    2 
 xen/arch/x86/nmi.c                  |    8 
 xen/arch/x86/x86_32/entry.S         |  102 ++++-----
 xen/arch/x86/x86_64/entry.S         |   46 ++--
 xen/drivers/video/vga.c             |    6 
 xen/include/asm-x86/grant_table.h   |    2 
 xen/include/asm-x86/hvm/support.h   |    2 
 xen/include/asm-x86/hvm/svm/vmcb.h  |    8 
 25 files changed, 367 insertions(+), 360 deletions(-)
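The mechanical core of a change like this is expanding hard tabs into runs of spaces. As a minimal illustrative sketch (not part of this changeset), the following C filter expands each tab to spaces up to the next tab stop; the 8-column tab stop is an assumption, and since Xen's hypervisor code uses 4-space indentation the expanded lines generally still need re-indenting by hand, which is what the hunks below do.

/* Tab-expansion sketch: read stdin, write stdout with tabs expanded. */
#include <stdio.h>

#define TAB_STOP 8   /* assumed tab width; adjust to taste */

int main(void)
{
    int c, col = 0;

    while ((c = getchar()) != EOF) {
        if (c == '\t') {
            /* Emit spaces until the next multiple of TAB_STOP. */
            do {
                putchar(' ');
                col++;
            } while (col % TAB_STOP != 0);
        } else {
            putchar(c);
            col = (c == '\n') ? 0 : col + 1;
        }
    }

    return 0;
}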

diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/apic.c
--- a/xen/arch/x86/apic.c       Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/apic.c       Thu Aug 17 19:17:50 2006 +0100
@@ -10,8 +10,8 @@
  *                  thanks to Eric Gilmore
  *                  and Rolf G. Tews
  *                  for testing these extensively.
- *     Maciej W. Rozycki       :       Various updates and fixes.
- *     Mikael Pettersson       :       Power Management for UP-APIC.
+ *    Maciej W. Rozycki :   Various updates and fixes.
+ *    Mikael Pettersson :   Power Management for UP-APIC.
  *    Pavel Machek and
  *    Mikael Pettersson    :    PM converted to driver model.
  */
@@ -166,7 +166,7 @@ void clear_local_APIC(void)
         apic_write_around(APIC_LVTTHMR, APIC_LVT_MASKED);
 #endif
     v = GET_APIC_VERSION(apic_read(APIC_LVR));
-    if (APIC_INTEGRATED(v)) {  /* !82489DX */
+    if (APIC_INTEGRATED(v)) {  /* !82489DX */
         if (maxlvt > 3)        /* Due to Pentium errata 3AP and 11AP. */
             apic_write(APIC_ESR, 0);
         apic_read(APIC_ESR);
@@ -878,9 +878,9 @@ int __init calibrate_APIC_clock(void)
                     ((long)(t2-t1)/LOOPS)%(1000000/HZ));
 
     apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
-               "%ld.%04ld MHz.\n",
-               result/(1000000/HZ),
-               result%(1000000/HZ));
+                "%ld.%04ld MHz.\n",
+                result/(1000000/HZ),
+                result%(1000000/HZ));
 
     /* set up multipliers for accurate timer code */
     bus_freq   = result*HZ;
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/boot/x86_32.S
--- a/xen/arch/x86/boot/x86_32.S        Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/boot/x86_32.S        Thu Aug 17 19:17:50 2006 +0100
@@ -7,22 +7,22 @@
 
 #define  SECONDARY_CPU_FLAG 0xA5A5A5A5
                 
-               .text
+        .text
 
 ENTRY(start)
 ENTRY(stext)
 ENTRY(_stext)
         jmp __start
 
-        .align 4
+        .align 4
 
 /*** MULTIBOOT HEADER ****/
         /* Magic number indicating a Multiboot header. */
-       .long   0x1BADB002
-       /* Flags to bootloader (see Multiboot spec). */
-       .long   0x00000003
-       /* Checksum: must be the negated sum of the first two fields. */
-       .long   -0x1BADB005
+        .long 0x1BADB002
+        /* Flags to bootloader (see Multiboot spec). */
+        .long 0x00000003
+        /* Checksum: must be the negated sum of the first two fields. */
+        .long -0x1BADB005
         
 not_multiboot_msg:
         .asciz "ERR: Not a Multiboot bootloader!"
@@ -57,8 +57,8 @@ 1:      lss     stack_start-__PAGE_OFFSE
         add     $(STACK_SIZE-CPUINFO_sizeof-__PAGE_OFFSET),%esp
 
         /* Reset EFLAGS (subsumes CLI and CLD). */
-       pushl   $0
-       popf
+        pushl   $0
+        popf
 
         /* Set up FPU. */
         fninit
@@ -172,7 +172,7 @@ 1:      /* Paging enabled, so we can now
         je      start_secondary
 
         /* Call into main C routine. This should never return.*/
-               call    __start_xen
+        call    __start_xen
         ud2     /* Force a panic (invalid opcode). */
 
 /* This is the default interrupt handler. */
@@ -203,19 +203,19 @@ ENTRY(stack_start)
         
         .word   0    
 idt_descr:
-        .word  256*8-1
+        .word   256*8-1
 idt:
-        .long  idt_table
+        .long   idt_table
 
         .word   0
 gdt_descr:
-        .word  LAST_RESERVED_GDT_BYTE
+        .word   LAST_RESERVED_GDT_BYTE
 gdt:
         .long   gdt_table - FIRST_RESERVED_GDT_BYTE
 
         .word   0
 nopaging_gdt_descr:
-        .word  LAST_RESERVED_GDT_BYTE
+        .word   LAST_RESERVED_GDT_BYTE
         .long   gdt_table - FIRST_RESERVED_GDT_BYTE - __PAGE_OFFSET
         
         .org 0x1000
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/delay.c
--- a/xen/arch/x86/delay.c      Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/delay.c      Thu Aug 17 19:17:50 2006 +0100
@@ -1,13 +1,13 @@
 /*
- *     Precise Delay Loops for i386
+ * Precise Delay Loops for i386
  *
- *     Copyright (C) 1993 Linus Torvalds
- *     Copyright (C) 1997 Martin Mares <mj@xxxxxxxxxxxxxxxxxxxxxxxx>
+ * Copyright (C) 1993 Linus Torvalds
+ * Copyright (C) 1997 Martin Mares <mj@xxxxxxxxxxxxxxxxxxxxxxxx>
  *
- *     The __delay function must _NOT_ be inlined as its execution time
- *     depends wildly on alignment on many x86 processors. The additional
- *     jump magic is needed to get the timing stable on all the CPU's
- *     we have to worry about.
+ * The __delay function must _NOT_ be inlined as its execution time
+ * depends wildly on alignment on many x86 processors. The additional
+ * jump magic is needed to get the timing stable on all the CPU's
+ * we have to worry about.
  */
 
 #include <xen/config.h>
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/domain_build.c       Thu Aug 17 19:17:50 2006 +0100
@@ -119,7 +119,7 @@ static void process_dom0_ioports_disable
                    "in dom0_ioports_disable, skipping\n", t);
             continue;
         }
-       
+
         if ( *u == '\0' )
             io_to = io_from;
         else if ( *u == '-' )
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/extable.c
--- a/xen/arch/x86/extable.c    Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/extable.c    Thu Aug 17 19:17:50 2006 +0100
@@ -41,8 +41,8 @@ void sort_exception_tables(void)
 
 static inline unsigned long
 search_one_table(const struct exception_table_entry *first,
-                const struct exception_table_entry *last,
-                unsigned long value)
+                 const struct exception_table_entry *last,
+                 unsigned long value)
 {
     const struct exception_table_entry *mid;
     long diff;
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Thu Aug 17 19:17:50 2006 +0100
@@ -426,12 +426,12 @@ void hvm_print_line(struct vcpu *v, cons
 
     if (*index == HVM_PBUF_SIZE-2 || c == '\n') {
         if (*index == HVM_PBUF_SIZE-2)
-           pbuf[(*index)++] = c;
+            pbuf[(*index)++] = c;
         pbuf[*index] = '\0';
         printk("(GUEST: %u) %s\n", v->domain->domain_id, pbuf);
-       *index = 0;
+        *index = 0;
     } else
-       pbuf[(*index)++] = c;
+        pbuf[(*index)++] = c;
 }
 
 typedef unsigned long hvm_hypercall_t(
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/hvm/i8259.c
--- a/xen/arch/x86/hvm/i8259.c  Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/hvm/i8259.c  Thu Aug 17 19:17:50 2006 +0100
@@ -57,7 +57,7 @@ static inline void pic_set_irq1(PicState
         if (level) {
             if ((s->last_irr & mask) == 0) {
                 s->irr |= mask;
-           }
+            }
             s->last_irr |= mask;
         } else {
             s->last_irr &= ~mask;
@@ -237,7 +237,7 @@ static void update_shared_irr(struct hvm
     BUG_ON(!spin_is_locked(&s->lock));
 
     get_sp(current->domain)->sp_global.pic_elcr = 
-               s->pics[0].elcr | ((u16)s->pics[1].elcr << 8);
+        s->pics[0].elcr | ((u16)s->pics[1].elcr << 8);
     pl =(uint8_t*)&get_sp(current->domain)->sp_global.pic_last_irr;
     pe =(uint8_t*)&get_sp(current->domain)->sp_global.pic_elcr;
     if ( c == &s->pics[0] ) {
@@ -550,7 +550,7 @@ static int intercept_elcr_io(ioreq_t *p)
         spin_lock_irqsave(&s->lock, flags);
         elcr_ioport_write((void*)&s->pics[p->addr&1],
                 (uint32_t) p->addr, (uint32_t)( data & 0xff));
-       get_sp(current->domain)->sp_global.pic_elcr = 
+        get_sp(current->domain)->sp_global.pic_elcr = 
             s->pics[0].elcr | ((u16)s->pics[1].elcr << 8);
         spin_unlock_irqrestore(&s->lock, flags);
     }
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/hvm/intercept.c
--- a/xen/arch/x86/hvm/intercept.c      Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/hvm/intercept.c      Thu Aug 17 19:17:50 2006 +0100
@@ -284,7 +284,7 @@ static __inline__ void missed_ticks(stru
 
     missed_ticks = NOW() - pt->scheduled;
     if ( missed_ticks > 0 ) {
-       missed_ticks = missed_ticks / (s_time_t) pt->period + 1;
+        missed_ticks = missed_ticks / (s_time_t) pt->period + 1;
         if ( missed_ticks > 1000 ) {
             /* TODO: Adjust guest time togther */
             pt->pending_intr_nr++;
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/hvm/svm/instrlen.c
--- a/xen/arch/x86/hvm/svm/instrlen.c   Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/hvm/svm/instrlen.c   Thu Aug 17 19:17:50 2006 +0100
@@ -464,7 +464,7 @@ done_prefixes:
             case 4: insn_fetch(int32_t, 4, _regs.eip, length); break;
             }
             goto done;
-       }
+        }
         break;
     }
 
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu Aug 17 19:17:50 2006 +0100
@@ -58,7 +58,7 @@ extern int inst_copy_from_guest(unsigned
                                 int inst_len);
 extern asmlinkage void do_IRQ(struct cpu_user_regs *);
 extern void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
-       unsigned long count, int size, long value, int dir, int pvalid);
+                         unsigned long count, int size, long value, int dir, int pvalid);
 extern int svm_instrlen(struct cpu_user_regs *regs, int mode);
 extern void svm_dump_inst(unsigned long eip);
 extern int svm_dbg_on;
@@ -66,7 +66,7 @@ void svm_dump_regs(const char *from, str
 
 static void svm_relinquish_guest_resources(struct domain *d);
 static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v,
-        struct cpu_user_regs *regs);
+                                            struct cpu_user_regs *regs);
 
 /* va of hardware host save area     */
 static void *hsa[NR_CPUS] __read_mostly;
@@ -107,7 +107,7 @@ void asidpool_init(int core)
     /* Host ASID is always in use */
     per_cpu(asid_pool,core).asid[INITIAL_ASID] = ASID_INUSE;
     for ( i = 1; i < ASID_MAX; i++ )
-       per_cpu(asid_pool,core).asid[i] = ASID_AVAILABLE;
+        per_cpu(asid_pool,core).asid[i] = ASID_AVAILABLE;
 }
 
 
@@ -139,7 +139,7 @@ static int asidpool_fetch_next(struct vm
  *                           available.
  */
 int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
-                             int oldcore, int newcore )
+                          int oldcore, int newcore )
 {
     int i;
     int res = 1;
@@ -147,8 +147,8 @@ int asidpool_assign_next( struct vmcb_st
 
     spin_lock(&per_cpu(asid_pool,oldcore).asid_lock);
     if( retire_current && vmcb->guest_asid ) {
-       per_cpu(asid_pool,oldcore).asid[vmcb->guest_asid & (ASID_MAX-1)] = 
-           ASID_RETIRED;
+        per_cpu(asid_pool,oldcore).asid[vmcb->guest_asid & (ASID_MAX-1)] = 
+            ASID_RETIRED;
     }
     spin_unlock(&per_cpu(asid_pool,oldcore).asid_lock);
     spin_lock(&per_cpu(asid_pool,newcore).asid_lock);
@@ -171,12 +171,12 @@ int asidpool_assign_next( struct vmcb_st
 
 void asidpool_retire( struct vmcb_struct *vmcb, int core )
 {
-   spin_lock(&per_cpu(asid_pool,core).asid_lock);
-   if( vmcb->guest_asid ) {
-       per_cpu(asid_pool,core).asid[vmcb->guest_asid & (ASID_MAX-1)] = 
-           ASID_RETIRED;
-   }
-   spin_unlock(&per_cpu(asid_pool,core).asid_lock);
+    spin_lock(&per_cpu(asid_pool,core).asid_lock);
+    if( vmcb->guest_asid ) {
+        per_cpu(asid_pool,core).asid[vmcb->guest_asid & (ASID_MAX-1)] = 
+            ASID_RETIRED;
+    }
+    spin_unlock(&per_cpu(asid_pool,core).asid_lock);
 }
 
 static inline void svm_inject_exception(struct vcpu *v, int trap, 
@@ -286,26 +286,26 @@ static inline int long_mode_do_msr_read(
         break;
 
     case MSR_STAR:
-         msr_content = vmcb->star;
-         break;
+        msr_content = vmcb->star;
+        break;
  
     case MSR_LSTAR:
-         msr_content = vmcb->lstar;
-         break;
+        msr_content = vmcb->lstar;
+        break;
  
     case MSR_CSTAR:
-         msr_content = vmcb->cstar;
-         break;
+        msr_content = vmcb->cstar;
+        break;
  
     case MSR_SYSCALL_MASK:
-         msr_content = vmcb->sfmask;
-         break;
+        msr_content = vmcb->sfmask;
+        break;
     default:
         return 0;
     }
 
     HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %"PRIx64"\n", 
-            msr_content);
+                msr_content);
 
     regs->eax = msr_content & 0xffffffff;
     regs->edx = msr_content >> 32;
@@ -378,24 +378,24 @@ static inline int long_mode_do_msr_write
         break;
 
     case MSR_SHADOW_GS_BASE:
-         vmcb->kerngsbase = msr_content;
-         break;
+        vmcb->kerngsbase = msr_content;
+        break;
  
     case MSR_STAR:
-         vmcb->star = msr_content;
-         break;
+        vmcb->star = msr_content;
+        break;
  
     case MSR_LSTAR:
-         vmcb->lstar = msr_content;
-         break;
+        vmcb->lstar = msr_content;
+        break;
  
     case MSR_CSTAR:
-         vmcb->cstar = msr_content;
-         break;
+        vmcb->cstar = msr_content;
+        break;
  
     case MSR_SYSCALL_MASK:
-         vmcb->sfmask = msr_content;
-         break;
+        vmcb->sfmask = msr_content;
+        break;
 
     default:
         return 0;
@@ -581,9 +581,9 @@ static inline int svm_do_debugout(unsign
 
 #if 0
     if ((exit_code == 0x4E 
-                || exit_code == VMEXIT_CR0_READ 
-                || exit_code == VMEXIT_CR0_WRITE) 
-            && counter < 200000)
+         || exit_code == VMEXIT_CR0_READ 
+         || exit_code == VMEXIT_CR0_WRITE) 
+        && counter < 200000)
         return 0;
 
     if ((exit_code == 0x4E) && counter < 500000)
@@ -688,18 +688,18 @@ static void arch_svm_do_launch(struct vc
 #endif
     if (v->vcpu_id != 0) 
     {
-       u16     cs_sel = regs->cs;
-       /*
+        u16 cs_sel = regs->cs;
+        /*
          * This is the launch of an AP; set state so that we begin executing
-        * the trampoline code in real-mode.
+         * the trampoline code in real-mode.
          */
-       svm_do_vmmcall_reset_to_realmode(v, regs);      
-       /* Adjust the state to execute the trampoline code.*/
-       v->arch.hvm_svm.vmcb->rip = 0;
-       v->arch.hvm_svm.vmcb->cs.sel= cs_sel;
-       v->arch.hvm_svm.vmcb->cs.base = (cs_sel << 4);
-    }
-       
+        svm_do_vmmcall_reset_to_realmode(v, regs);  
+        /* Adjust the state to execute the trampoline code.*/
+        v->arch.hvm_svm.vmcb->rip = 0;
+        v->arch.hvm_svm.vmcb->cs.sel= cs_sel;
+        v->arch.hvm_svm.vmcb->cs.base = (cs_sel << 4);
+    }
+      
     reset_stack_and_jump(svm_asm_do_launch);
 }
 
@@ -776,7 +776,7 @@ int start_svm(void)
     u64 phys_hsa;
     int cpu = smp_processor_id();
  
-   /* Xen does not fill x86_capability words except 0. */
+    /* Xen does not fill x86_capability words except 0. */
     ecx = cpuid_ecx(0x80000001);
     boot_cpu_data.x86_capability[5] = ecx;
     
@@ -888,7 +888,7 @@ void arch_svm_do_resume(struct vcpu *v)
     else {
         if (svm_dbg_on)
             printk("VCPU core pinned: %d to %d\n", 
-                v->arch.hvm_svm.launch_core, smp_processor_id() );
+                   v->arch.hvm_svm.launch_core, smp_processor_id() );
         v->arch.hvm_svm.launch_core = smp_processor_id();
         svm_migrate_timers( v );
         hvm_do_resume( v );
@@ -910,8 +910,8 @@ static int svm_do_page_fault(unsigned lo
 //#if HVM_DEBUG
     eip = vmcb->rip;
     HVM_DBG_LOG(DBG_LEVEL_VMMU, 
-            "svm_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
-            va, eip, (unsigned long)regs->error_code);
+                "svm_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
+                va, eip, (unsigned long)regs->error_code);
 //#endif
 
     result = shadow2_fault(va, regs); 
@@ -938,7 +938,7 @@ static void svm_do_no_device_fault(struc
 
 
 static void svm_do_general_protection_fault(struct vcpu *v, 
-        struct cpu_user_regs *regs) 
+                                            struct cpu_user_regs *regs) 
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     unsigned long eip, error_code;
@@ -961,10 +961,10 @@ static void svm_do_general_protection_fa
                 eip, error_code);
 
     HVM_DBG_LOG(DBG_LEVEL_1, 
-            "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
-            (unsigned long)regs->eax, (unsigned long)regs->ebx,
-            (unsigned long)regs->ecx, (unsigned long)regs->edx,
-            (unsigned long)regs->esi, (unsigned long)regs->edi);
+                "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
+                (unsigned long)regs->eax, (unsigned long)regs->ebx,
+                (unsigned long)regs->ecx, (unsigned long)regs->edx,
+                (unsigned long)regs->esi, (unsigned long)regs->edi);
       
     /* Reflect it back into the guest */
     svm_inject_exception(v, TRAP_gp_fault, 1, error_code);
@@ -976,7 +976,7 @@ static void svm_do_general_protection_fa
 #define SVM_VCPU_CPUID_L1_EDX_RESERVED 0xe8740400
 
 static void svm_vmexit_do_cpuid(struct vmcb_struct *vmcb, unsigned long input, 
-        struct cpu_user_regs *regs) 
+                                struct cpu_user_regs *regs) 
 {
     unsigned int eax, ebx, ecx, edx;
     unsigned long eip;
@@ -988,18 +988,18 @@ static void svm_vmexit_do_cpuid(struct v
     eip = vmcb->rip;
 
     HVM_DBG_LOG(DBG_LEVEL_1, 
-            "do_cpuid: (eax) %lx, (ebx) %lx, (ecx) %lx, (edx) %lx,"
-            " (esi) %lx, (edi) %lx",
-            (unsigned long)regs->eax, (unsigned long)regs->ebx,
-            (unsigned long)regs->ecx, (unsigned long)regs->edx,
-            (unsigned long)regs->esi, (unsigned long)regs->edi);
+                "do_cpuid: (eax) %lx, (ebx) %lx, (ecx) %lx, (edx) %lx,"
+                " (esi) %lx, (edi) %lx",
+                (unsigned long)regs->eax, (unsigned long)regs->ebx,
+                (unsigned long)regs->ecx, (unsigned long)regs->edx,
+                (unsigned long)regs->esi, (unsigned long)regs->edi);
 
     cpuid(input, &eax, &ebx, &ecx, &edx);
 
     if (input == 0x00000001)
     {
         if ( !hvm_apic_support(v->domain) ||
-                !vlapic_global_enabled((VLAPIC(v))) )
+             !vlapic_global_enabled((VLAPIC(v))) )
         {
             /* Since the apic is disabled, avoid any confusion 
                about SMP cpus being available */
@@ -1091,9 +1091,9 @@ static void svm_vmexit_do_cpuid(struct v
     regs->edx = (unsigned long)edx;
 
     HVM_DBG_LOG(DBG_LEVEL_1, 
-            "svm_vmexit_do_cpuid: eip: %lx, input: %lx, out:eax=%x, "
-            "ebx=%x, ecx=%x, edx=%x",
-            eip, input, eax, ebx, ecx, edx);
+                "svm_vmexit_do_cpuid: eip: %lx, input: %lx, out:eax=%x, "
+                "ebx=%x, ecx=%x, edx=%x",
+                eip, input, eax, ebx, ecx, edx);
 
     inst_len = __get_instruction_length(vmcb, INSTR_CPUID, NULL);
     ASSERT(inst_len > 0);
@@ -1102,7 +1102,7 @@ static void svm_vmexit_do_cpuid(struct v
 
 
 static inline unsigned long *get_reg_p(unsigned int gpreg, 
-        struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
+                                       struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
 {
     unsigned long *reg_p = NULL;
     switch (gpreg)
@@ -1166,7 +1166,7 @@ static inline unsigned long *get_reg_p(u
 
 
 static inline unsigned long get_reg(unsigned int gpreg, 
-        struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
+                                    struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
 {
     unsigned long *gp;
     gp = get_reg_p(gpreg, regs, vmcb);
@@ -1175,7 +1175,7 @@ static inline unsigned long get_reg(unsi
 
 
 static inline void set_reg(unsigned int gpreg, unsigned long value, 
-        struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
+                           struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
 {
     unsigned long *gp;
     gp = get_reg_p(gpreg, regs, vmcb);
@@ -1184,7 +1184,7 @@ static inline void set_reg(unsigned int 
                            
 
 static void svm_dr_access (struct vcpu *v, unsigned int reg, unsigned int type,
-        struct cpu_user_regs *regs)
+                           struct cpu_user_regs *regs)
 {
     unsigned long *reg_p = 0;
     unsigned int gpreg = 0;
@@ -1212,7 +1212,7 @@ static void svm_dr_access (struct vcpu *
     ASSERT(reg == decode_dest_reg(prefix, buffer[index + 2]));
 
     HVM_DBG_LOG(DBG_LEVEL_1, "svm_dr_access : eip=%lx, reg=%d, gpreg = %x",
-            eip, reg, gpreg);
+                eip, reg, gpreg);
 
     reg_p = get_reg_p(gpreg, regs, vmcb);
         
@@ -1244,7 +1244,7 @@ static void svm_get_prefix_info(
 
     memset(inst, 0, MAX_INST_LEN);
     if (inst_copy_from_guest(inst, svm_rip2pointer(vmcb), sizeof(inst)) 
-            != MAX_INST_LEN) 
+        != MAX_INST_LEN) 
     {
         printk("%s: get guest instruction failed\n", __func__);
         domain_crash_synchronous();
@@ -1531,8 +1531,8 @@ static int svm_set_cr0(unsigned long val
     {
         /* The guest CR3 must be pointing to the guest physical. */
         if (!VALID_MFN(mfn = 
-                    get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT))
-                || !get_page(mfn_to_page(mfn), v->domain))
+                       get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT))
+            || !get_page(mfn_to_page(mfn), v->domain))
         {
             printk("Invalid CR3 value = %lx\n", v->arch.hvm_svm.cpu_cr3);
             domain_crash_synchronous(); /* need to take a clean path */
@@ -1540,8 +1540,8 @@ static int svm_set_cr0(unsigned long val
 
 #if defined(__x86_64__)
         if (test_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state) 
-                && !test_bit(SVM_CPU_STATE_PAE_ENABLED, 
-                    &v->arch.hvm_svm.cpu_state))
+            && !test_bit(SVM_CPU_STATE_PAE_ENABLED, 
+                         &v->arch.hvm_svm.cpu_state))
         {
             HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n");
             svm_inject_exception(v, TRAP_gp_fault, 1, 0);
@@ -1565,7 +1565,7 @@ static int svm_set_cr0(unsigned long val
         shadow2_update_paging_modes(v);
 
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx", 
-                (unsigned long) (mfn << PAGE_SHIFT));
+                    (unsigned long) (mfn << PAGE_SHIFT));
 
         vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; 
         set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
@@ -1574,7 +1574,7 @@ static int svm_set_cr0(unsigned long val
     if ( !((value & X86_CR0_PE) && (value & X86_CR0_PG)) && paging_enabled )
         if ( v->arch.hvm_svm.cpu_cr3 ) {
             put_page(mfn_to_page(get_mfn_from_gpfn(
-                      v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT)));
+                v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT)));
             v->arch.guest_table = pagetable_null();
         }
 
@@ -1621,7 +1621,7 @@ static void mov_from_cr(int cr, int gp, 
         value = v->arch.hvm_svm.cpu_shadow_cr0;
         if (svm_dbg_on)
             printk("CR0 read =%lx \n", value );
-          break;
+        break;
     case 2:
         value = vmcb->cr2;
         break;
@@ -1629,11 +1629,11 @@ static void mov_from_cr(int cr, int gp, 
         value = (unsigned long) v->arch.hvm_svm.cpu_cr3;
         if (svm_dbg_on)
             printk("CR3 read =%lx \n", value );
-          break;
+        break;
     case 4:
         value = (unsigned long) v->arch.hvm_svm.cpu_shadow_cr4;
         if (svm_dbg_on)
-           printk( "CR4 read=%lx\n", value );
+            printk( "CR4 read=%lx\n", value );
         break;
     case 8:
 #if 0
@@ -1655,7 +1655,7 @@ static void mov_from_cr(int cr, int gp, 
 
 static inline int svm_pgbit_test(struct vcpu *v)
 {
-   return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
+    return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
 }
 
 
@@ -1716,8 +1716,8 @@ static int mov_to_cr(int gpreg, int cr, 
              */
             HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
             if (((value >> PAGE_SHIFT) > v->domain->max_pages) 
-                    || !VALID_MFN(mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT))
-                    || !get_page(mfn_to_page(mfn), v->domain))
+                || !VALID_MFN(mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT))
+                || !get_page(mfn_to_page(mfn), v->domain))
             {
                 printk("Invalid CR3 value=%lx\n", value);
                 domain_crash_synchronous(); /* need to take a clean path */
@@ -1744,7 +1744,7 @@ static int mov_to_cr(int gpreg, int cr, 
     {
         if (svm_dbg_on)
             printk( "write cr4=%lx, cr0=%lx\n", 
-                     value,  v->arch.hvm_svm.cpu_shadow_cr0 );
+                    value,  v->arch.hvm_svm.cpu_shadow_cr0 );
         old_cr = v->arch.hvm_svm.cpu_shadow_cr4;
         if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
         {
@@ -1756,7 +1756,7 @@ static int mov_to_cr(int gpreg, int cr, 
                 unsigned long mfn, old_base_mfn;
 
                 if ( !VALID_MFN(mfn = get_mfn_from_gpfn(
-                                    v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT)) ||
+                    v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT)) ||
                      !get_page(mfn_to_page(mfn), v->domain) )
                 {
                     printk("Invalid CR3 value = %lx", v->arch.hvm_svm.cpu_cr3);
@@ -1826,7 +1826,7 @@ static int mov_to_cr(int gpreg, int cr, 
 
 
 static int svm_cr_access(struct vcpu *v, unsigned int cr, unsigned int type,
-        struct cpu_user_regs *regs)
+                         struct cpu_user_regs *regs)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     int inst_len = 0;
@@ -1850,13 +1850,13 @@ static int svm_cr_access(struct vcpu *v,
     
     if (type == TYPE_MOV_TO_CR) 
     {
-        inst_len = __get_instruction_length_from_list(vmcb, list_a, 
-                ARR_SIZE(list_a), &buffer[index], &match);
+        inst_len = __get_instruction_length_from_list(
+            vmcb, list_a, ARR_SIZE(list_a), &buffer[index], &match);
     }
     else
     {
-        inst_len = __get_instruction_length_from_list(vmcb, list_b, 
-                ARR_SIZE(list_b), &buffer[index], &match);
+        inst_len = __get_instruction_length_from_list(
+            vmcb, list_b, ARR_SIZE(list_b), &buffer[index], &match);
     }
 
     ASSERT(inst_len > 0);
@@ -1898,7 +1898,7 @@ static int svm_cr_access(struct vcpu *v,
 
         if (svm_dbg_on)
             printk("CR0-LMSW value=%lx, reg=%d, inst_len=%d\n", value, gpreg, 
-                    inst_len);
+                   inst_len);
 
         value = (v->arch.hvm_svm.cpu_shadow_cr0 & ~0xF) | value;
 
@@ -1917,7 +1917,7 @@ static int svm_cr_access(struct vcpu *v,
 
         if (svm_dbg_on)
             printk("CR0-SMSW value=%lx, reg=%d, inst_len=%d\n", value, gpreg, 
-                    inst_len);
+                   inst_len);
         break;
 
     default:
@@ -1943,9 +1943,9 @@ static inline void svm_do_msr_access(
     ASSERT(vmcb);
 
     HVM_DBG_LOG(DBG_LEVEL_1, "svm_do_msr_access: ecx=%lx, eax=%lx, edx=%lx, "
-            "exitinfo = %lx", (unsigned long)regs->ecx, 
-            (unsigned long)regs->eax, (unsigned long)regs->edx, 
-            (unsigned long)vmcb->exitinfo1);
+                "exitinfo = %lx", (unsigned long)regs->ecx, 
+                (unsigned long)regs->eax, (unsigned long)regs->edx, 
+                (unsigned long)vmcb->exitinfo1);
 
     /* is it a read? */
     if (vmcb->exitinfo1 == 0)
@@ -2015,7 +2015,7 @@ static inline void svm_do_msr_access(
         }
     }
 
-done:
+ done:
 
     HVM_DBG_LOG(DBG_LEVEL_1, "svm_do_msr_access returns: "
                 "ecx=%lx, eax=%lx, edx=%lx",
@@ -2033,7 +2033,7 @@ static inline void svm_vmexit_do_hlt(str
     /* Check for interrupt not handled or new interrupt. */
     if ( (vmcb->rflags & X86_EFLAGS_IF) &&
          (vmcb->vintr.fields.irq || cpu_has_pending_irq(current)) )
-       return;
+        return;
 
     hvm_hlt(vmcb->rflags);
 }
@@ -2062,7 +2062,7 @@ static void svm_vmexit_do_invd(struct vm
 
 #ifdef XEN_DEBUGGER
 static void svm_debug_save_cpu_user_regs(struct vmcb_struct *vmcb, 
-        struct cpu_user_regs *regs)
+                                         struct cpu_user_regs *regs)
 {
     regs->eip = vmcb->rip;
     regs->esp = vmcb->rsp;
@@ -2110,7 +2110,7 @@ void svm_handle_invlpg(const short invlp
     {
         printk("svm_handle_invlpg (): Error reading memory %d bytes\n", 
                length);
-       __hvm_bug(regs);
+        __hvm_bug(regs);
     }
 
     if (invlpga)
@@ -2141,7 +2141,7 @@ void svm_handle_invlpg(const short invlp
          * the system in either 32- or 64-bit mode.
          */
         g_vaddr = get_effective_addr_modrm64(vmcb, regs, prefix, 
-                            &opcode[inst_len], &length);
+                                             &opcode[inst_len], &length);
 
         inst_len += length;
         __update_guest_eip (vmcb, inst_len);
@@ -2160,7 +2160,7 @@ void svm_handle_invlpg(const short invlp
  * returns 0 on success, non-zero otherwise
  */
 static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v, 
-        struct cpu_user_regs *regs)
+                                            struct cpu_user_regs *regs)
 {
     struct vmcb_struct *vmcb;
 
@@ -2523,7 +2523,7 @@ void walk_shadow_and_guest_pt(unsigned l
     gpa = shadow2_gva_to_gpa(current, gva);
     printk( "gva = %lx, gpa=%lx, gCR3=%x\n", gva, gpa, (u32)vmcb->cr3 );
     if( !svm_paging_enabled(v) || mmio_space(gpa) )
-       return;
+        return;
 
     /* let's dump the guest and shadow page info */
 
@@ -2546,7 +2546,7 @@ void walk_shadow_and_guest_pt(unsigned l
     printk( "G-PTE = %x, flags=%x\n", gpte.l1, l1e_get_flags(gpte) );
 
     BUG(); // need to think about this, and convert usage of
-           // phys_to_machine_mapping to use pagetable format...
+    // phys_to_machine_mapping to use pagetable format...
     __copy_from_user( &spte, &phys_to_machine_mapping[ l1e_get_pfn( gpte ) ], 
                       sizeof(spte) );
 
@@ -2581,100 +2581,105 @@ asmlinkage void svm_vmexit_handler(struc
     }
 
 #ifdef SVM_EXTRA_DEBUG
-{
+    {
 #if defined(__i386__)
-#define        rip     eip
+#define rip eip
 #endif
 
-    static unsigned long intercepts_counter = 0;
-
-    if (svm_dbg_on && exit_reason == VMEXIT_EXCEPTION_PF) 
-    {
-        if (svm_paging_enabled(v) && 
-            !mmio_space(shadow2_gva_to_gpa(current, vmcb->exitinfo2)))
-        {
-            printk("I%08ld,ExC=%s(%d),IP=%x:%llx,I1=%llx,I2=%llx,INT=%llx, "
-                   "gpa=%llx\n", intercepts_counter,
-                    exit_reasons[exit_reason], exit_reason, regs.cs,
-                   (unsigned long long) regs.rip,
-                   (unsigned long long) vmcb->exitinfo1,
-                   (unsigned long long) vmcb->exitinfo2,
-                   (unsigned long long) vmcb->exitintinfo.bytes,
-            (unsigned long long) shadow2_gva_to_gpa(current, vmcb->exitinfo2));
-        }
-        else 
-        {
-            printk("I%08ld,ExC=%s(%d),IP=%x:%llx,I1=%llx,I2=%llx,INT=%llx\n", 
-                    intercepts_counter,
-                    exit_reasons[exit_reason], exit_reason, regs.cs,
-                   (unsigned long long) regs.rip,
-                   (unsigned long long) vmcb->exitinfo1,
-                   (unsigned long long) vmcb->exitinfo2,
-                   (unsigned long long) vmcb->exitintinfo.bytes );
-        }
-    } 
-    else if ( svm_dbg_on 
-              && exit_reason != VMEXIT_IOIO 
-              && exit_reason != VMEXIT_INTR) 
-    {
-
-        if (exit_reasons[exit_reason])
-        {
-            printk("I%08ld,ExC=%s(%d),IP=%x:%llx,I1=%llx,I2=%llx,INT=%llx\n", 
-                    intercepts_counter,
-                    exit_reasons[exit_reason], exit_reason, regs.cs,
-                   (unsigned long long) regs.rip,
-                   (unsigned long long) vmcb->exitinfo1,
-                   (unsigned long long) vmcb->exitinfo2,
-                   (unsigned long long) vmcb->exitintinfo.bytes);
+        static unsigned long intercepts_counter = 0;
+
+        if (svm_dbg_on && exit_reason == VMEXIT_EXCEPTION_PF) 
+        {
+            if (svm_paging_enabled(v) && 
+                !mmio_space(shadow2_gva_to_gpa(current, vmcb->exitinfo2)))
+            {
+                printk("I%08ld,ExC=%s(%d),IP=%x:%llx,"
+                       "I1=%llx,I2=%llx,INT=%llx, "
+                       "gpa=%llx\n", intercepts_counter,
+                       exit_reasons[exit_reason], exit_reason, regs.cs,
+                       (unsigned long long) regs.rip,
+                       (unsigned long long) vmcb->exitinfo1,
+                       (unsigned long long) vmcb->exitinfo2,
+                       (unsigned long long) vmcb->exitintinfo.bytes,
+                       (unsigned long long) shadow2_gva_to_gpa(current, vmcb->exitinfo2));
+            }
+            else 
+            {
+                printk("I%08ld,ExC=%s(%d),IP=%x:%llx,"
+                       "I1=%llx,I2=%llx,INT=%llx\n", 
+                       intercepts_counter,
+                       exit_reasons[exit_reason], exit_reason, regs.cs,
+                       (unsigned long long) regs.rip,
+                       (unsigned long long) vmcb->exitinfo1,
+                       (unsigned long long) vmcb->exitinfo2,
+                       (unsigned long long) vmcb->exitintinfo.bytes );
+            }
         } 
-        else 
-        {
-            printk("I%08ld,ExC=%d(0x%x),IP=%x:%llx,I1=%llx,I2=%llx,INT=%llx\n", 
-                    intercepts_counter, exit_reason, exit_reason, regs.cs, 
-                   (unsigned long long) regs.rip,
-                   (unsigned long long) vmcb->exitinfo1,
-                   (unsigned long long) vmcb->exitinfo2,
-                   (unsigned long long) vmcb->exitintinfo.bytes);
-        }
-    }
+        else if ( svm_dbg_on 
+                  && exit_reason != VMEXIT_IOIO 
+                  && exit_reason != VMEXIT_INTR) 
+        {
+
+            if (exit_reasons[exit_reason])
+            {
+                printk("I%08ld,ExC=%s(%d),IP=%x:%llx,"
+                       "I1=%llx,I2=%llx,INT=%llx\n", 
+                       intercepts_counter,
+                       exit_reasons[exit_reason], exit_reason, regs.cs,
+                       (unsigned long long) regs.rip,
+                       (unsigned long long) vmcb->exitinfo1,
+                       (unsigned long long) vmcb->exitinfo2,
+                       (unsigned long long) vmcb->exitintinfo.bytes);
+            } 
+            else 
+            {
+                printk("I%08ld,ExC=%d(0x%x),IP=%x:%llx,"
+                       "I1=%llx,I2=%llx,INT=%llx\n", 
+                       intercepts_counter, exit_reason, exit_reason, regs.cs, 
+                       (unsigned long long) regs.rip,
+                       (unsigned long long) vmcb->exitinfo1,
+                       (unsigned long long) vmcb->exitinfo2,
+                       (unsigned long long) vmcb->exitintinfo.bytes);
+            }
+        }
 
 #ifdef SVM_WALK_GUEST_PAGES
-    if( exit_reason == VMEXIT_EXCEPTION_PF 
-        && ( ( vmcb->exitinfo2 == vmcb->rip )
-        || vmcb->exitintinfo.bytes) )
-    {
-       if (svm_paging_enabled(v) && !mmio_space(gva_to_gpa(vmcb->exitinfo2)))
-           walk_shadow_and_guest_pt( vmcb->exitinfo2 );
-    }
+        if( exit_reason == VMEXIT_EXCEPTION_PF 
+            && ( ( vmcb->exitinfo2 == vmcb->rip )
+                 || vmcb->exitintinfo.bytes) )
+        {
+            if ( svm_paging_enabled(v) &&
+                 !mmio_space(gva_to_gpa(vmcb->exitinfo2)) )
+                walk_shadow_and_guest_pt(vmcb->exitinfo2);
+        }
 #endif
 
-    intercepts_counter++;
+        intercepts_counter++;
 
 #if 0
-    if (svm_dbg_on)
-        do_debug = svm_do_debugout(exit_reason);
+        if (svm_dbg_on)
+            do_debug = svm_do_debugout(exit_reason);
 #endif
 
-    if (do_debug)
-    {
-        printk("%s:+ guest_table = 0x%08x, monitor_table = 0x%08x, "
-                "shadow_table = 0x%08x\n", 
-                __func__,
-               (int) v->arch.guest_table.pfn,
-               (int) v->arch.monitor_table.pfn, 
-                (int) v->arch.shadow_table.pfn);
-
-        svm_dump_vmcb(__func__, vmcb);
-        svm_dump_regs(__func__, &regs);
-        svm_dump_inst(svm_rip2pointer(vmcb));
-    }
+        if (do_debug)
+        {
+            printk("%s:+ guest_table = 0x%08x, monitor_table = 0x%08x, "
+                   "shadow_table = 0x%08x\n", 
+                   __func__,
+                   (int) v->arch.guest_table.pfn,
+                   (int) v->arch.monitor_table.pfn, 
+                   (int) v->arch.shadow_table.pfn);
+
+            svm_dump_vmcb(__func__, vmcb);
+            svm_dump_regs(__func__, &regs);
+            svm_dump_inst(svm_rip2pointer(vmcb));
+        }
 
 #if defined(__i386__)
-#undef rip
+#undef rip
 #endif
 
-}
+    }
 #endif /* SVM_EXTRA_DEBUG */
 
 
@@ -2685,7 +2690,7 @@ asmlinkage void svm_vmexit_handler(struc
     if (do_debug)
     {
         printk("eip = %lx, exit_reason = %d (0x%x)\n", 
-                eip, exit_reason, exit_reason);
+               eip, exit_reason, exit_reason);
     }
 #endif /* SVM_EXTRA_DEBUG */
 
@@ -2754,10 +2759,10 @@ asmlinkage void svm_vmexit_handler(struc
         va = vmcb->exitinfo2;
         regs.error_code = vmcb->exitinfo1;
         HVM_DBG_LOG(DBG_LEVEL_VMMU, 
-                "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
-                (unsigned long)regs.eax, (unsigned long)regs.ebx,
-                (unsigned long)regs.ecx, (unsigned long)regs.edx,
-                (unsigned long)regs.esi, (unsigned long)regs.edi);
+                    "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
+                    (unsigned long)regs.eax, (unsigned long)regs.ebx,
+                    (unsigned long)regs.ecx, (unsigned long)regs.edx,
+                    (unsigned long)regs.esi, (unsigned long)regs.edi);
 
         if (!(error = svm_do_page_fault(va, &regs))) 
         {
@@ -2767,7 +2772,7 @@ asmlinkage void svm_vmexit_handler(struc
             v->arch.hvm_svm.cpu_cr2 = va;
             vmcb->cr2 = va;
             TRACE_3D(TRC_VMX_INT, v->domain->domain_id, 
-                    VMEXIT_EXCEPTION_PF, va);
+                     VMEXIT_EXCEPTION_PF, va);
         }
         break;
     }
@@ -2922,8 +2927,8 @@ asmlinkage void svm_vmexit_handler(struc
     default:
         printk("unexpected VMEXIT: exit reason = 0x%x, exitinfo1 = %llx, "
                "exitinfo2 = %llx\n", exit_reason, 
-                                    (unsigned long long)vmcb->exitinfo1, 
-                                    (unsigned long long)vmcb->exitinfo2);
+               (unsigned long long)vmcb->exitinfo1, 
+               (unsigned long long)vmcb->exitinfo2);
         __hvm_bug(&regs);       /* should not happen */
         break;
     }
@@ -2938,10 +2943,10 @@ asmlinkage void svm_vmexit_handler(struc
     if (do_debug) 
     {
         printk("vmexit_handler():- guest_table = 0x%08x, "
-                "monitor_table = 0x%08x, shadow_table = 0x%08x\n",
-                (int)v->arch.guest_table.pfn,
-               (int)v->arch.monitor_table.pfn, 
-                (int)v->arch.shadow_table.pfn);
+               "monitor_table = 0x%08x, shadow_table = 0x%08x\n",
+               (int)v->arch.guest_table.pfn,
+               (int)v->arch.monitor_table.pfn, 
+               (int)v->arch.shadow_table.pfn);
         printk("svm_vmexit_handler: Returning\n");
     }
 #endif
@@ -2962,15 +2967,17 @@ asmlinkage void svm_asid(void)
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
-   /*
-    * if need to assign new asid, or if switching cores,
-    * retire asid for the old core, and assign a new asid to the current core.
-    */
+    /*
+     * if need to assign new asid, or if switching cores,
+     * retire asid for the old core, and assign a new asid to the current core.
+     */
     if ( test_bit( ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags ) ||
-       ( v->arch.hvm_svm.asid_core != v->arch.hvm_svm.launch_core )) {
+         ( v->arch.hvm_svm.asid_core != v->arch.hvm_svm.launch_core )) {
         /* recycle asid */
-        if ( !asidpool_assign_next( vmcb, 1,
-            v->arch.hvm_svm.asid_core, v->arch.hvm_svm.launch_core )) {
+        if ( !asidpool_assign_next(vmcb, 1,
+                                   v->arch.hvm_svm.asid_core,
+                                   v->arch.hvm_svm.launch_core) )
+        {
             /* If we get here, we have a major problem */
             domain_crash_synchronous();
         }
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Thu Aug 17 19:17:50 2006 +0100
@@ -396,7 +396,7 @@ void svm_do_launch(struct vcpu *v)
     v->arch.hvm_svm.saved_irq_vector = -1;
 
     hvm_set_guest_time(v, 0);
-       
+
     if (svm_dbg_on)
         svm_dump_vmcb(__func__, vmcb);
 
@@ -427,24 +427,24 @@ void svm_dump_vmcb(const char *from, str
            vmcb->general1_intercepts, vmcb->general2_intercepts);
     printf("iopm_base_pa = %016llx msrpm_base_pa = 0x%016llx tsc_offset = "
             "0x%016llx\n", 
-           (unsigned long long) vmcb->iopm_base_pa,
-           (unsigned long long) vmcb->msrpm_base_pa,
-           (unsigned long long) vmcb->tsc_offset);
+           (unsigned long long) vmcb->iopm_base_pa,
+           (unsigned long long) vmcb->msrpm_base_pa,
+           (unsigned long long) vmcb->tsc_offset);
     printf("tlb_control = 0x%08x vintr = 0x%016llx interrupt_shadow = "
             "0x%016llx\n", vmcb->tlb_control,
-           (unsigned long long) vmcb->vintr.bytes,
-           (unsigned long long) vmcb->interrupt_shadow);
+           (unsigned long long) vmcb->vintr.bytes,
+           (unsigned long long) vmcb->interrupt_shadow);
     printf("exitcode = 0x%016llx exitintinfo = 0x%016llx\n", 
            (unsigned long long) vmcb->exitcode,
-          (unsigned long long) vmcb->exitintinfo.bytes);
+           (unsigned long long) vmcb->exitintinfo.bytes);
     printf("exitinfo1 = 0x%016llx exitinfo2 = 0x%016llx \n",
            (unsigned long long) vmcb->exitinfo1,
-          (unsigned long long) vmcb->exitinfo2);
+           (unsigned long long) vmcb->exitinfo2);
     printf("np_enable = 0x%016llx guest_asid = 0x%03x\n", 
            (unsigned long long) vmcb->np_enable, vmcb->guest_asid);
     printf("cpl = %d efer = 0x%016llx star = 0x%016llx lstar = 0x%016llx\n", 
            vmcb->cpl, (unsigned long long) vmcb->efer,
-          (unsigned long long) vmcb->star, (unsigned long long) vmcb->lstar);
+           (unsigned long long) vmcb->star, (unsigned long long) vmcb->lstar);
     printf("CR0 = 0x%016llx CR2 = 0x%016llx\n",
            (unsigned long long) vmcb->cr0, (unsigned long long) vmcb->cr2);
     printf("CR3 = 0x%016llx CR4 = 0x%016llx\n", 
@@ -460,7 +460,7 @@ void svm_dump_vmcb(const char *from, str
            (unsigned long long) vmcb->sfmask);
     printf("KernGSBase = 0x%016llx PAT = 0x%016llx \n", 
            (unsigned long long) vmcb->kerngsbase,
-          (unsigned long long) vmcb->g_pat);
+           (unsigned long long) vmcb->g_pat);
     
     /* print out all the selectors */
     svm_dump_sel("CS", &vmcb->cs);
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/hvm/svm/x86_32/exits.S
--- a/xen/arch/x86/hvm/svm/x86_32/exits.S       Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/hvm/svm/x86_32/exits.S       Thu Aug 17 19:17:50 2006 +0100
@@ -56,8 +56,8 @@
  * domain pointer, DS, ES, FS, GS. Therefore, we effectively skip 6 registers.
  */
 
-#define HVM_MONITOR_EFLAGS     0x202 /* IF on */
-#define NR_SKIPPED_REGS        6       /* See the above explanation */
+#define HVM_MONITOR_EFLAGS 0x202 /* IF on */
+#define NR_SKIPPED_REGS    6     /* See the above explanation */
 #define HVM_SAVE_ALL_NOSEGREGS \
         pushl $HVM_MONITOR_EFLAGS; \
         popf; \
@@ -95,8 +95,8 @@ ENTRY(svm_asm_do_launch)
         movl VCPU_svm_vmcb(%ebx), %ecx
         movl 24(%esp), %eax
         movl %eax, VMCB_rax(%ecx)
-       movl VCPU_processor(%ebx), %eax
-       movl root_vmcb_pa(,%eax,8), %eax
+        movl VCPU_processor(%ebx), %eax
+        movl root_vmcb_pa(,%eax,8), %eax
         VMSAVE
 
         movl VCPU_svm_vmcb_pa(%ebx), %eax
@@ -120,8 +120,8 @@ ENTRY(svm_asm_do_launch)
 
         GET_CURRENT(%eax)
 
-       movl VCPU_processor(%eax), %eax
-       movl root_vmcb_pa(,%eax,8), %eax
+        movl VCPU_processor(%eax), %eax
+        movl root_vmcb_pa(,%eax,8), %eax
         VMLOAD
 
         HVM_SAVE_ALL_NOSEGREGS
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/hvm/svm/x86_64/exits.S
--- a/xen/arch/x86/hvm/svm/x86_64/exits.S       Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/hvm/svm/x86_64/exits.S       Thu Aug 17 19:17:50 2006 +0100
@@ -52,8 +52,8 @@
  * (2/1)  u32 entry_vector;
  * (1/1)  u32 error_code;
  */
-#define HVM_MONITOR_RFLAGS     0x202 /* IF on */
-#define NR_SKIPPED_REGS        6       /* See the above explanation */
+#define HVM_MONITOR_RFLAGS 0x202 /* IF on */
+#define NR_SKIPPED_REGS    6     /* See the above explanation */
 #define HVM_SAVE_ALL_NOSEGREGS \
         pushq $HVM_MONITOR_RFLAGS; \
         popfq; \
@@ -105,10 +105,10 @@ ENTRY(svm_asm_do_launch)
         movq VCPU_svm_vmcb(%rbx), %rcx
         movq UREGS_rax(%rsp), %rax
         movq %rax, VMCB_rax(%rcx)
-       leaq root_vmcb_pa(%rip), %rax
-       movl VCPU_processor(%rbx), %ecx
-       shll $3, %ecx
-       addq %rcx, %rax
+        leaq root_vmcb_pa(%rip), %rax
+        movl VCPU_processor(%rbx), %ecx
+        shll $3, %ecx
+        addq %rcx, %rax
         VMSAVE
 
         movq VCPU_svm_vmcb_pa(%rbx), %rax
@@ -139,10 +139,10 @@ ENTRY(svm_asm_do_launch)
         HVM_SAVE_ALL_NOSEGREGS
 
         GET_CURRENT(%rbx)
-       movl VCPU_processor(%rbx), %ecx
-       leaq root_vmcb_pa(%rip), %rax
-       shll $3, %ecx
-       addq %rcx, %rax
+        movl VCPU_processor(%rbx), %ecx
+        leaq root_vmcb_pa(%rip), %rax
+        shll $3, %ecx
+        addq %rcx, %rax
         VMLOAD
 
         STGI
@@ -151,13 +151,13 @@ ENTRY(svm_asm_do_launch)
 
 ENTRY(svm_asm_do_resume)
 svm_test_all_events:
-       GET_CURRENT(%rbx)
+        GET_CURRENT(%rbx)
         movq %rbx, %rdi
         call hvm_do_resume
 /*test_all_events:*/
         cli                             # tests must not race interrupts
 /*test_softirqs:*/
-       movl  VCPU_processor(%rbx),%eax
+        movl  VCPU_processor(%rbx),%eax
         shl   $IRQSTAT_shift, %rax
         leaq  irq_stat(%rip), %rdx
         testl $~0, (%rdx, %rax, 1)
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c        Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/hvm/vioapic.c        Thu Aug 17 19:17:50 2006 +0100
@@ -44,7 +44,7 @@
 #define IRQ0_SPECIAL_ROUTING 1
 
 #if defined(__ia64__)
-#define        opt_hvm_debug_level     opt_vmx_debug_level
+#define opt_hvm_debug_level opt_vmx_debug_level
 #endif
 
 static void ioapic_enable(hvm_vioapic_t *s, uint8_t enable)
@@ -264,7 +264,7 @@ static void hvm_vioapic_reset(hvm_vioapi
 
     for (i = 0; i < IOAPIC_NUM_PINS; i++) {
         s->redirtbl[i].RedirForm.mask = 0x1;
-       hvm_vioapic_update_imr(s, i);
+        hvm_vioapic_update_imr(s, i);
     }
 }
 
@@ -364,7 +364,7 @@ static uint32_t ioapic_get_delivery_bitm
 
     if (dest_mode == 0) { /* Physical mode */
         for (i = 0; i < s->lapic_count; i++) {
-           if (VLAPIC_ID(s->lapic_info[i]) == dest) {
+            if (VLAPIC_ID(s->lapic_info[i]) == dest) {
                 mask = 1 << i;
                 break;
             }
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Aug 17 19:17:50 2006 +0100
@@ -146,7 +146,7 @@ static void vmx_relinquish_guest_resourc
 
     if ( d->arch.hvm_domain.shared_page_va )
         unmap_domain_page_global(
-               (void *)d->arch.hvm_domain.shared_page_va);
+            (void *)d->arch.hvm_domain.shared_page_va);
 
     if ( d->arch.hvm_domain.buffered_io_va )
         unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
@@ -595,7 +595,7 @@ static int vmx_instruction_length(struct
     unsigned long inst_len;
 
     if (__vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len))
-       return 0;
+        return 0;
     return inst_len;
 }
 
@@ -1094,7 +1094,7 @@ static int check_for_null_selector(unsig
 
 extern void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
                          unsigned long count, int size, long value,
-                        int dir, int pvalid);
+                         int dir, int pvalid);
 
 static void vmx_io_instruction(unsigned long exit_qualification,
                                unsigned long inst_len)
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/hvm/vmx/x86_32/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S       Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S       Thu Aug 17 19:17:50 2006 +0100
@@ -55,7 +55,7 @@
  * domain pointer, DS, ES, FS, GS. Therefore, we effectively skip 6 registers.
  */
 
-#define NR_SKIPPED_REGS        6       /* See the above explanation */
+#define NR_SKIPPED_REGS 6 /* See the above explanation */
 #define HVM_SAVE_ALL_NOSEGREGS                                              \
         subl $(NR_SKIPPED_REGS*4), %esp;                                    \
         movl $0, 0xc(%esp);  /* XXX why do we need to force eflags==0 ?? */ \
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/hvm/vmx/x86_64/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S       Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S       Thu Aug 17 19:17:50 2006 +0100
@@ -51,7 +51,7 @@
  * (2/1)  u32 entry_vector;
  * (1/1)  u32 error_code;
  */
-#define NR_SKIPPED_REGS        6       /* See the above explanation */
+#define NR_SKIPPED_REGS 6 /* See the above explanation */
 #define HVM_SAVE_ALL_NOSEGREGS                  \
         subq $(NR_SKIPPED_REGS*8), %rsp;        \
         pushq %rdi;                             \
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c       Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/i387.c       Thu Aug 17 19:17:50 2006 +0100
@@ -5,7 +5,7 @@
  *
  *  Pentium III FXSR, SSE support
  *  General FPU state handling cleanups
- *     Gareth Hughes <gareth@xxxxxxxxxxx>, May 2000
+ *  Gareth Hughes <gareth@xxxxxxxxxxx>, May 2000
  */
 
 #include <xen/config.h>
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/nmi.c
--- a/xen/arch/x86/nmi.c        Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/nmi.c        Thu Aug 17 19:17:50 2006 +0100
@@ -6,11 +6,11 @@
  *  Started by Ingo Molnar <mingo@xxxxxxxxxx>
  *
  *  Fixes:
- *  Mikael Pettersson  : AMD K7 support for local APIC NMI watchdog.
- *  Mikael Pettersson  : Power Management for local APIC NMI watchdog.
- *  Mikael Pettersson  : Pentium 4 support for local APIC NMI watchdog.
+ *  Mikael Pettersson : AMD K7 support for local APIC NMI watchdog.
+ *  Mikael Pettersson : Power Management for local APIC NMI watchdog.
+ *  Mikael Pettersson : Pentium 4 support for local APIC NMI watchdog.
  *  Pavel Machek and
- *  Mikael Pettersson  : PM converted to driver model. Disable/enable API.
+ *  Mikael Pettersson : PM converted to driver model. Disable/enable API.
  */
 
 #include <xen/config.h>
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/x86_32/entry.S
--- a/xen/arch/x86/x86_32/entry.S       Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/x86_32/entry.S       Thu Aug 17 19:17:50 2006 +0100
@@ -142,11 +142,11 @@ 1:      call  create_bounce_frame
         jmp   test_all_events
 .previous
 .section __pre_ex_table,"a"
-       .long FLT1,FIX1
-       .long FLT2,FIX1
-       .long FLT3,FIX1
-       .long FLT4,FIX1
-       .long FLT5,FIX5
+        .long FLT1,FIX1
+        .long FLT2,FIX1
+        .long FLT3,FIX1
+        .long FLT4,FIX1
+        .long FLT5,FIX5
 .previous
 .section __ex_table,"a"
         .long DBLFLT1,failsafe_callback
@@ -154,13 +154,13 @@ 1:      call  create_bounce_frame
 
         ALIGN
 restore_all_xen:
-       popl %ebx
-       popl %ecx
-       popl %edx
-       popl %esi
-       popl %edi
-       popl %ebp
-       popl %eax
+        popl %ebx
+        popl %ecx
+        popl %edx
+        popl %esi
+        popl %edi
+        popl %ebp
+        popl %eax
         addl $4,%esp
         iret
 
@@ -168,7 +168,7 @@ ENTRY(hypercall)
 ENTRY(hypercall)
         subl $4,%esp
         FIXUP_RING0_GUEST_STACK
-       SAVE_ALL(b)
+        SAVE_ALL(b)
         sti
         GET_CURRENT(%ebx)
         cmpl  $NR_hypercalls,%eax
@@ -244,8 +244,8 @@ process_softirqs:
         sti       
         call do_softirq
         jmp  test_all_events
-       
-       ALIGN
+
+        ALIGN
 process_nmi:
         movl VCPU_nmi_addr(%ebx),%eax
         test %eax,%eax
@@ -369,13 +369,13 @@ nvm86_3:/* Rewrite our stack frame and r
         movb $0,TRAPBOUNCE_flags(%edx)
         ret
 .section __ex_table,"a"
-       .long  FLT6,domain_crash_synchronous ,  FLT7,domain_crash_synchronous
+        .long  FLT6,domain_crash_synchronous ,  FLT7,domain_crash_synchronous
         .long  FLT8,domain_crash_synchronous ,  FLT9,domain_crash_synchronous
         .long FLT10,domain_crash_synchronous , FLT11,domain_crash_synchronous
         .long FLT12,domain_crash_synchronous , FLT13,domain_crash_synchronous
         .long FLT14,domain_crash_synchronous , FLT15,domain_crash_synchronous
         .long FLT16,domain_crash_synchronous , FLT17,domain_crash_synchronous
-       .long FLT18,domain_crash_synchronous , FLT19,domain_crash_synchronous
+        .long FLT18,domain_crash_synchronous , FLT19,domain_crash_synchronous
         .long FLT20,domain_crash_synchronous , FLT21,domain_crash_synchronous
         .long FLT22,domain_crash_synchronous , FLT23,domain_crash_synchronous
         .long FLT24,domain_crash_synchronous , FLT25,domain_crash_synchronous
@@ -399,8 +399,8 @@ ENTRY(ret_from_intr)
         jmp   restore_all_xen
 
 ENTRY(divide_error)
-       pushl $TRAP_divide_error<<16
-       ALIGN
+        pushl $TRAP_divide_error<<16
+        ALIGN
 handle_exception:
         FIXUP_RING0_GUEST_STACK
         SAVE_ALL_NOSEGREGS(a)
@@ -411,15 +411,15 @@ handle_exception:
         xorl  %eax,%eax
         movw  UREGS_entry_vector(%esp),%ax
         movl  %esp,%edx
-       pushl %edx                      # push the cpu_user_regs pointer
-       GET_CURRENT(%ebx)
+        pushl %edx                      # push the cpu_user_regs pointer
+        GET_CURRENT(%ebx)
         PERFC_INCR(PERFC_exceptions, %eax)
-       call  *exception_table(,%eax,4)
+        call  *exception_table(,%eax,4)
         addl  $4,%esp
         movl  UREGS_eflags(%esp),%eax
         movb  UREGS_cs(%esp),%al
         testl $(3|X86_EFLAGS_VM),%eax
-       jz    restore_all_xen
+        jz    restore_all_xen
         leal  VCPU_trap_bounce(%ebx),%edx
         testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
         jz    test_all_events
@@ -450,78 +450,78 @@ FATAL_exception_with_ints_disabled:
         xorl  %esi,%esi
         movw  UREGS_entry_vector(%esp),%si
         movl  %esp,%edx
-       pushl %edx                      # push the cpu_user_regs pointer
+        pushl %edx                      # push the cpu_user_regs pointer
         pushl %esi                      # push the trapnr (entry vector)
         call  fatal_trap
         ud2
                                         
 ENTRY(coprocessor_error)
-       pushl $TRAP_copro_error<<16
-       jmp   handle_exception
+        pushl $TRAP_copro_error<<16
+        jmp   handle_exception
 
 ENTRY(simd_coprocessor_error)
-       pushl $TRAP_simd_error<<16
-       jmp   handle_exception
+        pushl $TRAP_simd_error<<16
+        jmp   handle_exception
 
 ENTRY(device_not_available)
-       pushl $TRAP_no_device<<16
+        pushl $TRAP_no_device<<16
         jmp   handle_exception
 
 ENTRY(debug)
-       pushl $TRAP_debug<<16
-       jmp   handle_exception
+        pushl $TRAP_debug<<16
+        jmp   handle_exception
 
 ENTRY(int3)
-       pushl $TRAP_int3<<16
-       jmp   handle_exception
+        pushl $TRAP_int3<<16
+        jmp   handle_exception
 
 ENTRY(overflow)
-       pushl $TRAP_overflow<<16
-       jmp   handle_exception
+        pushl $TRAP_overflow<<16
+        jmp   handle_exception
 
 ENTRY(bounds)
-       pushl $TRAP_bounds<<16
-       jmp   handle_exception
+        pushl $TRAP_bounds<<16
+        jmp   handle_exception
 
 ENTRY(invalid_op)
-       pushl $TRAP_invalid_op<<16
-       jmp   handle_exception
+        pushl $TRAP_invalid_op<<16
+        jmp   handle_exception
 
 ENTRY(coprocessor_segment_overrun)
-       pushl $TRAP_copro_seg<<16
-       jmp   handle_exception
+        pushl $TRAP_copro_seg<<16
+        jmp   handle_exception
 
 ENTRY(invalid_TSS)
         movw  $TRAP_invalid_tss,2(%esp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(segment_not_present)
         movw  $TRAP_no_segment,2(%esp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(stack_segment)
         movw  $TRAP_stack_error,2(%esp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(general_protection)
         movw  $TRAP_gp_fault,2(%esp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(alignment_check)
         movw  $TRAP_alignment_check,2(%esp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(page_fault)
         movw  $TRAP_page_fault,2(%esp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(machine_check)
         pushl $TRAP_machine_check<<16
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(spurious_interrupt_bug)
         pushl $TRAP_spurious_int<<16
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(nmi)
 #ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
@@ -530,8 +530,8 @@ ENTRY(nmi)
 #else
         # Save state but do not trash the segment registers!
         # We may otherwise be unable to reload them or copy them to ring 1. 
-       pushl %eax
-       SAVE_ALL_NOSEGREGS(a)
+        pushl %eax
+        SAVE_ALL_NOSEGREGS(a)
 
         # We can only process the NMI if:
         #  A. We are the outermost Xen activation (in which case we have
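The 32-bit exception stubs above all follow one convention: vectors whose
hardware frame has no error code push a full 32-bit slot with the trap
number pre-shifted into the top half (pushl $TRAP_xxx<<16), while vectors
that do receive a hardware error code overwrite only the top half of the
slot already on the stack (movw $TRAP_xxx,2(%esp)). Either way,
handle_exception can read the vector back via UREGS_entry_vector. A small
C sketch of that 16:16 packing (illustrative macros, not Xen's):

    #include <stdint.h>
    #include <stdio.h>

    /* vector in the high 16 bits, hardware error code (or 0) below it */
    #define PACK_VECTOR(v)   ((uint32_t)(v) << 16)       /* pushl $T<<16 */
    #define SLOT_VECTOR(s)   ((uint16_t)((s) >> 16))
    #define SLOT_ERRCODE(s)  ((uint16_t)((s) & 0xffffu))

    int main(void)
    {
        uint32_t slot = PACK_VECTOR(13) | 0x002a; /* e.g. #GP, error 0x2a */
        printf("vector=%u errcode=%#x\n",
               (unsigned)SLOT_VECTOR(slot), (unsigned)SLOT_ERRCODE(slot));
        return 0;
    }

The 64-bit stubs below use the same idea widened to one 64-bit slot:
pushq $0 followed by movl $TRAP_xxx,4(%rsp) leaves the error code in the
low 32 bits and the vector in the high 32.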
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S       Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/arch/x86/x86_64/entry.S       Thu Aug 17 19:17:50 2006 +0100
@@ -87,7 +87,7 @@ 1:      call  create_bounce_frame
         jmp   test_all_events
 .previous
 .section __pre_ex_table,"a"
-       .quad FLT1,FIX1
+        .quad FLT1,FIX1
 .previous
 .section __ex_table,"a"
         .quad DBLFLT1,failsafe_callback
@@ -201,7 +201,7 @@ process_softirqs:
         call do_softirq
         jmp  test_all_events
 
-       ALIGN
+        ALIGN
 /* %rbx: struct vcpu */
 process_nmi:
         movq VCPU_nmi_addr(%rbx),%rax
@@ -396,12 +396,12 @@ ENTRY(coprocessor_error)
 ENTRY(coprocessor_error)
         pushq $0
         movl  $TRAP_copro_error,4(%rsp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(simd_coprocessor_error)
         pushq $0
         movl  $TRAP_simd_error,4(%rsp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(device_not_available)
         pushq $0
@@ -411,66 +411,66 @@ ENTRY(debug)
 ENTRY(debug)
         pushq $0
         movl  $TRAP_debug,4(%rsp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(int3)
         pushq $0
-       movl  $TRAP_int3,4(%rsp)
-       jmp   handle_exception
+        movl  $TRAP_int3,4(%rsp)
+        jmp   handle_exception
 
 ENTRY(overflow)
         pushq $0
-       movl  $TRAP_overflow,4(%rsp)
-       jmp   handle_exception
+        movl  $TRAP_overflow,4(%rsp)
+        jmp   handle_exception
 
 ENTRY(bounds)
         pushq $0
-       movl  $TRAP_bounds,4(%rsp)
-       jmp   handle_exception
+        movl  $TRAP_bounds,4(%rsp)
+        jmp   handle_exception
 
 ENTRY(invalid_op)
         pushq $0
-       movl  $TRAP_invalid_op,4(%rsp)
-       jmp   handle_exception
+        movl  $TRAP_invalid_op,4(%rsp)
+        jmp   handle_exception
 
 ENTRY(coprocessor_segment_overrun)
         pushq $0
-       movl  $TRAP_copro_seg,4(%rsp)
-       jmp   handle_exception
+        movl  $TRAP_copro_seg,4(%rsp)
+        jmp   handle_exception
 
 ENTRY(invalid_TSS)
         movl  $TRAP_invalid_tss,4(%rsp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(segment_not_present)
         movl  $TRAP_no_segment,4(%rsp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(stack_segment)
         movl  $TRAP_stack_error,4(%rsp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(general_protection)
         movl  $TRAP_gp_fault,4(%rsp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(alignment_check)
         movl  $TRAP_alignment_check,4(%rsp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(page_fault)
         movl  $TRAP_page_fault,4(%rsp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(machine_check)
         pushq $0
         movl  $TRAP_machine_check,4(%rsp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(spurious_interrupt_bug)
         pushq $0
         movl  $TRAP_spurious_int,4(%rsp)
-       jmp   handle_exception
+        jmp   handle_exception
 
 ENTRY(double_fault)
         SAVE_ALL
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/drivers/video/vga.c
--- a/xen/drivers/video/vga.c   Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/drivers/video/vga.c   Thu Aug 17 19:17:50 2006 +0100
@@ -484,14 +484,14 @@ static int vga_load_font(const struct fo
     /* First, the Sequencer */
     vga_wseq(vgabase, VGA_SEQ_RESET, 0x1);
     /* CPU writes only to map 2 */
-    vga_wseq(vgabase, VGA_SEQ_PLANE_WRITE, 0x04);      
+    vga_wseq(vgabase, VGA_SEQ_PLANE_WRITE, 0x04);
     /* Sequential addressing */
-    vga_wseq(vgabase, VGA_SEQ_MEMORY_MODE, 0x07);      
+    vga_wseq(vgabase, VGA_SEQ_MEMORY_MODE, 0x07);
     /* Clear synchronous reset */
     vga_wseq(vgabase, VGA_SEQ_RESET, 0x03);
 
     /* Now, the graphics controller, select map 2 */
-    vga_wgfx(vgabase, VGA_GFX_PLANE_READ, 0x02);               
+    vga_wgfx(vgabase, VGA_GFX_PLANE_READ, 0x02);
     /* disable odd-even addressing */
     vga_wgfx(vgabase, VGA_GFX_MODE, 0x00);
     /* map start at A000:0000 */
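For readers following the comments in this hunk: the driver is performing
the classic VGA font-access sequence. A compact sketch of the same steps
(standard VGA index/data ports; the outb stub and function names here are
hypothetical stand-ins, not the Xen driver):

    #include <stdint.h>

    /* hypothetical port-I/O stub; real code uses the platform's outb */
    static void outb(uint8_t val, uint16_t port) { (void)val; (void)port; }

    static void wseq(uint8_t idx, uint8_t val)  /* sequencer 0x3C4/0x3C5 */
    {
        outb(idx, 0x3c4);
        outb(val, 0x3c5);
    }

    static void wgfx(uint8_t idx, uint8_t val)  /* graphics 0x3CE/0x3CF */
    {
        outb(idx, 0x3ce);
        outb(val, 0x3cf);
    }

    static void font_access_begin(void)
    {
        wseq(0x00, 0x01);  /* SEQ_RESET: enter synchronous reset     */
        wseq(0x02, 0x04);  /* SEQ_PLANE_WRITE: CPU writes map 2 only */
        wseq(0x04, 0x07);  /* SEQ_MEMORY_MODE: sequential addressing */
        wseq(0x00, 0x03);  /* SEQ_RESET: clear synchronous reset     */
        wgfx(0x04, 0x02);  /* GFX_PLANE_READ: select map 2           */
        wgfx(0x05, 0x00);  /* GFX_MODE: disable odd-even addressing  */
    }

    int main(void) { font_access_begin(); return 0; }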
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/include/asm-x86/grant_table.h
--- a/xen/include/asm-x86/grant_table.h Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/include/asm-x86/grant_table.h Thu Aug 17 19:17:50 2006 +0100
@@ -35,7 +35,7 @@ int destroy_grant_host_mapping(
 
 static inline void gnttab_clear_flag(unsigned long nr, uint16_t *addr)
 {
-       clear_bit(nr, addr);
+    clear_bit(nr, addr);
 }
 
 #endif /* __ASM_GRANT_TABLE_H__ */
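gnttab_clear_flag() is a thin wrapper around clear_bit(), i.e. an atomic
clear of bit nr counting from addr. A portable sketch of those semantics
using C11 atomics (a stand-in, not Xen's clear_bit() implementation):

    #include <stdatomic.h>
    #include <stdint.h>

    /* atomically clear bit <nr>, counting from the 16-bit words at <addr> */
    static void sketch_clear_bit(unsigned long nr, _Atomic uint16_t *addr)
    {
        atomic_fetch_and(&addr[nr / 16], (uint16_t)~(1u << (nr % 16)));
    }

    int main(void)
    {
        _Atomic uint16_t flags[2] = { 0xffff, 0xffff };
        sketch_clear_bit(17, flags);   /* clears bit 1 of flags[1] */
        return 0;
    }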
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/include/asm-x86/hvm/support.h Thu Aug 17 19:17:50 2006 +0100
@@ -32,7 +32,7 @@
 #define HVM_DEBUG 1
 #endif
 
-#define        hvm_guest(v)    ((v)->arch.guest_context.flags & VGCF_HVM_GUEST)
+#define hvm_guest(v) ((v)->arch.guest_context.flags & VGCF_HVM_GUEST)
 
 static inline shared_iopage_t *get_sp(struct domain *d)
 {
diff -r 7ecfd9b1f641 -r 043a4aa24781 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Thu Aug 17 19:17:30 2006 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Thu Aug 17 19:17:50 2006 +0100
@@ -373,7 +373,7 @@ struct vmcb_struct {
     u32 guest_asid;             /* offset 0x58 */
     u8  tlb_control;            /* offset 0x5C */
     u8  res07[3];
-    vintr_t vintr;             /* offset 0x60 */
+    vintr_t vintr;              /* offset 0x60 */
     u64 interrupt_shadow;       /* offset 0x68 */
     u64 exitcode;               /* offset 0x70 */
     u64 exitinfo1;              /* offset 0x78 */
@@ -399,9 +399,9 @@ struct vmcb_struct {
     u8 res11[3];
     u8 cpl;
     u32 res12;
-    u64 efer;                  /* offset 1024 + 0xD0 */
+    u64 efer;                   /* offset 1024 + 0xD0 */
     u64 res13[14];
-    u64 cr4;                   /* loffset 1024 + 0x148 */
+    u64 cr4;                    /* offset 1024 + 0x148 */
     u64 cr3;
     u64 cr0;
     u64 dr7;
@@ -433,7 +433,7 @@ struct vmcb_struct {
 
 
 struct arch_svm_struct {
-    struct vmcb_struct *vmcb;
+    struct vmcb_struct *vmcb;
     u64                 vmcb_pa;
     u32                 *iopm;
     u32                 *msrpm;
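The comment columns realigned in vmcb_struct carry real information: each
field's byte offset within the hardware-defined VMCB. Offsets like these
can be verified at build time; a cut-down sketch with stand-in fields
(compile-time checks in the BUILD_BUG_ON spirit, not the actual header):

    #include <stddef.h>
    #include <stdint.h>

    struct vmcb_sketch {
        uint8_t  control[0x60];      /* stand-in for fields below 0x60 */
        uint64_t vintr;              /* offset 0x60 */
        uint64_t interrupt_shadow;   /* offset 0x68 */
        uint64_t exitcode;           /* offset 0x70 */
    };

    /* fails to compile if a comment and the real offset ever disagree */
    typedef char chk60[offsetof(struct vmcb_sketch, vintr) == 0x60 ? 1 : -1];
    typedef char chk68[offsetof(struct vmcb_sketch, interrupt_shadow) == 0x68 ? 1 : -1];
    typedef char chk70[offsetof(struct vmcb_sketch, exitcode) == 0x70 ? 1 : -1];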

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
