WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] svm: Remove special 'launch' scheduler tail function.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] svm: Remove special 'launch' scheduler tail function.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 20 Mar 2007 09:50:23 -0700
Delivery-date: Tue, 20 Mar 2007 09:51:34 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1174390125 0
# Node ID cabf9e221cd506b1f23609a8a696a08124901d7a
# Parent  4e380c76977476b36d92dddd388e9091f0410e38
svm: Remove special 'launch' scheduler tail function.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/svm.c          |   52 +++++++++++++-----------------------
 xen/arch/x86/hvm/svm/vmcb.c         |   14 ++-------
 xen/arch/x86/hvm/svm/x86_32/exits.S |   40 +++++++++------------------
 xen/arch/x86/hvm/svm/x86_64/exits.S |   34 ++++++++---------------
 xen/include/asm-x86/hvm/svm/svm.h   |    2 -
 xen/include/asm-x86/hvm/svm/vmcb.h  |    2 -
 6 files changed, 48 insertions(+), 96 deletions(-)

diff -r 4e380c769774 -r cabf9e221cd5 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Mon Mar 19 16:55:21 2007 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue Mar 20 11:28:45 2007 +0000
@@ -485,7 +485,6 @@ int svm_vmcb_restore(struct vcpu *v, str
          * first.
          */
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64"", c->cr3);
-        /* current!=vcpu as not called by arch_vmx_do_launch */
         mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
         if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) ) 
             goto bad_cr3;
@@ -921,17 +920,6 @@ static void svm_load_cpu_guest_regs(
     svm_load_cpu_user_regs(v, regs);
 }
 
-static void arch_svm_do_launch(struct vcpu *v) 
-{
-    svm_do_launch(v);
-
-    if ( paging_mode_hap(v->domain) ) {
-        v->arch.hvm_svm.vmcb->h_cr3 = 
pagetable_get_paddr(v->domain->arch.phys_table);
-    }
-
-    reset_stack_and_jump(svm_asm_do_launch);
-}
-
 static void svm_ctxt_switch_from(struct vcpu *v)
 {
     svm_save_dr(v);
@@ -953,15 +941,29 @@ static void svm_ctxt_switch_to(struct vc
     svm_restore_dr(v);
 }
 
+static void arch_svm_do_resume(struct vcpu *v) 
+{
+    if ( v->arch.hvm_svm.launch_core != smp_processor_id() )
+    {
+        v->arch.hvm_svm.launch_core = smp_processor_id();
+        hvm_migrate_timers(v);
+    }
+
+    hvm_do_resume(v);
+    reset_stack_and_jump(svm_asm_do_resume);
+}
+
 static int svm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
 
-    v->arch.schedule_tail    = arch_svm_do_launch;
+    v->arch.schedule_tail    = arch_svm_do_resume;
     v->arch.ctxt_switch_from = svm_ctxt_switch_from;
     v->arch.ctxt_switch_to   = svm_ctxt_switch_to;
 
     v->arch.hvm_svm.saved_irq_vector = -1;
+
+    v->arch.hvm_svm.launch_core = -1;
 
     if ( (rc = svm_create_vmcb(v)) != 0 )
     {
@@ -1026,10 +1028,12 @@ void svm_npt_detect(void)
 
     /* check CPUID for nested paging support */
     cpuid(0x8000000A, &eax, &ebx, &ecx, &edx);
-    if ( edx & 0x01 ) { /* nested paging */
+    if ( edx & 0x01 ) /* nested paging */
+    {
         hap_capable_system = 1;
     }
-    else if ( opt_hap_enabled ) {
+    else if ( opt_hap_enabled )
+    {
         printk(" nested paging is not supported by this CPU.\n");
         hap_capable_system = 0; /* no nested paging, we disable flag. */
     }
@@ -1085,24 +1089,6 @@ int start_svm(void)
     hvm_enable(&svm_function_table);
 
     return 1;
-}
-
-void arch_svm_do_resume(struct vcpu *v) 
-{
-    /* pinning VCPU to a different core? */
-    if ( v->arch.hvm_svm.launch_core == smp_processor_id()) {
-        hvm_do_resume( v );
-        reset_stack_and_jump( svm_asm_do_resume );
-    }
-    else {
-        if (svm_dbg_on)
-            printk("VCPU core pinned: %d to %d\n", 
-                   v->arch.hvm_svm.launch_core, smp_processor_id() );
-        v->arch.hvm_svm.launch_core = smp_processor_id();
-        hvm_migrate_timers( v );
-        hvm_do_resume( v );
-        reset_stack_and_jump( svm_asm_do_resume );
-    }
 }
 
 static int svm_do_nested_pgfault(paddr_t gpa, struct cpu_user_regs *regs)
diff -r 4e380c769774 -r cabf9e221cd5 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Mon Mar 19 16:55:21 2007 +0000
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Tue Mar 20 11:28:45 2007 +0000
@@ -196,11 +196,13 @@ static int construct_vmcb(struct vcpu *v
 
     arch_svm->vmcb->exception_intercepts = MONITOR_DEFAULT_EXCEPTION_BITMAP;
 
-    if ( paging_mode_hap(v->domain) ) {
+    if ( paging_mode_hap(v->domain) )
+    {
         vmcb->cr0 = arch_svm->cpu_shadow_cr0;
         vmcb->np_enable = 1; /* enable nested paging */
         vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
         vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_PG;
+        vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
     }
 
     return 0;
@@ -245,16 +247,6 @@ void svm_destroy_vmcb(struct vcpu *v)
     }
 
     arch_svm->vmcb = NULL;
-}
-
-void svm_do_launch(struct vcpu *v)
-{
-    hvm_stts(v);
-
-    /* current core is the one we intend to perform the VMRUN on */
-    v->arch.hvm_svm.launch_core = smp_processor_id();
-
-    v->arch.schedule_tail = arch_svm_do_resume;
 }
 
 static void svm_dump_sel(char *name, svm_segment_register_t *s)
diff -r 4e380c769774 -r cabf9e221cd5 xen/arch/x86/hvm/svm/x86_32/exits.S
--- a/xen/arch/x86/hvm/svm/x86_32/exits.S       Mon Mar 19 16:55:21 2007 +0000
+++ b/xen/arch/x86/hvm/svm/x86_32/exits.S       Tue Mar 20 11:28:45 2007 +0000
@@ -80,15 +80,24 @@
         popl %eax;  \
         addl $(NR_SKIPPED_REGS*4), %esp
 
-        ALIGN
-
 #define VMRUN  .byte 0x0F,0x01,0xD8
 #define VMLOAD .byte 0x0F,0x01,0xDA
 #define VMSAVE .byte 0x0F,0x01,0xDB
 #define STGI   .byte 0x0F,0x01,0xDC
 #define CLGI   .byte 0x0F,0x01,0xDD
 
-ENTRY(svm_asm_do_launch)
+ENTRY(svm_asm_do_resume)
+        GET_CURRENT(%ebx)
+        xorl %ecx,%ecx
+        notl %ecx
+        cli                             # tests must not race interrupts
+        movl VCPU_processor(%ebx),%eax
+        shl  $IRQSTAT_shift,%eax
+        test %ecx,irq_stat(%eax,1)
+        jnz  svm_process_softirqs
+        call svm_intr_assist
+        call svm_load_cr2
+
         CLGI                
         sti
         GET_CURRENT(%ebx)
@@ -135,30 +144,7 @@ svm_stgi_label:
         jmp  svm_asm_do_resume
 
         ALIGN
-
-ENTRY(svm_asm_do_resume)
-svm_test_all_events:
-        GET_CURRENT(%ebx)
-/*test_all_events:*/
-        xorl %ecx,%ecx
-        notl %ecx
-        cli                             # tests must not race interrupts
-/*test_softirqs:*/  
-        movl VCPU_processor(%ebx),%eax
-        shl  $IRQSTAT_shift,%eax
-        test %ecx,irq_stat(%eax,1)
-        jnz  svm_process_softirqs
-svm_restore_all_guest:
-        call svm_intr_assist
-        call svm_load_cr2
-        /* 
-         * Check if we are going back to AMD-V based VM
-         * By this time, all the setups in the VMCB must be complete.
-         */
-        jmp svm_asm_do_launch
-
-        ALIGN
 svm_process_softirqs:
         sti       
         call do_softirq
-        jmp  svm_test_all_events
+        jmp  svm_asm_do_resume
diff -r 4e380c769774 -r cabf9e221cd5 xen/arch/x86/hvm/svm/x86_64/exits.S
--- a/xen/arch/x86/hvm/svm/x86_64/exits.S       Mon Mar 19 16:55:21 2007 +0000
+++ b/xen/arch/x86/hvm/svm/x86_64/exits.S       Tue Mar 20 11:28:45 2007 +0000
@@ -98,7 +98,17 @@
 #define STGI   .byte 0x0F,0x01,0xDC
 #define CLGI   .byte 0x0F,0x01,0xDD
 
-ENTRY(svm_asm_do_launch)
+ENTRY(svm_asm_do_resume)
+        GET_CURRENT(%rbx)
+        cli                             # tests must not race interrupts
+        movl VCPU_processor(%rbx),%eax
+        shl  $IRQSTAT_shift, %rax
+        leaq irq_stat(%rip), %rdx
+        testl $~0, (%rdx, %rax, 1)
+        jnz  svm_process_softirqs
+        call svm_intr_assist
+        call svm_load_cr2
+
         CLGI                
         sti
         GET_CURRENT(%rbx)
@@ -150,28 +160,8 @@ svm_stgi_label:
         call svm_vmexit_handler
         jmp  svm_asm_do_resume
 
-ENTRY(svm_asm_do_resume)
-svm_test_all_events:
-        GET_CURRENT(%rbx)
-/*test_all_events:*/
-        cli                             # tests must not race interrupts
-/*test_softirqs:*/
-        movl  VCPU_processor(%rbx),%eax
-        shl   $IRQSTAT_shift, %rax
-        leaq  irq_stat(%rip), %rdx
-        testl $~0, (%rdx, %rax, 1)
-        jnz   svm_process_softirqs
-svm_restore_all_guest:
-        call svm_intr_assist
-        call svm_load_cr2
-        /*
-         * Check if we are going back to AMD-V based VM
-         * By this time, all the setups in the VMCB must be complete.
-         */
-        jmp svm_asm_do_launch
-
         ALIGN
 svm_process_softirqs:
         sti
         call do_softirq
-        jmp  svm_test_all_events
+        jmp  svm_asm_do_resume
diff -r 4e380c769774 -r cabf9e221cd5 xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h Mon Mar 19 16:55:21 2007 +0000
+++ b/xen/include/asm-x86/hvm/svm/svm.h Tue Mar 20 11:28:45 2007 +0000
@@ -29,8 +29,6 @@
 #include <asm/i387.h>
 
 extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
-extern void svm_do_launch(struct vcpu *v);
-extern void arch_svm_do_resume(struct vcpu *v);
 
 extern u64 root_vmcb_pa[NR_CPUS];
 
diff -r 4e380c769774 -r cabf9e221cd5 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Mon Mar 19 16:55:21 2007 +0000
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Tue Mar 20 11:28:45 2007 +0000
@@ -447,7 +447,7 @@ struct arch_svm_struct {
     u32                 *msrpm;
     u64                 vmexit_tsc; /* tsc read at #VMEXIT. for TSC_OFFSET */
     int                 saved_irq_vector;
-    u32                 launch_core;
+    int                 launch_core;
     
     unsigned long       flags;            /* VMCB flags */
     unsigned long       cpu_shadow_cr0;   /* Guest value for CR0 */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[<Prev in Thread] [Current Thread] [Next in Thread>]
  • [Xen-changelog] [xen-unstable] svm: Remove special 'launch' scheduler tail function., Xen patchbot-unstable <=