[Xen-devel] [PATCH 3/10] Add HVM support

To: "Keir Fraser" <keir@xxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 3/10] Add HVM support
From: "Tian, Kevin" <kevin.tian@xxxxxxxxx>
Date: Wed, 27 Jun 2007 21:33:31 +0800
Cc: xen-devel@xxxxxxxxxxxxxxxxxxx
Delivery-date: Wed, 27 Jun 2007 06:31:34 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
Thread-index: Ace4v8AvEHvNElKqQi6Qqle8QqzaiA==
Thread-topic: [PATCH 3/10] Add HVM support
Add suspend/resume support for the HVM hardware feature: introduce optional suspend_domain/resume_cpu hooks in hvm_function_table, implement them for VMX, and call them from the ACPI sleep/resume path.

Signed-off-by: Ke Yu <ke.yu@xxxxxxxxx>
Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>
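
In outline (a condensed sketch of the hunks below, not an exact quote of the
patch): two optional hooks are added to hvm_function_table and called from
the ACPI S-state path.

    /* New optional hooks; a backend that leaves the pointers NULL gets
     * the default "success" result from the inline wrappers in hvm.h. */
    struct hvm_function_table {
        ...
        int  (*suspend_domain)(struct domain *d);  /* quiesce one HVM guest  */
        int  (*resume_cpu)(void);                  /* re-enable HVM on a CPU */
    };

    /* Sleep entry (xen/arch/x86/acpi/power.c:enter_state): pause each domU,
     * then quiesce its HVM state; for VMX this clears every vcpu's VMCS so
     * no physical CPU still holds it across the sleep. */
    if (is_hvm_domain(d) && !hvm_suspend_domain(d))
        goto Unpause;                              /* roll back the pauses */

    /* Resume (xen/arch/x86/acpi/suspend.c:restore_rest_processor_state):
     * each CPU re-executes VMXON on its saved per-cpu host VMCS. */
    if (hvm_enabled)
        hvm_resume_cpu();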

diff -r f217aafc1c17 xen/arch/x86/acpi/power.c
--- a/xen/arch/x86/acpi/power.c Mon Jun 25 13:28:41 2007 -0400
+++ b/xen/arch/x86/acpi/power.c Mon Jun 25 17:36:32 2007 -0400
@@ -83,7 +83,7 @@ static void device_power_up(void)
 /* Main interface to do xen specific suspend/resume */
 int enter_state(u32 state)
 {
-    struct domain *d;
+    struct domain *d, *pd = NULL;
     unsigned long flags;
     int error;
 
@@ -99,7 +99,15 @@ int enter_state(u32 state)
     
     for_each_domain(d)
         if (d->domain_id != 0)
+        {
             domain_pause(d);
+            pd = d;
+            if (is_hvm_domain(d) && !hvm_suspend_domain(d))
+            {
+                error = -EINVAL;
+                goto Unpause;
+            }
+        }
 
     pmprintk(XENLOG_INFO, "PM: Preparing system for %s sleep\n",
         acpi_states[state]);
@@ -133,13 +141,22 @@ int enter_state(u32 state)
  Done:
     local_irq_restore(flags);
 
-    for_each_domain(d)
-       if (d->domain_id!=0)
-           domain_unpause(d);
+ Unpause:
+    if (pd)
+    {
+       for_each_domain(d)
+       {
+           if (d->domain_id != 0)
+               domain_unpause(d);
+
+           /* Stop after unpausing the last domain we paused */
+           if (d == pd)
+               break;
+       }
+    }
 
     spin_unlock(&pm_lock);
     return error;
-
 }
 
 /*
diff -r f217aafc1c17 xen/arch/x86/acpi/suspend.c
--- a/xen/arch/x86/acpi/suspend.c       Mon Jun 25 13:28:41 2007 -0400
+++ b/xen/arch/x86/acpi/suspend.c       Mon Jun 25 14:52:17 2007 -0400
@@ -82,4 +82,6 @@ void restore_rest_processor_state(void)
 
     mtrr_ap_init();
     mcheck_init(&boot_cpu_data);
+    if (hvm_enabled)
+        hvm_resume_cpu();
 }
diff -r f217aafc1c17 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Mon Jun 25 13:28:41 2007 -0400
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Mon Jun 25 14:03:32 2007 -0400
@@ -178,7 +178,7 @@ static void __vmx_clear_vmcs(void *info)
     v->arch.hvm_vmx.launched   = 0;
 }
 
-static void vmx_clear_vmcs(struct vcpu *v)
+void vmx_clear_vmcs(struct vcpu *v)
 {
     int cpu = v->arch.hvm_vmx.active_cpu;
 
diff -r f217aafc1c17 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Mon Jun 25 13:28:41 2007 -0400
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Mon Jun 25 17:38:49 2007 -0400
@@ -53,6 +53,11 @@
 
 char *vmx_msr_bitmap;
 
+static DEFINE_PER_CPU(struct vmcs_struct*, host_vmcs);
+
+static int vmx_suspend_domain(struct domain *d);
+static int vmx_resume_cpu(void);
+
 static void vmx_ctxt_switch_from(struct vcpu *v);
 static void vmx_ctxt_switch_to(struct vcpu *v);
 
@@ -1211,7 +1216,9 @@ static struct hvm_function_table vmx_fun
     .inject_exception     = vmx_inject_exception,
     .init_ap_context      = vmx_init_ap_context,
     .init_hypercall_page  = vmx_init_hypercall_page,
-    .event_injection_faulted = vmx_event_injection_faulted
+    .event_injection_faulted = vmx_event_injection_faulted,
+    .suspend_domain       = vmx_suspend_domain,
+    .resume_cpu           = vmx_resume_cpu,
 };
 
 int start_vmx(void)
@@ -1265,6 +1272,8 @@ int start_vmx(void)
         vmx_free_host_vmcs(vmcs);
         return 0;
     }
+
+    this_cpu(host_vmcs) = vmcs;
 
     vmx_save_host_msrs();
 
@@ -3013,6 +3022,44 @@ asmlinkage void vmx_trace_vmentry(void)
     HVMTRACE_0D(VMENTRY, v);
 }
 
+/* Suspend target domain with its VMCS synced (cleared from any CPU) */
+static int vmx_suspend_domain(struct domain *d)
+{
+    struct vcpu *v;
+
+    if (!is_hvm_domain(d))
+        return 1;
+
+    if (!atomic_read(&d->pause_count))
+        return 0;
+
+    for_each_vcpu(d, v)
+    {
+        spin_lock(&v->arch.hvm_vmx.vmcs_lock);
+        vmx_clear_vmcs(v);
+        spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
+    }
+
+    return 1;
+}
+
+/* Re-enable VMX on the current CPU after resume */
+static int vmx_resume_cpu(void)
+{
+    struct vmcs_struct *vmcs = this_cpu(host_vmcs);
+
+    if ( __vmxon(virt_to_maddr(vmcs)) )
+    {
+        clear_in_cr4(X86_CR4_VMXE);
+        printk("VMXON failed\n");
+        vmx_free_host_vmcs(vmcs);
+        return 0;
+    }
+
+    printk("VMXON is done\n");
+    return 1;
+}
+
 /*
  * Local variables:
  * mode: C
diff -r f217aafc1c17 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Mon Jun 25 13:28:41 2007 -0400
+++ b/xen/include/asm-x86/hvm/hvm.h     Mon Jun 25 15:27:34 2007 -0400
@@ -145,6 +145,10 @@ struct hvm_function_table {
     void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
 
     int  (*event_injection_faulted)(struct vcpu *v);
+
+    int  (*suspend_domain)(struct domain *d);
+
+    int  (*resume_cpu)(void);
 };
 
 extern struct hvm_function_table hvm_funcs;
@@ -279,4 +283,26 @@ static inline int hvm_event_injection_fa
     return hvm_funcs.event_injection_faulted(v);
 }
 
+static inline int
+hvm_suspend_domain(struct domain *d)
+{
+    int ret = 1;
+
+    if (hvm_funcs.suspend_domain)
+        ret = hvm_funcs.suspend_domain(d);
+
+    return ret;
+}
+
+static inline int
+hvm_resume_cpu(void)
+{
+    int ret = 1;
+
+    if (hvm_funcs.resume_cpu)
+        ret = hvm_funcs.resume_cpu();
+    
+    return ret;
+}
+
 #endif /* __ASM_X86_HVM_HVM_H__ */
diff -r f217aafc1c17 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Mon Jun 25 13:28:41 2007 -0400
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Mon Jun 25 14:03:32 2007 -0400
@@ -28,6 +28,7 @@ extern void vmcs_dump_vcpu(void);
 extern void vmcs_dump_vcpu(void);
 extern void vmx_init_vmcs_config(void);
 extern void setup_vmcs_dump(void);
+extern void vmx_clear_vmcs(struct vcpu *v);
 
 struct vmcs_struct {
     u32 vmcs_revision_id;

Attachment: hvm_context.patch
Description: hvm_context.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel