To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Add HVM hardware feature suspend/resume.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 27 Jul 2007 02:44:11 -0700
Delivery-date: Fri, 27 Jul 2007 02:42:13 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1184170989 -3600
# Node ID ad11f74d298c3afe7a5778b9fcf4f8a000a9d0eb
# Parent  24379dde8ac4b58389121c38ee56d46e7ea84b17
Add HVM hardware feature suspend/resume.
Signed-off-by: Ke Yu <ke.yu@xxxxxxxxx>
Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/acpi/power.c          |   33 ++++++++----
 xen/arch/x86/hvm/hvm.c             |    3 -
 xen/arch/x86/hvm/svm/svm.c         |    7 +-
 xen/arch/x86/hvm/vmx/vmcs.c        |  101 +++++++++++++++++++++++++++++--------
 xen/arch/x86/hvm/vmx/vmx.c         |   14 +----
 xen/include/asm-x86/hvm/hvm.h      |   20 +++++--
 xen/include/asm-x86/hvm/vmx/vmcs.h |    3 +
 7 files changed, 129 insertions(+), 52 deletions(-)

diff -r 24379dde8ac4 -r ad11f74d298c xen/arch/x86/acpi/power.c
--- a/xen/arch/x86/acpi/power.c Wed Jul 11 15:47:14 2007 +0100
+++ b/xen/arch/x86/acpi/power.c Wed Jul 11 17:23:09 2007 +0100
@@ -82,10 +82,27 @@ static void device_power_up(void)
     console_resume();
 }
 
+static void freeze_domains(void)
+{
+    struct domain *d;
+
+    for_each_domain(d)
+        if (d->domain_id != 0)
+            domain_pause(d);
+}
+
+static void thaw_domains(void)
+{
+    struct domain *d;
+
+    for_each_domain(d)
+        if (d->domain_id != 0)
+            domain_unpause(d);
+}
+
 /* Main interface to do xen specific suspend/resume */
 int enter_state(u32 state)
 {
-    struct domain *d;
     unsigned long flags;
     int error;
 
@@ -99,9 +116,9 @@ int enter_state(u32 state)
     if (!spin_trylock(&pm_lock))
         return -EBUSY;
     
-    for_each_domain(d)
-        if (d->domain_id != 0)
-            domain_pause(d);
+    freeze_domains();
+
+    hvm_suspend_cpu();
 
     pmprintk(XENLOG_INFO, "PM: Preparing system for %s sleep\n",
         acpi_states[state]);
@@ -135,13 +152,11 @@ int enter_state(u32 state)
  Done:
     local_irq_restore(flags);
 
-    for_each_domain(d)
-       if (d->domain_id!=0)
-           domain_unpause(d);
-
+    hvm_resume_cpu();
+
+    thaw_domains();
     spin_unlock(&pm_lock);
     return error;
-
 }
 
 /*
diff -r 24379dde8ac4 -r ad11f74d298c xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Jul 11 15:47:14 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Wed Jul 11 17:23:09 2007 +0100
@@ -78,8 +78,7 @@ void hvm_enable(struct hvm_function_tabl
 
 void hvm_disable(void)
 {
-    if ( hvm_enabled )
-        hvm_funcs.disable();
+    hvm_suspend_cpu();
 }
 
 void hvm_stts(struct vcpu *v)
diff -r 24379dde8ac4 -r ad11f74d298c xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Jul 11 15:47:14 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Jul 11 17:23:09 2007 +0100
@@ -94,9 +94,8 @@ static void svm_inject_exception(struct 
     vmcb->eventinj = event;
 }
 
-static void stop_svm(void)
-{
-    /* We turn off the EFER_SVME bit. */
+static void svm_suspend_cpu(void)
+{
     write_efer(read_efer() & ~EFER_SVME);
 }
 
@@ -974,7 +973,7 @@ static int svm_event_injection_faulted(s
 
 static struct hvm_function_table svm_function_table = {
     .name                 = "SVM",
-    .disable              = stop_svm,
+    .suspend_cpu          = svm_suspend_cpu,
     .domain_initialise    = svm_domain_initialise,
     .domain_destroy       = svm_domain_destroy,
     .vcpu_initialise      = svm_vcpu_initialise,
diff -r 24379dde8ac4 -r ad11f74d298c xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Wed Jul 11 15:47:14 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Wed Jul 11 17:23:09 2007 +0100
@@ -45,7 +45,9 @@ u32 vmx_vmentry_control __read_mostly;
 u32 vmx_vmentry_control __read_mostly;
 bool_t cpu_has_vmx_ins_outs_instr_info __read_mostly;
 
+static DEFINE_PER_CPU(struct vmcs_struct *, host_vmcs);
 static DEFINE_PER_CPU(struct vmcs_struct *, current_vmcs);
+static DEFINE_PER_CPU(struct list_head, active_vmcs_list);
 
 static u32 vmcs_revision_id __read_mostly;
 
@@ -185,34 +187,81 @@ static void __vmx_clear_vmcs(void *info)
 static void __vmx_clear_vmcs(void *info)
 {
     struct vcpu *v = info;
-
-    __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
-
-    v->arch.hvm_vmx.active_cpu = -1;
-    v->arch.hvm_vmx.launched   = 0;
-
-    if ( v->arch.hvm_vmx.vmcs == this_cpu(current_vmcs) )
-        this_cpu(current_vmcs) = NULL;
+    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
+
+    /* Otherwise we can nest (vmx_suspend_cpu() vs. vmx_clear_vmcs()). */
+    ASSERT(!local_irq_is_enabled());
+
+    if ( arch_vmx->active_cpu == smp_processor_id() )
+    {
+        __vmpclear(virt_to_maddr(arch_vmx->vmcs));
+
+        arch_vmx->active_cpu = -1;
+        arch_vmx->launched   = 0;
+
+        list_del(&arch_vmx->active_list);
+
+        if ( arch_vmx->vmcs == this_cpu(current_vmcs) )
+            this_cpu(current_vmcs) = NULL;
+    }
 }
 
 static void vmx_clear_vmcs(struct vcpu *v)
 {
     int cpu = v->arch.hvm_vmx.active_cpu;
 
-    if ( cpu == -1 )
-        return;
-
-    if ( cpu == smp_processor_id() )
-        return __vmx_clear_vmcs(v);
-
-    on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1);
+    if ( cpu != -1 )
+        on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1);
 }
 
 static void vmx_load_vmcs(struct vcpu *v)
 {
+    unsigned long flags;
+
+    local_irq_save(flags);
+
+    if ( v->arch.hvm_vmx.active_cpu == -1 )
+    {
+        list_add(&v->arch.hvm_vmx.active_list, &this_cpu(active_vmcs_list));
+        v->arch.hvm_vmx.active_cpu = smp_processor_id();
+    }
+
+    ASSERT(v->arch.hvm_vmx.active_cpu == smp_processor_id());
+
     __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
-    v->arch.hvm_vmx.active_cpu = smp_processor_id();
     this_cpu(current_vmcs) = v->arch.hvm_vmx.vmcs;
+
+    local_irq_restore(flags);
+}
+
+void vmx_suspend_cpu(void)
+{
+    struct list_head *active_vmcs_list = &this_cpu(active_vmcs_list);
+    unsigned long flags;
+
+    local_irq_save(flags);
+
+    while ( !list_empty(active_vmcs_list) )
+        __vmx_clear_vmcs(list_entry(active_vmcs_list->next,
+                                    struct vcpu, arch.hvm_vmx.active_list));
+
+    if ( read_cr4() & X86_CR4_VMXE )
+    {
+        __vmxoff();
+        clear_in_cr4(X86_CR4_VMXE);
+    }
+
+    local_irq_restore(flags);
+}
+
+void vmx_resume_cpu(void)
+{
+    if ( !(read_cr4() & X86_CR4_VMXE) )
+    {
+        set_in_cr4(X86_CR4_VMXE);
+        if ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) )
+            BUG();
+    }
 }
 
 void vmx_vmcs_enter(struct vcpu *v)
@@ -247,12 +296,17 @@ void vmx_vmcs_exit(struct vcpu *v)
 
 struct vmcs_struct *vmx_alloc_host_vmcs(void)
 {
-    return vmx_alloc_vmcs();
+    ASSERT(this_cpu(host_vmcs) == NULL);
+    this_cpu(host_vmcs) = vmx_alloc_vmcs();
+    INIT_LIST_HEAD(&this_cpu(active_vmcs_list));
+    return this_cpu(host_vmcs);
 }
 
 void vmx_free_host_vmcs(struct vmcs_struct *vmcs)
 {
+    ASSERT(vmcs == this_cpu(host_vmcs));
     vmx_free_vmcs(vmcs);
+    this_cpu(host_vmcs) = NULL;
 }
 
 struct xgt_desc {
@@ -451,12 +505,17 @@ static void construct_vmcs(struct vcpu *
 
 int vmx_create_vmcs(struct vcpu *v)
 {
-    if ( v->arch.hvm_vmx.vmcs == NULL )
-    {
-        if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
+    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
+
+    if ( arch_vmx->vmcs == NULL )
+    {
+        if ( (arch_vmx->vmcs = vmx_alloc_vmcs()) == NULL )
             return -ENOMEM;
 
-        __vmx_clear_vmcs(v);
+        INIT_LIST_HEAD(&arch_vmx->active_list);
+        __vmpclear(virt_to_maddr(arch_vmx->vmcs));
+        arch_vmx->active_cpu = -1;
+        arch_vmx->launched   = 0;
     }
 
     construct_vmcs(v);
diff -r 24379dde8ac4 -r ad11f74d298c xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Jul 11 15:47:14 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Jul 11 17:23:09 2007 +0100
@@ -907,15 +907,6 @@ static void vmx_ctxt_switch_to(struct vc
     vmx_restore_dr(v);
 }
 
-static void stop_vmx(void)
-{
-    if ( !(read_cr4() & X86_CR4_VMXE) )
-        return;
-
-    __vmxoff();
-    clear_in_cr4(X86_CR4_VMXE);
-}
-
 static void vmx_store_cpu_guest_regs(
     struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
 {
@@ -1244,7 +1235,6 @@ static void disable_intercept_for_msr(u3
 
 static struct hvm_function_table vmx_function_table = {
     .name                 = "VMX",
-    .disable              = stop_vmx,
     .domain_initialise    = vmx_domain_initialise,
     .domain_destroy       = vmx_domain_destroy,
     .vcpu_initialise      = vmx_vcpu_initialise,
@@ -1271,7 +1261,9 @@ static struct hvm_function_table vmx_fun
     .inject_exception     = vmx_inject_exception,
     .init_ap_context      = vmx_init_ap_context,
     .init_hypercall_page  = vmx_init_hypercall_page,
-    .event_injection_faulted = vmx_event_injection_faulted
+    .event_injection_faulted = vmx_event_injection_faulted,
+    .suspend_cpu          = vmx_suspend_cpu,
+    .resume_cpu           = vmx_resume_cpu,
 };
 
 int start_vmx(void)
diff -r 24379dde8ac4 -r ad11f74d298c xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Wed Jul 11 15:47:14 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Wed Jul 11 17:23:09 2007 +0100
@@ -72,11 +72,6 @@ struct hvm_function_table {
     char *name;
 
     /*
-     *  Disable HVM functionality
-     */
-    void (*disable)(void);
-
-    /*
      * Initialise/destroy HVM domain/vcpu resources
      */
     int  (*domain_initialise)(struct domain *d);
@@ -160,6 +155,9 @@ struct hvm_function_table {
     void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
 
     int  (*event_injection_faulted)(struct vcpu *v);
+
+    void (*suspend_cpu)(void);
+    void (*resume_cpu)(void);
 };
 
 extern struct hvm_function_table hvm_funcs;
@@ -316,4 +314,16 @@ static inline int hvm_event_injection_fa
 /* These exceptions must always be intercepted. */
 #define HVM_TRAP_MASK (1U << TRAP_machine_check)
 
+static inline void hvm_suspend_cpu(void)
+{
+    if ( hvm_funcs.suspend_cpu )
+        hvm_funcs.suspend_cpu();
+}
+
+static inline void hvm_resume_cpu(void)
+{
+    if ( hvm_funcs.resume_cpu )
+        hvm_funcs.resume_cpu();
+}
+
 #endif /* __ASM_X86_HVM_HVM_H__ */
diff -r 24379dde8ac4 -r ad11f74d298c xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Wed Jul 11 15:47:14 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Wed Jul 11 17:23:09 2007 +0100
@@ -28,6 +28,8 @@ extern void vmcs_dump_vcpu(void);
 extern void vmcs_dump_vcpu(void);
 extern void vmx_init_vmcs_config(void);
 extern void setup_vmcs_dump(void);
+extern void vmx_suspend_cpu(void);
+extern void vmx_resume_cpu(void);
 
 struct vmcs_struct {
     u32 vmcs_revision_id;
@@ -59,6 +61,7 @@ struct arch_vmx_struct {
      *  - Activated on a CPU by VMPTRLD. Deactivated by VMCLEAR.
      *  - Launched on active CPU by VMLAUNCH when current VMCS.
      */
+    struct list_head     active_list;
     int                  active_cpu;
     int                  launched;
 
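The vmcs.c hunks above revolve around one idea: each physical CPU tracks, on a per-CPU list, every VMCS currently active (VMPTRLDed) on it, so that vmx_suspend_cpu() can VMCLEAR all of them and only then execute VMXOFF before the machine enters S3. Below is a minimal standalone sketch of that bookkeeping, with hypothetical names (vmcs_desc, cpu_state, hw_vmclear, hw_vmxoff) standing in for the real Xen internals; it compiles as plain C:

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the real VMX instructions. */
static void hw_vmclear(void *vmcs) { printf("VMCLEAR %p\n", vmcs); }
static void hw_vmxoff(void)        { printf("VMXOFF\n"); }

/* Minimal intrusive doubly-linked list, kernel style. */
struct list_head { struct list_head *prev, *next; };
static void list_init(struct list_head *h) { h->prev = h->next = h; }
static int  list_empty(const struct list_head *h) { return h->next == h; }
static void list_add(struct list_head *n, struct list_head *h)
{
    n->next = h->next; n->prev = h;
    h->next->prev = n; h->next = n;
}
static void list_del(struct list_head *n)
{
    n->prev->next = n->next; n->next->prev = n->prev;
}

/* One VMCS, linked onto the list of the CPU it is active on. */
struct vmcs_desc {
    struct list_head active_list;
    int active_cpu;            /* -1 when not loaded on any CPU */
    void *vmcs;
};

/* Per-CPU anchor: every VMCS currently active on this CPU. */
struct cpu_state { struct list_head active_vmcs_list; };

/* Cf. vmx_load_vmcs(): the first load on a CPU also links the VMCS. */
static void vmcs_load(struct cpu_state *cpu, int cpu_id, struct vmcs_desc *d)
{
    if (d->active_cpu == -1) {
        list_add(&d->active_list, &cpu->active_vmcs_list);
        d->active_cpu = cpu_id;
    }
    /* ... VMPTRLD would go here ... */
}

/* Cf. vmx_suspend_cpu(): clear all active VMCSs, then leave VMX mode. */
static void suspend_cpu_sketch(struct cpu_state *cpu)
{
    while (!list_empty(&cpu->active_vmcs_list)) {
        struct vmcs_desc *d = (struct vmcs_desc *)
            ((char *)cpu->active_vmcs_list.next -
             offsetof(struct vmcs_desc, active_list));
        hw_vmclear(d->vmcs);
        d->active_cpu = -1;
        list_del(&d->active_list);
    }
    hw_vmxoff();
}

int main(void)
{
    struct cpu_state cpu;
    struct vmcs_desc a = { .active_cpu = -1 }, b = { .active_cpu = -1 };
    a.vmcs = &a; b.vmcs = &b;

    list_init(&cpu.active_vmcs_list);
    vmcs_load(&cpu, 0, &a);
    vmcs_load(&cpu, 0, &b);
    suspend_cpu_sketch(&cpu);  /* clears b then a, then VMXOFF */
    return 0;
}

In the real patch the list walk runs with interrupts disabled, since __vmx_clear_vmcs() can also arrive via IPI on behalf of another CPU (hence the ASSERT(!local_irq_is_enabled()) above); the sketch omits that.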

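The hvm.h hunk, for its part, uses a standard C idiom for optional backend hooks: the function-pointer table gains suspend_cpu/resume_cpu slots, and thin inline wrappers NULL-check them so a backend may implement either, both, or neither (in this changeset SVM fills in only suspend_cpu, VMX both). A compilable sketch of the idiom, with made-up names (backend_ops and friends), not the actual Xen API:

#include <stdio.h>

/* Hypothetical analogue of struct hvm_function_table. */
struct backend_ops {
    const char *name;
    void (*suspend_cpu)(void);  /* optional */
    void (*resume_cpu)(void);   /* optional */
};

static void vmx_like_suspend(void) { puts("vmx: vmclear all, vmxoff"); }
static void vmx_like_resume(void)  { puts("vmx: vmxon"); }
static void svm_like_suspend(void) { puts("svm: clear EFER.SVME"); }

static struct backend_ops ops;  /* chosen once at start of day */

/* NULL-checked wrappers: callers need not know whether a hook exists. */
static void suspend_cpu(void) { if (ops.suspend_cpu) ops.suspend_cpu(); }
static void resume_cpu(void)  { if (ops.resume_cpu)  ops.resume_cpu(); }

int main(void)
{
    /* SVM-style table: resume_cpu deliberately left NULL, as in the patch. */
    ops = (struct backend_ops){ .name = "SVM",
                                .suspend_cpu = svm_like_suspend };
    suspend_cpu();
    resume_cpu();               /* silently a no-op */

    ops = (struct backend_ops){ .name = "VMX",
                                .suspend_cpu = vmx_like_suspend,
                                .resume_cpu  = vmx_like_resume };
    suspend_cpu();
    resume_cpu();
    return 0;
}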
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
