
[Xen-devel] [PATCH 1/4] xen interface for HVM S3


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
  • From: "Yu, Ke" <ke.yu@xxxxxxxxx>
  • Date: Thu, 17 May 2007 00:48:54 +0800
  • Delivery-date: Wed, 16 May 2007 09:47:23 -0700
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>
  • Thread-index: AceX2hbCpkR6c3JuQ1ePSA41q6uX4w==
  • Thread-topic: [PATCH 1/4] xen interface for HVM S3

[PATCH 1/4] xen interface for HVM S3
  - add two sub-hypercalls under the HVMOP hypercall:
    HVMOP_S3_suspend/HVMOP_S3_resume
  - HVMOP_S3_suspend resets all HVM vcpus and powers off all of the HVM
    virtual devices emulated in the hypervisor, e.g. pic, apic, RTC and HPET
  - HVMOP_S3_resume sets the BSP eip to the VMXASSIST base, so that the HVM
    guest restarts from the rombios POST entry (ffff:0000), and powers the
    HVM virtual devices back on (an illustrative caller sketch follows the
    Signed-off-by lines below)

Signed-off-by: Tian Kevin <kevin.tian@xxxxxxxxx>
Signed-off-by: Yu Ke <ke.yu@xxxxxxxxx>
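
For illustration, here is a minimal sketch of how a privileged toolstack
could drive the new sub-ops through libxc's raw hypercall plumbing
(DECLARE_HYPERCALL, do_xen_hypercall, lock_pages/unlock_pages). The wrapper
name xc_hvm_s3 and its placement are assumptions for the example only and
are not part of this patch; HVMOP_S3, HVMOP_S3_suspend/HVMOP_S3_resume and
xen_hvm_s3_t come from the public header change below.

#include "xc_private.h"      /* DECLARE_HYPERCALL, do_xen_hypercall, ... */
#include <xen/hvm/hvm_op.h>  /* HVMOP_S3, xen_hvm_s3_t (added below) */

/* Illustrative sketch only -- not part of this patch. */
static int xc_hvm_s3(int xc_handle, domid_t domid, HVMOP_S3_t s3_op)
{
    DECLARE_HYPERCALL;
    xen_hvm_s3_t arg;
    int rc;

    /* HVMOP_S3 is a new sub-op of the existing hvm_op hypercall. */
    hypercall.op     = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_S3;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = domid;
    arg.op    = s3_op;   /* HVMOP_S3_suspend or HVMOP_S3_resume */

    /* The argument buffer must stay resident across the hypercall. */
    if ( lock_pages(&arg, sizeof(arg)) != 0 )
        return -1;

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));
    return rc;
}

For example, xc_hvm_s3(xc, domid, HVMOP_S3_suspend) puts the guest into S3,
and xc_hvm_s3(xc, domid, HVMOP_S3_resume) wakes it up again.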

diff -r 305e28c0ef9b xen/arch/x86/hvm/hpet.c
--- a/xen/arch/x86/hvm/hpet.c   Wed May 09 22:50:26 2007 +0100
+++ b/xen/arch/x86/hvm/hpet.c   Tue May 15 14:36:02 2007 -0400
@@ -455,3 +455,17 @@ void hpet_deinit(struct domain *d)
         kill_timer(&h->timers[i]);
 }
 
+void hpet_poweroff(struct domain *d)
+{
+    HPETState *hp = &d->arch.hvm_domain.pl_time.vhpet;
+    int i;
+
+    for ( i = 0; i < HPET_TIMER_NUM; i++)
+        stop_timer(&hp->timers[i]);
+}
+
+void hpet_poweron(struct domain *d)
+{
+    /* Let's do a fresh init */
+    hpet_init(d->vcpu[0]);
+}
diff -r 305e28c0ef9b xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed May 09 22:50:26 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Tue May 15 14:36:02 2007 -0400
@@ -992,6 +992,107 @@ static int hvmop_set_pci_link_route(
 
     rc = 0;
     hvm_set_pci_link_route(d, op.link, op.isa_irq);
+
+ out:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
+static int hvmop_s3_suspend(struct domain *d)
+{
+    struct vcpu   *v;
+
+    if ( d->arch.hvm_domain.acpi_sleep_state != 0 ) {
+        printk("Error: HVM domain is not in S0 state, skipping S3 suspend\n");
+        return -1;
+    }
+
+    /* Cancel any pending callback like timer */
+    pit_poweroff(d);
+    rtc_poweroff(d);
+    pmtimer_poweroff(d);
+    hpet_poweroff(d);
+
+    for_each_vcpu ( d, v )
+        hvm_vcpu_reset(v);
+    
+    d->arch.hvm_domain.acpi_sleep_state = 3; /* ACPI S3 */
+    
+    return 0;
+}
+
+static int hvmop_s3_resume(struct domain *d)
+{ 
+    int rc;
+    struct vcpu   *v;
+    struct vcpu_guest_context ctxt;
+    struct cpu_user_regs *regs;
+
+    if ( d->arch.hvm_domain.acpi_sleep_state != 3 ) {
+        printk("Error: HVM domain is not in S3 sleep state, skipping resume\n");
+        rc = -1;
+        goto out;
+    }
+
+    /* Set up the BSP */
+    memset(&ctxt, 0, sizeof(ctxt));
+    rc = boot_vcpu(d, 0, &ctxt);
+    if ( rc != 0 ) {
+        gdprintk(XENLOG_ERR, "boot vcpu 0 failed\n");
+        goto out;
+    }
+    
+    v = d->vcpu[0];
+    regs = &v->arch.hvm_vcpu.io_op.io_context;
+    regs->eip = VMXASSIST_BASE;
+    regs->edx = 0;
+
+    pit_poweron(d);
+    rtc_poweron(d);
+    pmtimer_poweron(d);
+    hpet_poweron(d);
+    
+    if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
+        vcpu_wake(v);
+
+    d->arch.hvm_domain.acpi_sleep_state = 0; /* ACPI S0 */
+
+out:
+    return rc;
+}
+
+static int hvmop_s3(
+    XEN_GUEST_HANDLE(xen_hvm_s3_t) uop)
+{
+    struct xen_hvm_s3 op;
+    struct domain *d;
+    int rc;
+
+    if ( copy_from_guest(&op, uop, 1) )
+        return -EFAULT;
+
+    if ( !IS_PRIV(current->domain) )
+        return -EPERM;
+
+    d = rcu_lock_domain_by_id(op.domid);
+    if ( d == NULL )
+        return -ESRCH;
+
+    rc = -EINVAL;
+    if ( !is_hvm_domain(d) )
+        goto out;
+
+    switch ( op.op ) {
+    case HVMOP_S3_suspend:
+        rc = hvmop_s3_suspend(d);
+        break;
+    case HVMOP_S3_resume:
+        rc = hvmop_s3_resume(d);
+        break;
+    default:
+        printk("invalid HVM S3 op %x\n", op.op);
+        goto out;
+    }
 
  out:
     rcu_unlock_domain(d);
@@ -1090,6 +1191,11 @@ long do_hvm_op(unsigned long op, XEN_GUE
         rc = hvmop_set_pci_link_route(
             guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
         break;
+        
+    case HVMOP_S3:
+        rc = hvmop_s3(
+            guest_handle_cast(arg, xen_hvm_s3_t));
+        break;
 
     default:
     {
diff -r 305e28c0ef9b xen/arch/x86/hvm/i8254.c
--- a/xen/arch/x86/hvm/i8254.c  Wed May 09 22:50:26 2007 +0100
+++ b/xen/arch/x86/hvm/i8254.c  Tue May 15 14:36:02 2007 -0400
@@ -462,6 +462,16 @@ static void pit_reset(void *opaque)
     }
 }
 
+
+void pit_poweroff(struct domain *d)
+{
+    pit_reset(&d->arch.hvm_domain.pl_time.vpit);
+}
+
+void pit_poweron(struct domain *d)
+{
+}
+
 void pit_init(struct vcpu *v, unsigned long cpu_khz)
 {
     PITState *pit = &v->domain->arch.hvm_domain.pl_time.vpit;
diff -r 305e28c0ef9b xen/arch/x86/hvm/pmtimer.c
--- a/xen/arch/x86/hvm/pmtimer.c        Wed May 09 22:50:26 2007 +0100
+++ b/xen/arch/x86/hvm/pmtimer.c        Tue May 15 14:46:21 2007 -0400
@@ -254,3 +254,16 @@ void pmtimer_deinit(struct domain *d)
     PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
     kill_timer(&s->timer);
 }
+
+/* No-op for now, since no timer needs to be stopped */
+void pmtimer_poweroff(struct domain *d)
+{
+}
+
+void pmtimer_poweron(struct domain *d)
+{
+    PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
+
+    /* Reset the count */
+    s->pm.tmr_val = 0;
+}
diff -r 305e28c0ef9b xen/arch/x86/hvm/rtc.c
--- a/xen/arch/x86/hvm/rtc.c    Wed May 09 22:50:26 2007 +0100
+++ b/xen/arch/x86/hvm/rtc.c    Tue May 15 14:36:02 2007 -0400
@@ -466,7 +466,30 @@ void rtc_deinit(struct domain *d)
 {
     RTCState *s = &d->arch.hvm_domain.pl_time.vrtc;
 
+    destroy_periodic_time(&s->pt);
     kill_timer(&s->pt.timer);
     kill_timer(&s->second_timer);
     kill_timer(&s->second_timer2);
 }
+
+/* The RTC is not powered off on native hardware, but cancel its timers here */
+void rtc_poweroff(struct domain *d)
+{
+    RTCState *s = &d->arch.hvm_domain.pl_time.vrtc;    
+
+    destroy_periodic_time(&s->pt);
+    stop_timer(&s->second_timer);
+    stop_timer(&s->second_timer2);
+}
+
+/* Restart counting by catching up with the latest time */
+void rtc_poweron(struct domain *d)
+{
+    RTCState *s = &d->arch.hvm_domain.pl_time.vrtc;    
+
+    s->current_tm = gmtime(get_localtime(d));
+    rtc_copy_date(s);
+    s->next_second_time = NOW() + 1000000000ULL;
+    set_timer(&s->second_timer2, s->next_second_time);
+}
+
diff -r 305e28c0ef9b xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Wed May 09 22:50:26 2007 +0100
+++ b/xen/arch/x86/hvm/vlapic.c Tue May 15 14:36:02 2007 -0400
@@ -960,3 +960,9 @@ int is_lvtt_enabled(struct vcpu *v)
 
     return 1;
 }
+
+void vlapic_poweroff(struct vlapic *s)
+{
+    destroy_periodic_time(&s->pt);
+}
+
diff -r 305e28c0ef9b xen/arch/x86/hvm/vpt.c
--- a/xen/arch/x86/hvm/vpt.c    Wed May 09 22:50:26 2007 +0100
+++ b/xen/arch/x86/hvm/vpt.c    Tue May 15 14:36:02 2007 -0400
@@ -120,7 +120,7 @@ void pt_update_irq(struct vcpu *v)
     {
         vlapic_set_irq(vcpu_vlapic(v), irq, 0);
     }
-    else if ( irq >= 0 )
+    else if ( irq >= 0 && irq <= 15 )
     {
         hvm_isa_irq_deassert(v->domain, irq);
         hvm_isa_irq_assert(v->domain, irq);
diff -r 305e28c0ef9b xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h  Wed May 09 22:50:26 2007 +0100
+++ b/xen/include/asm-x86/hvm/domain.h  Tue May 15 14:36:02 2007 -0400
@@ -54,6 +54,8 @@ struct hvm_domain {
     int                    pbuf_idx;
     spinlock_t             pbuf_lock;
 
+    int                    acpi_sleep_state; /* 0~5: ACPI S0~S5 */
+
     uint64_t               params[HVM_NR_PARAMS];
 };
 
diff -r 305e28c0ef9b xen/include/asm-x86/hvm/vlapic.h
--- a/xen/include/asm-x86/hvm/vlapic.h  Wed May 09 22:50:26 2007 +0100
+++ b/xen/include/asm-x86/hvm/vlapic.h  Tue May 15 14:36:02 2007 -0400
@@ -80,6 +80,8 @@ void vlapic_destroy(struct vcpu *v);
 
 void vlapic_reset(struct vlapic *vlapic);
 
+void vlapic_poweroff(struct vlapic *s);
+
 void vlapic_msr_set(struct vlapic *vlapic, uint64_t value);
 
 int vlapic_accept_pic_intr(struct vcpu *v);
diff -r 305e28c0ef9b xen/include/asm-x86/hvm/vpt.h
--- a/xen/include/asm-x86/hvm/vpt.h     Wed May 09 22:50:26 2007 +0100
+++ b/xen/include/asm-x86/hvm/vpt.h     Tue May 15 14:36:02 2007 -0400
@@ -125,6 +125,8 @@ void destroy_periodic_time(struct period
 void destroy_periodic_time(struct periodic_time *pt);
 
 int pv_pit_handler(int port, int data, int write);
+void pit_poweroff(struct domain *d);
+void pit_poweron(struct domain *d);
 void pit_init(struct vcpu *v, unsigned long cpu_khz);
 void pit_stop_channel0_irq(PITState * pit);
 void pit_migrate_timers(struct vcpu *v);
@@ -132,12 +134,18 @@ void rtc_init(struct vcpu *v, int base);
 void rtc_init(struct vcpu *v, int base);
 void rtc_migrate_timers(struct vcpu *v);
 void rtc_deinit(struct domain *d);
+void rtc_poweroff(struct domain *d);
+void rtc_poweron(struct domain *d);
 int is_rtc_periodic_irq(void *opaque);
 void pmtimer_init(struct vcpu *v);
 void pmtimer_deinit(struct domain *d);
+void pmtimer_poweroff(struct domain *d);
+void pmtimer_poweron(struct domain *d);
 
 void hpet_migrate_timers(struct vcpu *v);
 void hpet_init(struct vcpu *v);
 void hpet_deinit(struct domain *d);
+void hpet_poweroff(struct domain *d);
+void hpet_poweron(struct domain *d);
 
 #endif /* __ASM_X86_HVM_VPT_H__ */
diff -r 305e28c0ef9b xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h   Wed May 09 22:50:26 2007 +0100
+++ b/xen/include/public/hvm/hvm_op.h   Tue May 15 14:36:02 2007 -0400
@@ -70,4 +70,19 @@ typedef struct xen_hvm_set_pci_link_rout
 typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
 
+#define HVMOP_S3  5
+typedef enum {
+    HVMOP_S3_suspend = 1,
+    HVMOP_S3_resume  = 2
+} HVMOP_S3_t;
+
+struct xen_hvm_s3 {
+    /* Domain to perform S3 on */
+    domid_t  domid;
+    /* S3 suspend or resume */
+    HVMOP_S3_t op;
+};
+typedef struct xen_hvm_s3 xen_hvm_s3_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_s3_t);
+
 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */

Attachment: xen.patch
Description: xen.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

 

