[Xen-devel] [PATCH RFC v2] x86/domctl: Don't pause the whole domain if only getting vcpu state


  • To: xen-devel@xxxxxxxxxxxxx
  • From: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
  • Date: Fri, 6 Oct 2017 13:02:30 +0300
  • Cc: wei.liu2@xxxxxxxxxx, andrew.cooper3@xxxxxxxxxx, ian.jackson@xxxxxxxxxxxxx, paul.durrant@xxxxxxxxxx, jbeulich@xxxxxxxx, Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>

This patch adds the hvm_save_one_cpu_ctxt() helper and extends every
HVMSR_PER_VCPU save callback with an instance parameter, so that
hvm_save_one() can pause and save only the requested vcpu, rather than
pausing the whole domain, when state for a single vcpu is wanted.
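
All converted HVMSR_PER_VCPU callbacks follow the same shape; a minimal
sketch (foo_save() is a placeholder, not a function added by this patch):

    static int foo_save(struct domain *d, hvm_domain_context_t *h,
                        unsigned int instance)
    {
        struct vcpu *v;

        if ( instance < d->max_vcpus )
        {
            /* Single-vcpu request: save state for d->vcpu[instance] only. */
            v = d->vcpu[instance];
            /* ... build this type's context for v, hvm_save_entry() once ... */
        }
        else
        {
            /* instance == d->max_vcpus: save every vcpu, as before. */
            for_each_vcpu ( d, v )
            {
                /* ... build the context, hvm_save_entry() per vcpu ... */
            }
        }
        return 0;
    }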

Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>

---
Changes since V1:
        - Integrated the vcpu check into all the save callbacks
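
For reference, the caller-side path this optimizes is fetching a single
vcpu's state through the existing libxc wrapper. A minimal sketch only
(get_one_vcpu_ctxt() is illustrative, assuming the usual public headers
and an HVM guest); with this patch only the target vcpu is paused while
the state is extracted:

    #include <string.h>
    #include <xenctrl.h>
    #include <xen/hvm/save.h>

    static int get_one_vcpu_ctxt(xc_interface *xch, uint32_t domid,
                                 uint16_t vcpu_id, struct hvm_hw_cpu *ctxt)
    {
        memset(ctxt, 0, sizeof(*ctxt));
        /* HVM_SAVE_CODE(CPU) is the per-vcpu CPU state typecode. */
        return xc_domain_hvm_getcontext_partial(xch, domid, HVM_SAVE_CODE(CPU),
                                                vcpu_id, ctxt, sizeof(*ctxt));
    }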
---
 tools/tests/vhpet/emul.h       |   3 +-
 tools/tests/vhpet/main.c       |   2 +-
 xen/arch/x86/cpu/mcheck/vmce.c |  16 ++-
 xen/arch/x86/domctl.c          |   2 -
 xen/arch/x86/hvm/hpet.c        |   2 +-
 xen/arch/x86/hvm/hvm.c         | 280 ++++++++++++++++++++++++++---------------
 xen/arch/x86/hvm/i8254.c       |   2 +-
 xen/arch/x86/hvm/irq.c         |   6 +-
 xen/arch/x86/hvm/mtrr.c        |  32 ++++-
 xen/arch/x86/hvm/pmtimer.c     |   2 +-
 xen/arch/x86/hvm/rtc.c         |   2 +-
 xen/arch/x86/hvm/save.c        |  71 ++++++++---
 xen/arch/x86/hvm/vioapic.c     |   2 +-
 xen/arch/x86/hvm/viridian.c    |  17 ++-
 xen/arch/x86/hvm/vlapic.c      |  23 +++-
 xen/arch/x86/hvm/vpic.c        |   2 +-
 xen/include/asm-x86/hvm/hvm.h  |   2 +
 xen/include/asm-x86/hvm/save.h |   5 +-
 18 files changed, 324 insertions(+), 147 deletions(-)

diff --git a/tools/tests/vhpet/emul.h b/tools/tests/vhpet/emul.h
index 383acff..99d5bbd 100644
--- a/tools/tests/vhpet/emul.h
+++ b/tools/tests/vhpet/emul.h
@@ -296,7 +296,8 @@ struct hvm_hw_hpet
 };
 
 typedef int (*hvm_save_handler)(struct domain *d,
-                                hvm_domain_context_t *h);
+                                hvm_domain_context_t *h,
+                                unsigned int instance);
 typedef int (*hvm_load_handler)(struct domain *d,
                                 hvm_domain_context_t *h);
 
diff --git a/tools/tests/vhpet/main.c b/tools/tests/vhpet/main.c
index 6fe65ea..3d8e7f5 100644
--- a/tools/tests/vhpet/main.c
+++ b/tools/tests/vhpet/main.c
@@ -177,7 +177,7 @@ void __init hvm_register_savevm(uint16_t typecode,
 
 int do_save(uint16_t typecode, struct domain *d, hvm_domain_context_t *h)
 {
-    return hvm_sr_handlers[typecode].save(d, h);
+    return hvm_sr_handlers[typecode].save(d, h, d->max_vcpus);
 }
 
 int do_load(uint16_t typecode, struct domain *d, hvm_domain_context_t *h)
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index e07cd2f..a1a12a5 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -349,12 +349,24 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
     return ret;
 }
 
-static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     struct vcpu *v;
     int err = 0;
 
-    for_each_vcpu ( d, v )
+    if ( instance < d->max_vcpus )
+    {
+        struct hvm_vmce_vcpu ctxt;
+
+        v = d->vcpu[instance];
+        ctxt.caps = v->arch.vmce.mcg_cap;
+        ctxt.mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
+        ctxt.mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
+        ctxt.mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl;
+
+        err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
+    }
+    else for_each_vcpu ( d, v )
     {
         struct hvm_vmce_vcpu ctxt = {
             .caps = v->arch.vmce.mcg_cap,
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 540ba08..d3c4e14 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -624,12 +624,10 @@ long arch_do_domctl(
              !is_hvm_domain(d) )
             break;
 
-        domain_pause(d);
         ret = hvm_save_one(d, domctl->u.hvmcontext_partial.type,
                            domctl->u.hvmcontext_partial.instance,
                            domctl->u.hvmcontext_partial.buffer,
                            &domctl->u.hvmcontext_partial.bufsz);
-        domain_unpause(d);
 
         if ( !ret )
             copyback = true;
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index 3ea895a..56f4691 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -509,7 +509,7 @@ static const struct hvm_mmio_ops hpet_mmio_ops = {
 };
 
 
-static int hpet_save(struct domain *d, hvm_domain_context_t *h)
+static int hpet_save(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     HPETState *hp = domain_vhpet(d);
     struct vcpu *v = pt_global_vcpu_target(d);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 205b4cb..140f2c3 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -728,13 +728,19 @@ void hvm_domain_destroy(struct domain *d)
     }
 }
 
-static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     struct vcpu *v;
     struct hvm_tsc_adjust ctxt;
     int err = 0;
 
-    for_each_vcpu ( d, v )
+    if ( instance < d->max_vcpus )
+    {
+        v = d->vcpu[instance];
+        ctxt.tsc_adjust = v->arch.hvm_vcpu.msr_tsc_adjust;
+        err = hvm_save_entry(TSC_ADJUST, v->vcpu_id, h, &ctxt);
+    }
+    else for_each_vcpu ( d, v )
     {
         ctxt.tsc_adjust = v->arch.hvm_vcpu.msr_tsc_adjust;
         err = hvm_save_entry(TSC_ADJUST, v->vcpu_id, h, &ctxt);
@@ -768,117 +774,135 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
 HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
                           hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
 
-static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+void hvm_save_one_cpu_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
+{
+    struct segment_register seg;
+
+    /* Architecture-specific vmcs/vmcb bits */
+    hvm_funcs.save_cpu_ctxt(v, ctxt);
+
+    ctxt->tsc = hvm_get_guest_tsc_fixed(v, v->domain->arch.hvm_domain.sync_tsc);
+
+    ctxt->msr_tsc_aux = hvm_msr_tsc_aux(v);
+
+    hvm_get_segment_register(v, x86_seg_idtr, &seg);
+    ctxt->idtr_limit = seg.limit;
+    ctxt->idtr_base = seg.base;
+
+    hvm_get_segment_register(v, x86_seg_gdtr, &seg);
+    ctxt->gdtr_limit = seg.limit;
+    ctxt->gdtr_base = seg.base;
+
+    hvm_get_segment_register(v, x86_seg_cs, &seg);
+    ctxt->cs_sel = seg.sel;
+    ctxt->cs_limit = seg.limit;
+    ctxt->cs_base = seg.base;
+    ctxt->cs_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_ds, &seg);
+    ctxt->ds_sel = seg.sel;
+    ctxt->ds_limit = seg.limit;
+    ctxt->ds_base = seg.base;
+    ctxt->ds_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_es, &seg);
+    ctxt->es_sel = seg.sel;
+    ctxt->es_limit = seg.limit;
+    ctxt->es_base = seg.base;
+    ctxt->es_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_ss, &seg);
+    ctxt->ss_sel = seg.sel;
+    ctxt->ss_limit = seg.limit;
+    ctxt->ss_base = seg.base;
+    ctxt->ss_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_fs, &seg);
+    ctxt->fs_sel = seg.sel;
+    ctxt->fs_limit = seg.limit;
+    ctxt->fs_base = seg.base;
+    ctxt->fs_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_gs, &seg);
+    ctxt->gs_sel = seg.sel;
+    ctxt->gs_limit = seg.limit;
+    ctxt->gs_base = seg.base;
+    ctxt->gs_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_tr, &seg);
+    ctxt->tr_sel = seg.sel;
+    ctxt->tr_limit = seg.limit;
+    ctxt->tr_base = seg.base;
+    ctxt->tr_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_ldtr, &seg);
+    ctxt->ldtr_sel = seg.sel;
+    ctxt->ldtr_limit = seg.limit;
+    ctxt->ldtr_base = seg.base;
+    ctxt->ldtr_arbytes = seg.attr;
+
+    if ( v->fpu_initialised )
+    {
+        memcpy(ctxt->fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt->fpu_regs));
+        ctxt->flags = XEN_X86_FPU_INITIALISED;
+    }
+
+    ctxt->rax = v->arch.user_regs.rax;
+    ctxt->rbx = v->arch.user_regs.rbx;
+    ctxt->rcx = v->arch.user_regs.rcx;
+    ctxt->rdx = v->arch.user_regs.rdx;
+    ctxt->rbp = v->arch.user_regs.rbp;
+    ctxt->rsi = v->arch.user_regs.rsi;
+    ctxt->rdi = v->arch.user_regs.rdi;
+    ctxt->rsp = v->arch.user_regs.rsp;
+    ctxt->rip = v->arch.user_regs.rip;
+    ctxt->rflags = v->arch.user_regs.rflags;
+    ctxt->r8  = v->arch.user_regs.r8;
+    ctxt->r9  = v->arch.user_regs.r9;
+    ctxt->r10 = v->arch.user_regs.r10;
+    ctxt->r11 = v->arch.user_regs.r11;
+    ctxt->r12 = v->arch.user_regs.r12;
+    ctxt->r13 = v->arch.user_regs.r13;
+    ctxt->r14 = v->arch.user_regs.r14;
+    ctxt->r15 = v->arch.user_regs.r15;
+    ctxt->dr0 = v->arch.debugreg[0];
+    ctxt->dr1 = v->arch.debugreg[1];
+    ctxt->dr2 = v->arch.debugreg[2];
+    ctxt->dr3 = v->arch.debugreg[3];
+    ctxt->dr6 = v->arch.debugreg[6];
+    ctxt->dr7 = v->arch.debugreg[7];
+}
+
+static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     struct vcpu *v;
     struct hvm_hw_cpu ctxt;
-    struct segment_register seg;
 
-    for_each_vcpu ( d, v )
+    if ( instance < d->max_vcpus )
+    {
+        v = d->vcpu[instance];
+        if ( v->pause_flags & VPF_down )
+            return 1;
+        memset(&ctxt, 0, sizeof(ctxt));
+
+        hvm_save_one_cpu_ctxt(v, &ctxt);
+
+        if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
+            return 1;
+    }
+    else for_each_vcpu ( d, v )
     {
-        /* We don't need to save state for a vcpu that is down; the restore 
+        /* We don't need to save state for a vcpu that is down; the restore
          * code will leave it down if there is nothing saved. */
         if ( v->pause_flags & VPF_down )
             continue;
 
         memset(&ctxt, 0, sizeof(ctxt));
 
-        /* Architecture-specific vmcs/vmcb bits */
-        hvm_funcs.save_cpu_ctxt(v, &ctxt);
-
-        ctxt.tsc = hvm_get_guest_tsc_fixed(v, d->arch.hvm_domain.sync_tsc);
-
-        ctxt.msr_tsc_aux = hvm_msr_tsc_aux(v);
-
-        hvm_get_segment_register(v, x86_seg_idtr, &seg);
-        ctxt.idtr_limit = seg.limit;
-        ctxt.idtr_base = seg.base;
-
-        hvm_get_segment_register(v, x86_seg_gdtr, &seg);
-        ctxt.gdtr_limit = seg.limit;
-        ctxt.gdtr_base = seg.base;
-
-        hvm_get_segment_register(v, x86_seg_cs, &seg);
-        ctxt.cs_sel = seg.sel;
-        ctxt.cs_limit = seg.limit;
-        ctxt.cs_base = seg.base;
-        ctxt.cs_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_ds, &seg);
-        ctxt.ds_sel = seg.sel;
-        ctxt.ds_limit = seg.limit;
-        ctxt.ds_base = seg.base;
-        ctxt.ds_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_es, &seg);
-        ctxt.es_sel = seg.sel;
-        ctxt.es_limit = seg.limit;
-        ctxt.es_base = seg.base;
-        ctxt.es_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_ss, &seg);
-        ctxt.ss_sel = seg.sel;
-        ctxt.ss_limit = seg.limit;
-        ctxt.ss_base = seg.base;
-        ctxt.ss_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_fs, &seg);
-        ctxt.fs_sel = seg.sel;
-        ctxt.fs_limit = seg.limit;
-        ctxt.fs_base = seg.base;
-        ctxt.fs_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_gs, &seg);
-        ctxt.gs_sel = seg.sel;
-        ctxt.gs_limit = seg.limit;
-        ctxt.gs_base = seg.base;
-        ctxt.gs_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_tr, &seg);
-        ctxt.tr_sel = seg.sel;
-        ctxt.tr_limit = seg.limit;
-        ctxt.tr_base = seg.base;
-        ctxt.tr_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_ldtr, &seg);
-        ctxt.ldtr_sel = seg.sel;
-        ctxt.ldtr_limit = seg.limit;
-        ctxt.ldtr_base = seg.base;
-        ctxt.ldtr_arbytes = seg.attr;
-
-        if ( v->fpu_initialised )
-        {
-            memcpy(ctxt.fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt.fpu_regs));
-            ctxt.flags = XEN_X86_FPU_INITIALISED;
-        }
-
-        ctxt.rax = v->arch.user_regs.rax;
-        ctxt.rbx = v->arch.user_regs.rbx;
-        ctxt.rcx = v->arch.user_regs.rcx;
-        ctxt.rdx = v->arch.user_regs.rdx;
-        ctxt.rbp = v->arch.user_regs.rbp;
-        ctxt.rsi = v->arch.user_regs.rsi;
-        ctxt.rdi = v->arch.user_regs.rdi;
-        ctxt.rsp = v->arch.user_regs.rsp;
-        ctxt.rip = v->arch.user_regs.rip;
-        ctxt.rflags = v->arch.user_regs.rflags;
-        ctxt.r8  = v->arch.user_regs.r8;
-        ctxt.r9  = v->arch.user_regs.r9;
-        ctxt.r10 = v->arch.user_regs.r10;
-        ctxt.r11 = v->arch.user_regs.r11;
-        ctxt.r12 = v->arch.user_regs.r12;
-        ctxt.r13 = v->arch.user_regs.r13;
-        ctxt.r14 = v->arch.user_regs.r14;
-        ctxt.r15 = v->arch.user_regs.r15;
-        ctxt.dr0 = v->arch.debugreg[0];
-        ctxt.dr1 = v->arch.debugreg[1];
-        ctxt.dr2 = v->arch.debugreg[2];
-        ctxt.dr3 = v->arch.debugreg[3];
-        ctxt.dr6 = v->arch.debugreg[6];
-        ctxt.dr7 = v->arch.debugreg[7];
+        hvm_save_one_cpu_ctxt(v, &ctxt);
 
         if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
-            return 1; 
+            return 1;
     }
     return 0;
 }
@@ -1162,7 +1186,8 @@ HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
                                            save_area) + \
                                   xstate_ctxt_size(xcr0))
 
-static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h,
+                                     unsigned int instance)
 {
     struct vcpu *v;
     struct hvm_hw_cpu_xsave *ctxt;
@@ -1170,7 +1195,27 @@ static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
     if ( !cpu_has_xsave )
         return 0;   /* do nothing */
 
-    for_each_vcpu ( d, v )
+    if ( instance < d->max_vcpus )
+    {
+        unsigned int size;
+
+        v = d->vcpu[instance];
+        size = HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum);
+
+        if ( !xsave_enabled(v) )
+            return 1;
+        if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, size) )
+            return 1;
+        ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
+        h->cur += size;
+
+        ctxt->xfeature_mask = xfeature_mask;
+        ctxt->xcr0 = v->arch.xcr0;
+        ctxt->xcr0_accum = v->arch.xcr0_accum;
+        expand_xsave_states(v, &ctxt->save_area,
+                            size - offsetof(typeof(*ctxt), save_area));
+    }
+    else for_each_vcpu ( d, v )
     {
         unsigned int size = HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum);
 
@@ -1324,10 +1369,38 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
 #define HVM_CPU_MSR_SIZE(cnt) offsetof(struct hvm_msr, msr[cnt])
 static unsigned int __read_mostly msr_count_max;
 
-static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h,
+                             unsigned int instance)
 {
     struct vcpu *v;
 
+    if ( instance < d->max_vcpus )
+    {
+        struct hvm_msr *ctxt;
+        unsigned int i;
+
+        v = d->vcpu[instance];
+
+        if ( _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
+                             HVM_CPU_MSR_SIZE(msr_count_max)) )
+            return 1;
+        ctxt = (struct hvm_msr *)&h->data[h->cur];
+        ctxt->count = 0;
+
+        if ( hvm_funcs.save_msr )
+            hvm_funcs.save_msr(v, ctxt);
+
+        ASSERT(ctxt->count <= msr_count_max);
+
+        for ( i = 0; i < ctxt->count; ++i )
+            ctxt->msr[i]._rsvd = 0;
+
+        if ( ctxt->count )
+            h->cur += HVM_CPU_MSR_SIZE(ctxt->count);
+        else
+            h->cur -= sizeof(struct hvm_save_descriptor);
+
+    }
-    for_each_vcpu ( d, v )
+    else for_each_vcpu ( d, v )
     {
         struct hvm_msr *ctxt;
diff --git a/xen/arch/x86/hvm/i8254.c b/xen/arch/x86/hvm/i8254.c
index 992f08d..143b64d 100644
--- a/xen/arch/x86/hvm/i8254.c
+++ b/xen/arch/x86/hvm/i8254.c
@@ -390,7 +390,7 @@ void pit_stop_channel0_irq(PITState *pit)
     spin_unlock(&pit->lock);
 }
 
-static int pit_save(struct domain *d, hvm_domain_context_t *h)
+static int pit_save(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     PITState *pit = domain_vpit(d);
     int rc;
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index e425df9..dbbf769 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -598,7 +598,7 @@ static int __init dump_irq_info_key_init(void)
 }
 __initcall(dump_irq_info_key_init);
 
-static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_pci(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
     unsigned int asserted, pdev, pintx;
@@ -630,7 +630,7 @@ static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
     return rc;
 }
 
-static int irq_save_isa(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_isa(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
 
@@ -638,7 +638,7 @@ static int irq_save_isa(struct domain *d, hvm_domain_context_t *h)
     return ( hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq) );
 }
 
-static int irq_save_link(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_link(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
 
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index b721c63..b998d80 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -666,14 +666,42 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
     return 0;
 }
 
-static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     int i;
     struct vcpu *v;
     struct hvm_hw_mtrr hw_mtrr;
     struct mtrr_state *mtrr_state;
     /* save mtrr&pat */
-    for_each_vcpu(d, v)
+    if ( instance < d->max_vcpus )
+    {
+        v = d->vcpu[instance];
+        mtrr_state = &v->arch.hvm_vcpu.mtrr;
+
+        hvm_get_guest_pat(v, &hw_mtrr.msr_pat_cr);
+
+        hw_mtrr.msr_mtrr_def_type = mtrr_state->def_type
+                                | (mtrr_state->enabled << 10);
+        hw_mtrr.msr_mtrr_cap = mtrr_state->mtrr_cap;
+
+        for ( i = 0; i < MTRR_VCNT; i++ )
+        {
+            /* save physbase */
+            hw_mtrr.msr_mtrr_var[i*2] =
+                ((uint64_t*)mtrr_state->var_ranges)[i*2];
+            /* save physmask */
+            hw_mtrr.msr_mtrr_var[i*2+1] =
+                ((uint64_t*)mtrr_state->var_ranges)[i*2+1];
+        }
+
+        for ( i = 0; i < NUM_FIXED_MSR; i++ )
+            hw_mtrr.msr_mtrr_fixed[i] =
+                ((uint64_t*)mtrr_state->fixed_ranges)[i];
+
+        if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) != 0 )
+            return 1;
+    }
+    else for_each_vcpu(d, v)
     {
         mtrr_state = &v->arch.hvm_vcpu.mtrr;
 
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index b70c299..21dcdeb 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -249,7 +249,7 @@ static int handle_pmt_io(
     return X86EMUL_OKAY;
 }
 
-static int acpi_save(struct domain *d, hvm_domain_context_t *h)
+static int acpi_save(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     struct hvm_hw_acpi *acpi = &d->arch.hvm_domain.acpi;
     PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index bcfa169..83f339d 100644
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -737,7 +737,7 @@ void rtc_migrate_timers(struct vcpu *v)
 }
 
 /* Save RTC hardware state */
-static int rtc_save(struct domain *d, hvm_domain_context_t *h)
+static int rtc_save(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     RTCState *s = domain_vrtc(d);
     int rc;
diff --git a/xen/arch/x86/hvm/save.c b/xen/arch/x86/hvm/save.c
index 8984a23..97b56f7 100644
--- a/xen/arch/x86/hvm/save.c
+++ b/xen/arch/x86/hvm/save.c
@@ -138,6 +138,7 @@ int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance,
     int rv;
     hvm_domain_context_t ctxt = { };
     const struct hvm_save_descriptor *desc;
+    bool is_single_instance = false;
 
     if ( d->is_dying ||
          typecode > HVM_SAVE_CODE_MAX ||
@@ -145,41 +146,78 @@ int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance,
          !hvm_sr_handlers[typecode].save )
         return -EINVAL;
 
+    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU &&
+         instance < d->max_vcpus )
+        is_single_instance = true;
+
     ctxt.size = hvm_sr_handlers[typecode].size;
-    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU )
+    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU &&
+         !is_single_instance )
         ctxt.size *= d->max_vcpus;
+
     ctxt.data = xmalloc_bytes(ctxt.size);
     if ( !ctxt.data )
         return -ENOMEM;
 
-    if ( (rv = hvm_sr_handlers[typecode].save(d, &ctxt)) != 0 )
+    if ( is_single_instance )
+        vcpu_pause(d->vcpu[instance]);
+    else
+        domain_pause(d);
+
+    if ( (rv = hvm_sr_handlers[typecode].save(d, &ctxt, instance)) != 0 )
         printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
                d->domain_id, typecode, rv);
     else if ( rv = -ENOENT, ctxt.cur >= sizeof(*desc) )
     {
         uint32_t off;
 
-        for ( off = 0; off <= (ctxt.cur - sizeof(*desc)); off += desc->length )
+        if ( is_single_instance )
         {
-            desc = (void *)(ctxt.data + off);
+            desc = (void *)(ctxt.data);
             /* Move past header */
-            off += sizeof(*desc);
+            off = sizeof(*desc);
-            if ( ctxt.cur < desc->length ||
-                 off > ctxt.cur - desc->length )
-                break;
-            if ( instance == desc->instance )
+            rv = 0;
+            if ( ctxt.cur < desc->length ||
+                 off > ctxt.cur - desc->length )
+                rv = -EFAULT;
+            else if ( guest_handle_is_null(handle) )
+                *bufsz = desc->length;
+            else if ( *bufsz < desc->length )
+                rv = -ENOBUFS;
+            else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
+                rv = -EFAULT;
+            else
+                *bufsz = desc->length;
+        }
+        else
+        {
+            for ( off = 0; off <= (ctxt.cur - sizeof(*desc)); off += desc->length )
             {
-                rv = 0;
-                if ( guest_handle_is_null(handle) )
-                    *bufsz = desc->length;
-                else if ( *bufsz < desc->length )
-                    rv = -ENOBUFS;
-                else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
-                    rv = -EFAULT;
-                else
-                    *bufsz = desc->length;
-                break;
+                desc = (void *)(ctxt.data + off);
+                /* Move past header */
+                off += sizeof(*desc);
+                if ( ctxt.cur < desc->length ||
+                     off > ctxt.cur - desc->length )
+                    break;
+                if ( instance == desc->instance )
+                {
+                    rv = 0;
+                    if ( guest_handle_is_null(handle) )
+                        *bufsz = desc->length;
+                    else if ( *bufsz < desc->length )
+                        rv = -ENOBUFS;
+                    else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
+                        rv = -EFAULT;
+                    else
+                        *bufsz = desc->length;
+                    break;
+                }
             }
         }
     }
+
+    if ( is_single_instance )
+        vcpu_unpause(d->vcpu[instance]);
+    else
+        domain_unpause(d);
 
@@ -225,7 +260,7 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
         {
             printk(XENLOG_G_INFO "HVM%d save: %s\n",
                    d->domain_id, hvm_sr_handlers[i].name);
-            if ( handler(d, h) != 0 )
+            if ( handler(d, h, d->max_vcpus) != 0 )
             {
                 printk(XENLOG_G_ERR
                        "HVM%d save: failed to save type %"PRIu16"\n",
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 97b419f..34d6907 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -569,7 +569,7 @@ int vioapic_get_trigger_mode(const struct domain *d, unsigned int gsi)
     return vioapic->redirtbl[pin].fields.trig_mode;
 }
 
-static int ioapic_save(struct domain *d, hvm_domain_context_t *h)
+static int ioapic_save(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     struct hvm_vioapic *s;
 
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index f0fa59d..5943bf4 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -994,7 +994,7 @@ out:
     return HVM_HCALL_completed;
 }
 
-static int viridian_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int viridian_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     struct hvm_viridian_domain_context ctxt = {
         .time_ref_count = d->arch.hvm_domain.viridian.time_ref_count.val,
@@ -1030,14 +1030,25 @@ static int viridian_load_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
 HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_DOMAIN, viridian_save_domain_ctxt,
                           viridian_load_domain_ctxt, 1, HVMSR_PER_DOM);
 
-static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     struct vcpu *v;
 
     if ( !is_viridian_domain(d) )
         return 0;
 
-    for_each_vcpu( d, v ) {
+    if ( instance < d->max_vcpus )
+    {
+        struct hvm_viridian_vcpu_context ctxt = { };
+
+        v = d->vcpu[instance];
+        ctxt.vp_assist_msr = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw;
+        ctxt.vp_assist_vector = v->arch.hvm_vcpu.viridian.vp_assist.vector;
+
+        if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) != 0 )
+            return 1;
+    }
+    else for_each_vcpu( d, v ) {
         struct hvm_viridian_vcpu_context ctxt = {
             .vp_assist_msr = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw,
             .vp_assist_vector = v->arch.hvm_vcpu.viridian.vp_assist.vector,
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 4bfc53e..591631a 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1387,7 +1387,7 @@ static void lapic_rearm(struct vlapic *s)
     s->timer_last_update = s->pt.last_plt_gtime;
 }
 
-static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h)
+static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     struct vcpu *v;
     struct vlapic *s;
@@ -1396,7 +1396,13 @@ static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h)
     if ( !has_vlapic(d) )
         return 0;
 
-    for_each_vcpu ( d, v )
+    if ( instance < d->max_vcpus )
+    {
+        v = d->vcpu[instance];
+        s = vcpu_vlapic(v);
+        rc = hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw);
+    }
+    else for_each_vcpu ( d, v )
     {
         s = vcpu_vlapic(v);
         if ( (rc = hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw)) != 0 )
@@ -1406,7 +1412,7 @@ static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h)
     return rc;
 }
 
-static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h)
+static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     struct vcpu *v;
     struct vlapic *s;
@@ -1415,7 +1421,16 @@ static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h)
     if ( !has_vlapic(d) )
         return 0;
 
-    for_each_vcpu ( d, v )
+    if ( instance < d->max_vcpus )
+    {
+        v = d->vcpu[instance];
+        if ( hvm_funcs.sync_pir_to_irr )
+            hvm_funcs.sync_pir_to_irr(v);
+
+        s = vcpu_vlapic(v);
+        rc = hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs);
+    }
+    else for_each_vcpu ( d, v )
     {
         if ( hvm_funcs.sync_pir_to_irr )
             hvm_funcs.sync_pir_to_irr(v);
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index e160bbd..6b77f3c 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -371,7 +371,7 @@ static int vpic_intercept_elcr_io(
     return X86EMUL_OKAY;
 }
 
-static int vpic_save(struct domain *d, hvm_domain_context_t *h)
+static int vpic_save(struct domain *d, hvm_domain_context_t *h, unsigned int instance)
 {
     struct hvm_hw_vpic *s;
     int i;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index b687e03..c4b7b3d 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -609,6 +609,8 @@ static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
     return false;
 }
 
+void hvm_save_one_cpu_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt);
+
 /* Check CR4/EFER values */
 const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
                            signed int cr0_pg);
diff --git a/xen/include/asm-x86/hvm/save.h b/xen/include/asm-x86/hvm/save.h
index f889e8f..a2c39c4 100644
--- a/xen/include/asm-x86/hvm/save.h
+++ b/xen/include/asm-x86/hvm/save.h
@@ -95,8 +95,9 @@ static inline uint16_t hvm_load_instance(struct hvm_domain_context *h)
  * The save handler may save multiple instances of a type into the buffer;
  * the load handler will be called once for each instance found when
  * restoring.  Both return non-zero on error. */
-typedef int (*hvm_save_handler) (struct domain *d, 
-                                 hvm_domain_context_t *h);
+typedef int (*hvm_save_handler) (struct domain *d,
+                                 hvm_domain_context_t *h,
+                                 unsigned int instance);
 typedef int (*hvm_load_handler) (struct domain *d,
                                  hvm_domain_context_t *h);
 
-- 
2.7.4

