
[Xen-devel] [PATCH v9 03/11] x86/hvm: Introduce hvm_save_cpu_ctxt_one func


  • To: xen-devel@xxxxxxxxxxxxx
  • From: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
  • Date: Thu, 28 Jun 2018 12:25:28 +0300
  • Cc: wei.liu2@xxxxxxxxxx, andrew.cooper3@xxxxxxxxxx, ian.jackson@xxxxxxxxxxxxx, paul.durrant@xxxxxxxxxx, jbeulich@xxxxxxxx, Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
  • Delivery-date: Thu, 28 Jun 2018 09:26:00 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Introduce hvm_save_cpu_ctxt_one(), which saves the CPU context of a single vcpu, and rework hvm_save_cpu_ctxt() into a loop over the domain's vcpus that calls the new helper.
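
For readability, the resulting per-domain save loop after this change is sketched
below, consolidated from the diff (a sketch of the net result, not an extra hunk):

    static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
    {
        struct vcpu *v;
        int rc;

        for_each_vcpu ( d, v )
        {
            rc = hvm_save_cpu_ctxt_one(v, h);
            if ( rc == CONTINUE )   /* vcpu is down, nothing to save */
                continue;
            if ( rc != 0 )          /* hvm_save_entry() failed */
                return 1;
        }

        return 0;
    }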

Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>

---
Changes since V8:
        - Corrected the coding style issue pointed out in review
        - Moved the return check into the per-vcpu (_one) function
---
 xen/arch/x86/hvm/hvm.c            | 222 ++++++++++++++++++++------------------
 xen/include/asm-x86/hvm/support.h |   2 +
 2 files changed, 120 insertions(+), 104 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index f51da00..c7ba794 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -788,117 +788,131 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
 HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
                           hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
 
+static int hvm_save_cpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
+{
+    struct segment_register seg;
+    struct hvm_hw_cpu ctxt;
+
+    memset(&ctxt, 0, sizeof(ctxt));
+
+    /*
+     * We don't need to save state for a vcpu that is down; the restore
+     * code will leave it down if there is nothing saved.
+     */
+    if ( v->pause_flags & VPF_down )
+        return CONTINUE;
+
+    /* Architecture-specific vmcs/vmcb bits */
+    hvm_funcs.save_cpu_ctxt(v, &ctxt);
+
+    ctxt.tsc = hvm_get_guest_tsc_fixed(v, v->domain->arch.hvm_domain.sync_tsc);
+
+    ctxt.msr_tsc_aux = hvm_msr_tsc_aux(v);
+
+    hvm_get_segment_register(v, x86_seg_idtr, &seg);
+    ctxt.idtr_limit = seg.limit;
+    ctxt.idtr_base = seg.base;
+
+    hvm_get_segment_register(v, x86_seg_gdtr, &seg);
+    ctxt.gdtr_limit = seg.limit;
+    ctxt.gdtr_base = seg.base;
+
+    hvm_get_segment_register(v, x86_seg_cs, &seg);
+    ctxt.cs_sel = seg.sel;
+    ctxt.cs_limit = seg.limit;
+    ctxt.cs_base = seg.base;
+    ctxt.cs_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_ds, &seg);
+    ctxt.ds_sel = seg.sel;
+    ctxt.ds_limit = seg.limit;
+    ctxt.ds_base = seg.base;
+    ctxt.ds_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_es, &seg);
+    ctxt.es_sel = seg.sel;
+    ctxt.es_limit = seg.limit;
+    ctxt.es_base = seg.base;
+    ctxt.es_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_ss, &seg);
+    ctxt.ss_sel = seg.sel;
+    ctxt.ss_limit = seg.limit;
+    ctxt.ss_base = seg.base;
+    ctxt.ss_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_fs, &seg);
+    ctxt.fs_sel = seg.sel;
+    ctxt.fs_limit = seg.limit;
+    ctxt.fs_base = seg.base;
+    ctxt.fs_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_gs, &seg);
+    ctxt.gs_sel = seg.sel;
+    ctxt.gs_limit = seg.limit;
+    ctxt.gs_base = seg.base;
+    ctxt.gs_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_tr, &seg);
+    ctxt.tr_sel = seg.sel;
+    ctxt.tr_limit = seg.limit;
+    ctxt.tr_base = seg.base;
+    ctxt.tr_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_ldtr, &seg);
+    ctxt.ldtr_sel = seg.sel;
+    ctxt.ldtr_limit = seg.limit;
+    ctxt.ldtr_base = seg.base;
+    ctxt.ldtr_arbytes = seg.attr;
+
+    if ( v->fpu_initialised )
+    {
+        memcpy(ctxt.fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt.fpu_regs));
+        ctxt.flags = XEN_X86_FPU_INITIALISED;
+    }
+
+    ctxt.rax = v->arch.user_regs.rax;
+    ctxt.rbx = v->arch.user_regs.rbx;
+    ctxt.rcx = v->arch.user_regs.rcx;
+    ctxt.rdx = v->arch.user_regs.rdx;
+    ctxt.rbp = v->arch.user_regs.rbp;
+    ctxt.rsi = v->arch.user_regs.rsi;
+    ctxt.rdi = v->arch.user_regs.rdi;
+    ctxt.rsp = v->arch.user_regs.rsp;
+    ctxt.rip = v->arch.user_regs.rip;
+    ctxt.rflags = v->arch.user_regs.rflags;
+    ctxt.r8  = v->arch.user_regs.r8;
+    ctxt.r9  = v->arch.user_regs.r9;
+    ctxt.r10 = v->arch.user_regs.r10;
+    ctxt.r11 = v->arch.user_regs.r11;
+    ctxt.r12 = v->arch.user_regs.r12;
+    ctxt.r13 = v->arch.user_regs.r13;
+    ctxt.r14 = v->arch.user_regs.r14;
+    ctxt.r15 = v->arch.user_regs.r15;
+    ctxt.dr0 = v->arch.debugreg[0];
+    ctxt.dr1 = v->arch.debugreg[1];
+    ctxt.dr2 = v->arch.debugreg[2];
+    ctxt.dr3 = v->arch.debugreg[3];
+    ctxt.dr6 = v->arch.debugreg[6];
+    ctxt.dr7 = v->arch.debugreg[7];
+
+    if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
+        return 1;
+    return 0;
+}
+
 static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
-    struct hvm_hw_cpu ctxt;
-    struct segment_register seg;
+    int rc = 0;
 
     for_each_vcpu ( d, v )
     {
-        /* We don't need to save state for a vcpu that is down; the restore 
-         * code will leave it down if there is nothing saved. */
-        if ( v->pause_flags & VPF_down )
+        rc = hvm_save_cpu_ctxt_one(v, h);
+        if ( rc == CONTINUE )
             continue;
-
-        memset(&ctxt, 0, sizeof(ctxt));
-
-        /* Architecture-specific vmcs/vmcb bits */
-        hvm_funcs.save_cpu_ctxt(v, &ctxt);
-
-        ctxt.tsc = hvm_get_guest_tsc_fixed(v, d->arch.hvm_domain.sync_tsc);
-
-        ctxt.msr_tsc_aux = hvm_msr_tsc_aux(v);
-
-        hvm_get_segment_register(v, x86_seg_idtr, &seg);
-        ctxt.idtr_limit = seg.limit;
-        ctxt.idtr_base = seg.base;
-
-        hvm_get_segment_register(v, x86_seg_gdtr, &seg);
-        ctxt.gdtr_limit = seg.limit;
-        ctxt.gdtr_base = seg.base;
-
-        hvm_get_segment_register(v, x86_seg_cs, &seg);
-        ctxt.cs_sel = seg.sel;
-        ctxt.cs_limit = seg.limit;
-        ctxt.cs_base = seg.base;
-        ctxt.cs_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_ds, &seg);
-        ctxt.ds_sel = seg.sel;
-        ctxt.ds_limit = seg.limit;
-        ctxt.ds_base = seg.base;
-        ctxt.ds_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_es, &seg);
-        ctxt.es_sel = seg.sel;
-        ctxt.es_limit = seg.limit;
-        ctxt.es_base = seg.base;
-        ctxt.es_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_ss, &seg);
-        ctxt.ss_sel = seg.sel;
-        ctxt.ss_limit = seg.limit;
-        ctxt.ss_base = seg.base;
-        ctxt.ss_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_fs, &seg);
-        ctxt.fs_sel = seg.sel;
-        ctxt.fs_limit = seg.limit;
-        ctxt.fs_base = seg.base;
-        ctxt.fs_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_gs, &seg);
-        ctxt.gs_sel = seg.sel;
-        ctxt.gs_limit = seg.limit;
-        ctxt.gs_base = seg.base;
-        ctxt.gs_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_tr, &seg);
-        ctxt.tr_sel = seg.sel;
-        ctxt.tr_limit = seg.limit;
-        ctxt.tr_base = seg.base;
-        ctxt.tr_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_ldtr, &seg);
-        ctxt.ldtr_sel = seg.sel;
-        ctxt.ldtr_limit = seg.limit;
-        ctxt.ldtr_base = seg.base;
-        ctxt.ldtr_arbytes = seg.attr;
-
-        if ( v->fpu_initialised )
-        {
-            memcpy(ctxt.fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt.fpu_regs));
-            ctxt.flags = XEN_X86_FPU_INITIALISED;
-        }
-
-        ctxt.rax = v->arch.user_regs.rax;
-        ctxt.rbx = v->arch.user_regs.rbx;
-        ctxt.rcx = v->arch.user_regs.rcx;
-        ctxt.rdx = v->arch.user_regs.rdx;
-        ctxt.rbp = v->arch.user_regs.rbp;
-        ctxt.rsi = v->arch.user_regs.rsi;
-        ctxt.rdi = v->arch.user_regs.rdi;
-        ctxt.rsp = v->arch.user_regs.rsp;
-        ctxt.rip = v->arch.user_regs.rip;
-        ctxt.rflags = v->arch.user_regs.rflags;
-        ctxt.r8  = v->arch.user_regs.r8;
-        ctxt.r9  = v->arch.user_regs.r9;
-        ctxt.r10 = v->arch.user_regs.r10;
-        ctxt.r11 = v->arch.user_regs.r11;
-        ctxt.r12 = v->arch.user_regs.r12;
-        ctxt.r13 = v->arch.user_regs.r13;
-        ctxt.r14 = v->arch.user_regs.r14;
-        ctxt.r15 = v->arch.user_regs.r15;
-        ctxt.dr0 = v->arch.debugreg[0];
-        ctxt.dr1 = v->arch.debugreg[1];
-        ctxt.dr2 = v->arch.debugreg[2];
-        ctxt.dr3 = v->arch.debugreg[3];
-        ctxt.dr6 = v->arch.debugreg[6];
-        ctxt.dr7 = v->arch.debugreg[7];
-
-        if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
-            return 1; 
+        else if ( rc != 0 )
+            return 1;
     }
     return 0;
 }
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index ac33eea..f8988e0 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -52,6 +52,8 @@ extern unsigned int opt_hvm_debug_level;
 #define HVM_DBG_LOG(level, _f, _a...) do {} while (0)
 #endif
 
+#define CONTINUE 2
+
 extern unsigned long hvm_io_bitmap[];
 
 enum hvm_translation_result {
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
