
[Xen-devel] [PATCH v5 5/8] x86/hvm: Introduce hvm_save_cpu_msrs_one func


  • To: xen-devel@xxxxxxxxxxxxx
  • From: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
  • Date: Tue, 29 May 2018 17:58:55 +0300
  • Cc: wei.liu2@xxxxxxxxxx, andrew.cooper3@xxxxxxxxxx, ian.jackson@xxxxxxxxxxxxx, paul.durrant@xxxxxxxxxx, jbeulich@xxxxxxxx, Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
  • Delivery-date: Tue, 29 May 2018 15:06:33 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Break the per-vCPU MSR saving code out of hvm_save_cpu_msrs() into a new
helper, hvm_save_cpu_msrs_one(), which saves the data for a single vCPU
instance.  The caller checks the helper's return value so that a failed
guest_rdmsr() is still reported as -ENXIO, as before.

Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
---
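Note (illustrative, not part of this patch): with the per-vCPU helper split
out, a caller that only needs the MSR state of a single vCPU can invoke it
directly.  A minimal sketch, assuming a hypothetical caller name
example_dump_vcpu_msrs() and a caller-provided buffer sized for at least
msr_count_max entries:

    /*
     * Hypothetical caller, for illustration only.  Fills a caller-provided
     * struct hvm_msr with the MSR state of @v.
     */
    static int example_dump_vcpu_msrs(struct vcpu *v, struct hvm_msr *ctxt)
    {
        ctxt->count = 0;  /* hvm_save_cpu_msrs_one() appends to ctxt->msr[] */
        return hvm_save_cpu_msrs_one(v, ctxt); /* 0 on success, -ENXIO on error */
    }
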
 xen/arch/x86/hvm/hvm.c | 60 +++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 34 insertions(+), 26 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e8ecabf..4a22283 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1354,6 +1354,38 @@ static const uint32_t msrs_to_send[] = {
 };
 static unsigned int __read_mostly msr_count_max = ARRAY_SIZE(msrs_to_send);
 
+static int hvm_save_cpu_msrs_one(struct vcpu *v, struct hvm_msr *ctxt)
+{
+    unsigned int i;
+
+    for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
+    {
+        uint64_t val;
+        int rc = guest_rdmsr(v, msrs_to_send[i], &val);
+
+        /*
+         * It is the programmer's responsibility to ensure that
+         * msrs_to_send[] contains generally read/write MSRs.
+         * X86EMUL_EXCEPTION here implies a missing feature, and that the
+         * guest doesn't have access to the MSR.
+         */
+        if ( rc == X86EMUL_EXCEPTION )
+            continue;
+
+        if ( rc != X86EMUL_OKAY )
+        {
+            ASSERT_UNREACHABLE();
+            return -ENXIO;
+        }
+
+        if ( !val )
+            continue; /* Skip empty MSRs. */
+        ctxt->msr[ctxt->count].index = msrs_to_send[i];
+        ctxt->msr[ctxt->count++].val = val;
+    }
+    return 0;
+}
+
 static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
@@ -1370,32 +1402,8 @@ static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
         ctxt = (struct hvm_msr *)&h->data[h->cur];
         ctxt->count = 0;
 
-        for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
-        {
-            uint64_t val;
-            int rc = guest_rdmsr(v, msrs_to_send[i], &val);
-
-            /*
-             * It is the programmers responsibility to ensure that
-             * msrs_to_send[] contain generally-read/write MSRs.
-             * X86EMUL_EXCEPTION here implies a missing feature, and that the
-             * guest doesn't have access to the MSR.
-             */
-            if ( rc == X86EMUL_EXCEPTION )
-                continue;
-
-            if ( rc != X86EMUL_OKAY )
-            {
-                ASSERT_UNREACHABLE();
-                return -ENXIO;
-            }
-
-            if ( !val )
-                continue; /* Skip empty MSRs. */
-
-            ctxt->msr[ctxt->count].index = msrs_to_send[i];
-            ctxt->msr[ctxt->count++].val = val;
-        }
+        if ( hvm_save_cpu_msrs_one(v, ctxt) )
+            return -ENXIO;
 
         if ( hvm_funcs.save_msr )
             hvm_funcs.save_msr(v, ctxt);
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

