
[Xen-devel] [PATCH for-4.10] x86: Avoid corruption on migrate for vcpus using CPUID Faulting



Xen 4.8 and later virtualise CPUID Faulting support for guests.  However, the
value of MSR_MISC_FEATURES_ENABLES is omitted from the vcpu state, meaning
that the guest's CPUID Faulting setting is lost on migrate/suspend/resume.

To move this MSR, use the new guest_{rd,wr}msr() infrastructure.  This avoids
duplicating or open-coding the feature check and value logic, and abstracts
away the internal value representation.  One small adjustment to guest_wrmsr()
is required to cope with being called in toolstack context; the common
save/restore pattern is sketched in the notes below.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>

This needs backporting to 4.8 and later, and therefore should be considered
for 4.10 at this point.
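
For reviewers: both save paths below instantiate the same read-side pattern,
sketched roughly here.  This is illustrative only; record_msr() is a stand-in
for the context-specific copy-out (copy_to_guest_offset() in the domctl case,
the ctxt->msr[] array in the HVM case).

    for ( i = 0; i < ARRAY_SIZE(msrs); ++i )
    {
        uint64_t val;
        int rc = guest_rdmsr(v, msrs[i], &val);

        if ( rc == X86EMUL_EXCEPTION )
            continue;             /* Feature not exposed to this guest. */

        if ( rc != X86EMUL_OKAY )
            return -ENXIO;        /* msrs[] entry isn't generally readable. */

        if ( !val )
            continue;             /* Architectural default; nothing to migrate. */

        record_msr(msrs[i], val); /* Stand-in for the context-specific copy. */
    }

On the restore side, guest_wrmsr() applies the same feature and value checks,
so a malformed or inapplicable record is rejected rather than written blindly.
The v == curr adjustment in guest_wrmsr() ensures ctxt_switch_levelling() only
touches hardware state when the vcpu itself performs the write; a
toolstack-context write just updates the policy, which takes effect when the
vcpu is next context switched onto a cpu.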
---
 xen/arch/x86/domctl.c     | 46 +++++++++++++++++++++++++++++++++++++++++++++-
 xen/arch/x86/hvm/hvm.c    | 40 +++++++++++++++++++++++++++++++++++++++-
 xen/arch/x86/msr.c        |  3 ++-
 xen/include/asm-x86/msr.h |  3 +++
 4 files changed, 89 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 80b4df9..e98c4a3 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1286,7 +1286,7 @@ long arch_do_domctl(
         struct xen_domctl_vcpu_msrs *vmsrs = &domctl->u.vcpu_msrs;
         struct xen_domctl_vcpu_msr msr;
         struct vcpu *v;
-        uint32_t nr_msrs = 0;
+        uint32_t nr_msrs = 1;
 
         ret = -ESRCH;
         if ( (vmsrs->vcpu >= d->max_vcpus) ||
@@ -1311,10 +1311,49 @@ long arch_do_domctl(
                 vmsrs->msr_count = nr_msrs;
             else
             {
+                static const uint32_t msrs[] = {
+                    MSR_INTEL_MISC_FEATURES_ENABLES,
+                };
+                unsigned int j;
+
                 i = 0;
 
                 vcpu_pause(v);
 
+                for ( j = 0; j < ARRAY_SIZE(msrs); ++j )
+                {
+                    uint64_t val;
+                    int rc = guest_rdmsr(v, msrs[j], &val);
+
+                    /*
+                     * It is the programmer's responsibility to ensure that
+                     * msrs[] contains only generally-readable MSRs.
+                     * X86EMUL_EXCEPTION here implies a missing feature.
+                     */
+                    if ( rc == X86EMUL_EXCEPTION )
+                        continue;
+
+                    if ( rc != X86EMUL_OKAY )
+                    {
+                        ASSERT_UNREACHABLE();
+                        ret = -ENXIO;
+                        break;
+                    }
+
+                    if ( !val )
+                        continue; /* Skip empty MSRs. */
+
+                    if ( i < vmsrs->msr_count && !ret )
+                    {
+                        msr.index = msrs[j];
+                        msr.reserved = 0;
+                        msr.value = val;
+                        if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
+                            ret = -EFAULT;
+                    }
+                    ++i;
+                }
+
                 if ( boot_cpu_has(X86_FEATURE_DBEXT) )
                 {
                     unsigned int j;
@@ -1375,6 +1414,11 @@ long arch_do_domctl(
 
                 switch ( msr.index )
                 {
+                case MSR_INTEL_MISC_FEATURES_ENABLES:
+                    if ( guest_wrmsr(v, msr.index, msr.value) != X86EMUL_OKAY )
+                        break;
+                    continue;
+
                 case MSR_AMD64_DR0_ADDRESS_MASK:
                     if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
                          (msr.value >> 32) )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c765a5e..7f18f3b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1322,11 +1322,14 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
 }
 
 #define HVM_CPU_MSR_SIZE(cnt) offsetof(struct hvm_msr, msr[cnt])
-static unsigned int __read_mostly msr_count_max;
+static unsigned int __read_mostly msr_count_max = 1;
 
 static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
+    static const uint32_t msrs[] = {
+        MSR_INTEL_MISC_FEATURES_ENABLES,
+    };
 
     for_each_vcpu ( d, v )
     {
@@ -1340,6 +1343,32 @@ static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
         ctxt = (struct hvm_msr *)&h->data[h->cur];
         ctxt->count = 0;
 
+        for ( i = 0; i < ARRAY_SIZE(msrs); ++i )
+        {
+            uint64_t val;
+            int rc = guest_rdmsr(v, msrs[i], &val);
+
+            /*
+             * It is the programmer's responsibility to ensure that msrs[]
+             * contains only generally-readable MSRs.  X86EMUL_EXCEPTION here
+             * implies a missing feature.
+             */
+            if ( rc == X86EMUL_EXCEPTION )
+                continue;
+
+            if ( rc != X86EMUL_OKAY )
+            {
+                ASSERT_UNREACHABLE();
+                return -ENXIO;
+            }
+
+            if ( !val )
+                continue; /* Skip empty MSRs. */
+
+            ctxt->msr[ctxt->count].index = msrs[i];
+            ctxt->msr[ctxt->count++].val = val;
+        }
+
         if ( hvm_funcs.save_msr )
             hvm_funcs.save_msr(v, ctxt);
 
@@ -1426,6 +1455,15 @@ static int hvm_load_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
     {
         switch ( ctxt->msr[i].index )
         {
+            int rc;
+
+        case MSR_INTEL_MISC_FEATURES_ENABLES:
+            rc = guest_wrmsr(v, ctxt->msr[i].index, ctxt->msr[i].val);
+
+            if ( rc != X86EMUL_OKAY )
+                err = -ENXIO;
+            break;
+
         default:
             if ( !ctxt->msr[i]._rsvd )
                 err = -ENXIO;
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index baba44f..31983ed 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -150,6 +150,7 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
 
 int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
 {
+    const struct vcpu *curr = current;
     struct domain *d = v->domain;
     struct msr_domain_policy *dp = d->arch.msr;
     struct msr_vcpu_policy *vp = v->arch.msr;
@@ -176,7 +177,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
         vp->misc_features_enables.cpuid_faulting =
             val & MSR_MISC_FEATURES_CPUID_FAULTING;
 
-        if ( is_hvm_domain(d) && cpu_has_cpuid_faulting &&
+        if ( v == curr && is_hvm_domain(d) && cpu_has_cpuid_faulting &&
              (old_cpuid_faulting ^ vp->misc_features_enables.cpuid_faulting) )
             ctxt_switch_levelling(v);
         break;
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index 751fa25..41732a4 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -231,6 +231,9 @@ int init_vcpu_msr_policy(struct vcpu *v);
  * not (yet) handled by it and must be processed by legacy handlers. Such
  * behaviour is needed for transition period until all rd/wrmsr are handled
  * by the new MSR infrastructure.
+ *
+ * These functions are also used by the migration logic, so they need to cope
+ * with being called outside of v's context.
  */
 int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val);
 int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val);
-- 
2.1.4

