[Xen-devel] [PATCH v2 3/6] hvm/mtrr: use the hardware number of variable ranges for Dom0



Expand the size of the variable ranges array to match the size of the
underlying hardware. This is a preparatory change for copying the
hardware MTRR state for Dom0.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
Changes since v1:
 - Fix hvm_msr_{read,write}_intercept().
 - Relax the checks in hvm_{save,load}_mtrr_msr.
---
 xen/arch/x86/hvm/hvm.c     |  7 +++++--
 xen/arch/x86/hvm/mtrr.c    | 34 ++++++++++++++++++++++++++++++----
 xen/include/asm-x86/mtrr.h |  2 ++
 3 files changed, 37 insertions(+), 6 deletions(-)
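
For reference, a minimal sketch (illustration only, not part of the
patch) of where the hardware variable range count comes from; the hunks
below use the already-populated global mtrr_state rather than reading
the MSR directly, and hvm_mtrr_hw_vcnt() is a made-up name for this
example, while rdmsrl(), MASK_EXTR() and MTRRcap_VCNT are the
interfaces the patch itself relies on:

    /*
     * Illustration only.  MSR_MTRRcap (0xfe) layout per the SDM:
     * VCNT in bits 7:0, FIX in bit 8, WC in bit 10.
     */
    static unsigned int hvm_mtrr_hw_vcnt(void)
    {
        uint64_t cap;

        rdmsrl(MSR_MTRRcap, cap);

        return MASK_EXTR(cap, MTRRcap_VCNT);
    }

The MTRR_VCNT_MAX value of 40 introduced below presumably corresponds
to the MSR space available for base/mask pairs beneath the fixed range
MSRs: (MSR_MTRRfix64K_00000 (0x250) - MSR_IA32_MTRR_PHYSBASE(0)
(0x200)) / 2 = 40.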

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c23983cdff..8b30c93ec6 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3489,10 +3489,13 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         index = msr - MSR_MTRRfix4K_C0000;
         *msr_content = fixed_range_base[index + 3];
         break;
-    case MSR_IA32_MTRR_PHYSBASE(0)...MSR_IA32_MTRR_PHYSMASK(MTRR_VCNT-1):
+    case MSR_IA32_MTRR_PHYSBASE(0)...MSR_IA32_MTRR_PHYSMASK(MTRR_VCNT_MAX - 1):
         if ( !d->arch.cpuid->basic.mtrr )
             goto gp_fault;
         index = msr - MSR_IA32_MTRR_PHYSBASE(0);
+        if ( (index / 2) >=
+             MASK_EXTR(v->arch.hvm_vcpu.mtrr.mtrr_cap, MTRRcap_VCNT) )
+            goto gp_fault;
         *msr_content = var_range_base[index];
         break;
 
@@ -3650,7 +3653,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
                                      index, msr_content) )
             goto gp_fault;
         break;
-    case MSR_IA32_MTRR_PHYSBASE(0)...MSR_IA32_MTRR_PHYSMASK(MTRR_VCNT-1):
+    case MSR_IA32_MTRR_PHYSBASE(0)...MSR_IA32_MTRR_PHYSMASK(MTRR_VCNT_MAX - 1):
         if ( !d->arch.cpuid->basic.mtrr )
             goto gp_fault;
         if ( !mtrr_var_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index bdff56a912..e71f428a3d 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -154,14 +154,26 @@ uint8_t pat_type_2_pte_flags(uint8_t pat_type)
 int hvm_vcpu_cacheattr_init(struct vcpu *v)
 {
     struct mtrr_state *m = &v->arch.hvm_vcpu.mtrr;
+    unsigned int num_var_ranges =
+        is_hardware_domain(v->domain) ? MASK_EXTR(mtrr_state.mtrr_cap,
+                                                  MTRRcap_VCNT)
+                                      : MTRR_VCNT;
+
+    if ( num_var_ranges > MTRR_VCNT_MAX )
+    {
+        ASSERT(is_hardware_domain(v->domain));
+        printk("WARNING: limited Dom0 variable range MTRRs from %u to %u\n",
+               num_var_ranges, MTRR_VCNT_MAX);
+        num_var_ranges = MTRR_VCNT_MAX;
+    }
 
     memset(m, 0, sizeof(*m));
 
-    m->var_ranges = xzalloc_array(struct mtrr_var_range, MTRR_VCNT);
+    m->var_ranges = xzalloc_array(struct mtrr_var_range, num_var_ranges);
     if ( m->var_ranges == NULL )
         return -ENOMEM;
 
-    m->mtrr_cap = (1u << 10) | (1u << 8) | MTRR_VCNT;
+    m->mtrr_cap = (1u << 10) | (1u << 8) | num_var_ranges;
 
     v->arch.hvm_vcpu.pat_cr =
         ((uint64_t)PAT_TYPE_WRBACK) |               /* PAT0: WB */
@@ -445,6 +457,9 @@ bool_t mtrr_var_range_msr_set(
     uint64_t *var_range_base = (uint64_t*)m->var_ranges;
 
     index = msr - MSR_IA32_MTRR_PHYSBASE(0);
+    if ( (index / 2) >= MASK_EXTR(m->mtrr_cap, MTRRcap_VCNT) )
+        return 0;
+
     if ( var_range_base[index] == msr_content )
         return 1;
 
@@ -675,6 +690,8 @@ static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
     /* save mtrr&pat */
     for_each_vcpu(d, v)
     {
+        unsigned int num_var_ranges;
+
         mtrr_state = &v->arch.hvm_vcpu.mtrr;
 
         hvm_get_guest_pat(v, &hw_mtrr.msr_pat_cr);
@@ -683,7 +700,11 @@ static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
                                 | (mtrr_state->enabled << 10);
         hw_mtrr.msr_mtrr_cap = mtrr_state->mtrr_cap;
 
-        for ( i = 0; i < MTRR_VCNT; i++ )
+        num_var_ranges = MASK_EXTR(mtrr_state->mtrr_cap, MTRRcap_VCNT);
+        if ( num_var_ranges > MTRR_VCNT )
+            return -EINVAL;
+
+        for ( i = 0; i < num_var_ranges; i++ )
         {
             /* save physbase */
             hw_mtrr.msr_mtrr_var[i*2] =
@@ -709,6 +730,7 @@ static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
     struct vcpu *v;
     struct mtrr_state *mtrr_state;
     struct hvm_hw_mtrr hw_mtrr;
+    unsigned int num_var_ranges;
 
     vcpuid = hvm_load_instance(h);
     if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
@@ -727,10 +749,14 @@ static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
 
     mtrr_state->mtrr_cap = hw_mtrr.msr_mtrr_cap;
 
+    num_var_ranges = MASK_EXTR(mtrr_state->mtrr_cap, MTRRcap_VCNT);
+    if ( num_var_ranges > MTRR_VCNT )
+        return -EINVAL;
+
     for ( i = 0; i < NUM_FIXED_MSR; i++ )
         mtrr_fix_range_msr_set(d, mtrr_state, i, hw_mtrr.msr_mtrr_fixed[i]);
 
-    for ( i = 0; i < MTRR_VCNT; i++ )
+    for ( i = 0; i < num_var_ranges; i++ )
     {
         mtrr_var_range_msr_set(d, mtrr_state,
                                MSR_IA32_MTRR_PHYSBASE(i),
diff --git a/xen/include/asm-x86/mtrr.h b/xen/include/asm-x86/mtrr.h
index 69cf68cf7b..e44c3c28bd 100644
--- a/xen/include/asm-x86/mtrr.h
+++ b/xen/include/asm-x86/mtrr.h
@@ -39,6 +39,8 @@ typedef u8 mtrr_type;
 #define MTRR_PHYSBASE_SHIFT      12
 /* Number of variable range MSR pairs we emulate for HVM guests: */
 #define MTRR_VCNT                8
+/* Maximum number of variable range MSR pairs if FE is supported. */
+#define MTRR_VCNT_MAX            40
 
 struct mtrr_var_range {
        uint64_t base;
-- 
2.17.0

