
[Xen-devel] [PATCH] x86: Consolidate the storage of MSR_AMD64_DR{0-3}_ADDRESS_MASK



The PV and HVM code each have a copy of these masks, which gives the false
impression in the context switch code that they are PV- or HVM-specific.

Move the storage into struct vcpu_msrs, and update all users to match.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
CC: Brian Woods <brian.woods@xxxxxxx>

The second half of this (switching these MSRs to use the guest_{rd,wr}msr()
infrastructure) isn't ready yet, so I've pulled out this half of the patch to
simplify Wei's CONFIG_PV series.
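
For reference, a rough sketch of the shape the second half might take, as a
pair of new cases in guest_rdmsr().  This is illustrative only and not part of
this patch; 'cp' (the domain's CPUID policy), 'msrs' (v->arch.msrs) and the
gp_fault label are assumed to be available as in the existing cases in
xen/arch/x86/msr.c:

    /*
     * Illustrative sketch, not part of this patch: guest_rdmsr() handling
     * for the address mask MSRs, gated on the DBEXT CPUID bit.
     */
    case MSR_AMD64_DR0_ADDRESS_MASK:
        if ( !cp->extd.dbext )
            goto gp_fault;
        *val = msrs->dr_mask[0];
        break;

    case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
        if ( !cp->extd.dbext )
            goto gp_fault;
        *val = msrs->dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1];
        break;

The guest_wrmsr() side would presumably mirror this, additionally rejecting
values with bits set above bit 31, matching the (val >> 32) checks below.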
---
 xen/arch/x86/domctl.c              | 12 ++++++------
 xen/arch/x86/hvm/svm/svm.c         | 36 ++++++++++++++++++------------------
 xen/arch/x86/pv/emul-priv-op.c     |  8 ++++----
 xen/arch/x86/traps.c               |  8 ++++----
 xen/include/asm-x86/domain.h       |  3 ---
 xen/include/asm-x86/hvm/svm/vmcb.h |  3 ---
 xen/include/asm-x86/msr.h          |  6 ++++++
 7 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 115ddf6..d985a93 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1328,12 +1328,12 @@ long arch_do_domctl(
 
                 if ( boot_cpu_has(X86_FEATURE_DBEXT) )
                 {
-                    if ( v->arch.pv.dr_mask[0] )
+                    if ( v->arch.msrs->dr_mask[0] )
                     {
                         if ( i < vmsrs->msr_count && !ret )
                         {
                             msr.index = MSR_AMD64_DR0_ADDRESS_MASK;
-                            msr.value = v->arch.pv.dr_mask[0];
+                            msr.value = v->arch.msrs->dr_mask[0];
                            if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
                                 ret = -EFAULT;
                         }
@@ -1342,12 +1342,12 @@ long arch_do_domctl(
 
                     for ( j = 0; j < 3; ++j )
                     {
-                        if ( !v->arch.pv.dr_mask[1 + j] )
+                        if ( !v->arch.msrs->dr_mask[1 + j] )
                             continue;
                         if ( i < vmsrs->msr_count && !ret )
                         {
                             msr.index = MSR_AMD64_DR1_ADDRESS_MASK + j;
-                            msr.value = v->arch.pv.dr_mask[1 + j];
+                            msr.value = v->arch.msrs->dr_mask[1 + j];
                            if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
                                 ret = -EFAULT;
                         }
@@ -1392,7 +1392,7 @@ long arch_do_domctl(
                     if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
                          (msr.value >> 32) )
                         break;
-                    v->arch.pv.dr_mask[0] = msr.value;
+                    v->arch.msrs->dr_mask[0] = msr.value;
                     continue;
 
                 case MSR_AMD64_DR1_ADDRESS_MASK ...
@@ -1401,7 +1401,7 @@ long arch_do_domctl(
                          (msr.value >> 32) )
                         break;
                     msr.index -= MSR_AMD64_DR1_ADDRESS_MASK - 1;
-                    v->arch.pv.dr_mask[msr.index] = msr.value;
+                    v->arch.msrs->dr_mask[msr.index] = msr.value;
                     continue;
                 }
                 break;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index dd0aca4..3b9095b 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -210,10 +210,10 @@ static void svm_save_dr(struct vcpu *v)
         svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_RW);
         svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_RW);
 
-        rdmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[0]);
-        rdmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[1]);
-        rdmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[2]);
-        rdmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[3]);
+        rdmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.msrs->dr_mask[0]);
+        rdmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.msrs->dr_mask[1]);
+        rdmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.msrs->dr_mask[2]);
+        rdmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.msrs->dr_mask[3]);
     }
 
     v->arch.debugreg[0] = read_debugreg(0);
@@ -241,10 +241,10 @@ static void __restore_debug_registers(struct vmcb_struct *vmcb, struct vcpu *v)
         svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_NONE);
         svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_NONE);
 
-        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[0]);
-        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[1]);
-        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[2]);
-        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[3]);
+        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.msrs->dr_mask[0]);
+        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.msrs->dr_mask[1]);
+        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.msrs->dr_mask[2]);
+        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.msrs->dr_mask[3]);
     }
 
     write_debugreg(0, v->arch.debugreg[0]);
@@ -421,19 +421,19 @@ static void svm_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
 {
     if ( boot_cpu_has(X86_FEATURE_DBEXT) )
     {
-        ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[0];
+        ctxt->msr[ctxt->count].val = v->arch.msrs->dr_mask[0];
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_AMD64_DR0_ADDRESS_MASK;
 
-        ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[1];
+        ctxt->msr[ctxt->count].val = v->arch.msrs->dr_mask[1];
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_AMD64_DR1_ADDRESS_MASK;
 
-        ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[2];
+        ctxt->msr[ctxt->count].val = v->arch.msrs->dr_mask[2];
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_AMD64_DR2_ADDRESS_MASK;
 
-        ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[3];
+        ctxt->msr[ctxt->count].val = v->arch.msrs->dr_mask[3];
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_AMD64_DR3_ADDRESS_MASK;
     }
@@ -454,7 +454,7 @@ static int svm_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
             else if ( ctxt->msr[i].val >> 32 )
                 err = -EDOM;
             else
-                v->arch.hvm.svm.dr_mask[0] = ctxt->msr[i].val;
+                v->arch.msrs->dr_mask[0] = ctxt->msr[i].val;
             break;
 
         case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
@@ -463,7 +463,7 @@ static int svm_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
             else if ( ctxt->msr[i].val >> 32 )
                 err = -EDOM;
             else
-                v->arch.hvm.svm.dr_mask[idx - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
+                v->arch.msrs->dr_mask[idx - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
                     ctxt->msr[i].val;
             break;
 
@@ -2078,14 +2078,14 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     case MSR_AMD64_DR0_ADDRESS_MASK:
         if ( !v->domain->arch.cpuid->extd.dbext )
             goto gpf;
-        *msr_content = v->arch.hvm.svm.dr_mask[0];
+        *msr_content = v->arch.msrs->dr_mask[0];
         break;
 
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !v->domain->arch.cpuid->extd.dbext )
             goto gpf;
         *msr_content =
-            v->arch.hvm.svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1];
+            v->arch.msrs->dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1];
         break;
 
     case MSR_AMD_OSVW_ID_LENGTH:
@@ -2276,13 +2276,13 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     case MSR_AMD64_DR0_ADDRESS_MASK:
         if ( !v->domain->arch.cpuid->extd.dbext || (msr_content >> 32) )
             goto gpf;
-        v->arch.hvm.svm.dr_mask[0] = msr_content;
+        v->arch.msrs->dr_mask[0] = msr_content;
         break;
 
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !v->domain->arch.cpuid->extd.dbext || (msr_content >> 32) )
             goto gpf;
-        v->arch.hvm.svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
+        v->arch.msrs->dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
             msr_content;
         break;
 
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 6422f91..07db194 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -917,13 +917,13 @@ static int read_msr(unsigned int reg, uint64_t *val,
     case MSR_AMD64_DR0_ADDRESS_MASK:
         if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
             break;
-        *val = curr->arch.pv.dr_mask[0];
+        *val = curr->arch.msrs->dr_mask[0];
         return X86EMUL_OKAY;
 
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
             break;
-        *val = curr->arch.pv.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1];
+        *val = curr->arch.msrs->dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1];
         return X86EMUL_OKAY;
 
     case MSR_IA32_PERF_CAPABILITIES:
@@ -1111,7 +1111,7 @@ static int write_msr(unsigned int reg, uint64_t val,
     case MSR_AMD64_DR0_ADDRESS_MASK:
         if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) )
             break;
-        curr->arch.pv.dr_mask[0] = val;
+        curr->arch.msrs->dr_mask[0] = val;
         if ( curr->arch.debugreg[7] & DR7_ACTIVE_MASK )
             wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, val);
         return X86EMUL_OKAY;
@@ -1119,7 +1119,7 @@ static int write_msr(unsigned int reg, uint64_t val,
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) )
             break;
-        curr->arch.pv.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1] = val;
+        curr->arch.msrs->dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1] = val;
         if ( curr->arch.debugreg[7] & DR7_ACTIVE_MASK )
             wrmsrl(reg, val);
         return X86EMUL_OKAY;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 3988753..c79a346 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2069,10 +2069,10 @@ void activate_debugregs(const struct vcpu *curr)
 
     if ( boot_cpu_has(X86_FEATURE_DBEXT) )
     {
-        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, curr->arch.pv.dr_mask[0]);
-        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, curr->arch.pv.dr_mask[1]);
-        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, curr->arch.pv.dr_mask[2]);
-        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, curr->arch.pv.dr_mask[3]);
+        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, curr->arch.msrs->dr_mask[0]);
+        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, curr->arch.msrs->dr_mask[1]);
+        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, curr->arch.msrs->dr_mask[2]);
+        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, curr->arch.msrs->dr_mask[3]);
     }
 }
 
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index e7b8227..f65b3b7 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -547,9 +547,6 @@ struct pv_vcpu
     spinlock_t shadow_ldt_lock;
 #endif
 
-    /* data breakpoint extension MSRs */
-    uint32_t dr_mask[4];
-
     /* Deferred VA-based update state. */
     bool_t need_update_runstate_area;
     struct vcpu_time_info pending_system_time;
diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h b/xen/include/asm-x86/hvm/svm/vmcb.h
index 48aed78..7017705 100644
--- a/xen/include/asm-x86/hvm/svm/vmcb.h
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h
@@ -538,9 +538,6 @@ struct svm_vcpu {
     /* AMD lightweight profiling MSR */
     uint64_t guest_lwp_cfg;      /* guest version */
     uint64_t cpu_lwp_cfg;        /* CPU version */
-
-    /* data breakpoint extension MSRs */
-    uint32_t dr_mask[4];
 };
 
 struct vmcb_struct *alloc_vmcb(void);
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index 7a061b2..c1cb38f 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -287,6 +287,12 @@ struct vcpu_msrs
             bool cpuid_faulting:1;
         };
     } misc_features_enables;
+
+    /*
+     * 0xc00110{27,19-1b} MSR_AMD64_DR{0-3}_ADDRESS_MASK
+     * TODO: Not yet handled by guest_{rd,wr}msr() infrastructure.
+     */
+    uint32_t dr_mask[4];
 };
 
 void init_guest_msr_policy(void);
-- 
2.1.4

