
[XEN PATCH v5 10/13] x86/vpmu: guard calls to vmx/svm functions



If VMX/SVM is disabled in the build, we may still want to provide vPMU drivers
for PV guests. In that case, before using VMX/SVM features and functions we
have to explicitly check whether they are available in the build. For this
purpose (and to keep the conditionals simple) two helpers are introduced --
is_{vmx,svm}_vcpu(v) -- which check both the HVM and VMX/SVM conditions at
once; they replace the is_hvm_vcpu(v) macro in the Intel/AMD PMU drivers.
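
As a sketch of the intended effect: IS_ENABLED(CONFIG_AMD_SVM) expands to a
compile-time 0 or 1, so with SVM support compiled out the guard becomes
constant-false and the compiler can drop the SVM-only call as dead code. The
identifiers below are taken from the patch itself:

  #define is_svm_vcpu(v) (IS_ENABLED(CONFIG_AMD_SVM) && is_hvm_vcpu(v))

  /*
   * Constant-false when CONFIG_AMD_SVM=n: the call below is eliminated,
   * so no SVM code needs to be present in the build.
   */
  if ( is_svm_vcpu(v) && is_msr_bitmap_on(vpmu) )
      amd_vpmu_unset_msr_bitmap(v);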

Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@xxxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
---
changes in v5:
 - change Kconfig option names SVM/VMX -> AMD_SVM/INTEL_VMX
 - replace is_hvm_vcpu() with is_{svm,vmx}_vcpu()
changes in v4:
 - use IS_ENABLED(CONFIG_{VMX,SVM}) instead of using_{vmx,svm}
 - fix typo
changes in v3:
 - introduced macro is_{vmx,svm}_vcpu(v)
 - changed description
 - reordered patch, do not modify conditionals w/ cpu_has_vmx_msr_bitmap check
---
 xen/arch/x86/cpu/vpmu_amd.c   | 11 ++++++-----
 xen/arch/x86/cpu/vpmu_intel.c | 32 +++++++++++++++++---------------
 2 files changed, 23 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index 97e6315bd9..a082450e92 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -27,6 +27,7 @@
 #define is_pmu_enabled(msr) ((msr) & (1ULL << MSR_F10H_EVNTSEL_EN_SHIFT))
 #define set_guest_mode(msr) ((msr) |= (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT))
 #define is_overflowed(msr) (!((msr) & (1ULL << (MSR_F10H_COUNTER_LENGTH - 1))))
+#define is_svm_vcpu(v) (IS_ENABLED(CONFIG_AMD_SVM) && is_hvm_vcpu(v))
 
 static unsigned int __read_mostly num_counters;
 static const u32 __read_mostly *counters;
@@ -289,7 +290,7 @@ static int cf_check amd_vpmu_save(struct vcpu *v, bool to_guest)
 
     context_save(v);
 
-    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
+    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_svm_vcpu(v) &&
          is_msr_bitmap_on(vpmu) )
         amd_vpmu_unset_msr_bitmap(v);
 
@@ -349,7 +350,7 @@ static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         return -EINVAL;
 
     /* For all counters, enable guest only mode for HVM guest */
-    if ( is_hvm_vcpu(v) && (type == MSR_TYPE_CTRL) &&
+    if ( is_svm_vcpu(v) && (type == MSR_TYPE_CTRL) &&
          !is_guest_mode(msr_content) )
     {
         set_guest_mode(msr_content);
@@ -363,7 +364,7 @@ static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
             return 0;
         vpmu_set(vpmu, VPMU_RUNNING);
 
-        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
+        if ( is_svm_vcpu(v) && is_msr_bitmap_on(vpmu) )
              amd_vpmu_set_msr_bitmap(v);
     }
 
@@ -372,7 +373,7 @@ static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
     {
         vpmu_reset(vpmu, VPMU_RUNNING);
-        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
+        if ( is_svm_vcpu(v) && is_msr_bitmap_on(vpmu) )
              amd_vpmu_unset_msr_bitmap(v);
         release_pmu_ownership(PMU_OWNER_HVM);
     }
@@ -415,7 +416,7 @@ static void cf_check amd_vpmu_destroy(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
+    if ( is_svm_vcpu(v) && is_msr_bitmap_on(vpmu) )
         amd_vpmu_unset_msr_bitmap(v);
 
     xfree(vpmu->context);
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index cd414165df..085495ab5f 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -54,6 +54,8 @@
 #define MSR_PMC_ALIAS_MASK       (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_A_PERFCTR0))
 static bool __read_mostly full_width_write;
 
+#define is_vmx_vcpu(v) (IS_ENABLED(CONFIG_INTEL_VMX) && is_hvm_vcpu(v))
+
 /*
  * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
  * counters. 4 bits for every counter.
@@ -266,10 +268,10 @@ static inline void __core2_vpmu_save(struct vcpu *v)
         rdmsrl(MSR_P6_EVNTSEL(i), xen_pmu_cntr_pair[i].control);
     }
 
-    if ( !is_hvm_vcpu(v) )
+    if ( !is_vmx_vcpu(v) )
         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
     /* Save MSR to private context to make it fork-friendly */
-    else if ( mem_sharing_enabled(v->domain) )
+    else if ( is_vmx_vcpu(v) && mem_sharing_enabled(v->domain) )
         vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
                            &core2_vpmu_cxt->global_ctrl);
 }
@@ -278,7 +280,7 @@ static int cf_check core2_vpmu_save(struct vcpu *v, bool to_guest)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( !is_hvm_vcpu(v) )
+    if ( !is_vmx_vcpu(v) )
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
     if ( !vpmu_are_all_set(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED) )
@@ -287,7 +289,7 @@ static int cf_check core2_vpmu_save(struct vcpu *v, bool to_guest)
     __core2_vpmu_save(v);
 
     /* Unset PMU MSR bitmap to trap lazy load. */
-    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
+    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_vmx_vcpu(v) &&
          cpu_has_vmx_msr_bitmap )
         core2_vpmu_unset_msr_bitmap(v);
 
@@ -326,14 +328,14 @@ static inline void __core2_vpmu_load(struct vcpu *v)
     if ( vpmu_is_set(vcpu_vpmu(v), VPMU_CPU_HAS_DS) )
         wrmsrl(MSR_IA32_DS_AREA, core2_vpmu_cxt->ds_area);
 
-    if ( !is_hvm_vcpu(v) )
+    if ( !is_vmx_vcpu(v) )
     {
         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, core2_vpmu_cxt->global_ovf_ctrl);
         core2_vpmu_cxt->global_ovf_ctrl = 0;
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
     }
     /* Restore MSR from context when used with a fork */
-    else if ( mem_sharing_is_fork(v->domain) )
+    else if ( is_vmx_vcpu(v) && mem_sharing_is_fork(v->domain) )
         vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
                             core2_vpmu_cxt->global_ctrl);
 }
@@ -381,7 +383,7 @@ static int core2_vpmu_verify(struct vcpu *v)
     }
 
     if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) &&
-         !(is_hvm_vcpu(v)
+         !(is_vmx_vcpu(v)
            ? is_canonical_address(core2_vpmu_cxt->ds_area)
            : __addr_ok(core2_vpmu_cxt->ds_area)) )
         return -EINVAL;
@@ -442,7 +444,7 @@ static int cf_check core2_vpmu_alloc_resource(struct vcpu *v)
     if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
         return 0;
 
-    if ( is_hvm_vcpu(v) )
+    if ( is_vmx_vcpu(v) )
     {
         if ( vmx_add_host_load_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, 0) )
             goto out_err;
@@ -513,7 +515,7 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
         __core2_vpmu_load(current);
         vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
 
-        if ( is_hvm_vcpu(current) && cpu_has_vmx_msr_bitmap )
+        if ( is_vmx_vcpu(current) && cpu_has_vmx_msr_bitmap )
             core2_vpmu_set_msr_bitmap(current);
     }
     return 1;
@@ -562,7 +564,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
             return -EINVAL;
         if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) )
         {
-            if ( !(is_hvm_vcpu(v) ? is_canonical_address(msr_content)
+            if ( !(is_vmx_vcpu(v) ? is_canonical_address(msr_content)
                                   : __addr_ok(msr_content)) )
             {
                 gdprintk(XENLOG_WARNING,
@@ -584,7 +586,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         if ( msr_content & fixed_ctrl_mask )
             return -EINVAL;
 
-        if ( is_hvm_vcpu(v) )
+        if ( is_vmx_vcpu(v) )
             vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
                                &core2_vpmu_cxt->global_ctrl);
         else
@@ -653,7 +655,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
             if ( blocked )
                 return -EINVAL;
 
-            if ( is_hvm_vcpu(v) )
+            if ( is_vmx_vcpu(v) )
                 vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
                                    &core2_vpmu_cxt->global_ctrl);
             else
@@ -672,7 +674,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         wrmsrl(msr, msr_content);
     else
     {
-        if ( is_hvm_vcpu(v) )
+        if ( is_vmx_vcpu(v) )
             vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
         else
             wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
@@ -706,7 +708,7 @@ static int cf_check core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
             *msr_content = core2_vpmu_cxt->global_status;
             break;
         case MSR_CORE_PERF_GLOBAL_CTRL:
-            if ( is_hvm_vcpu(v) )
+            if ( is_vmx_vcpu(v) )
                 vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
             else
                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, *msr_content);
@@ -808,7 +810,7 @@ static void cf_check core2_vpmu_destroy(struct vcpu *v)
     vpmu->context = NULL;
     xfree(vpmu->priv_context);
     vpmu->priv_context = NULL;
-    if ( is_hvm_vcpu(v) && cpu_has_vmx_msr_bitmap )
+    if ( is_vmx_vcpu(v) && cpu_has_vmx_msr_bitmap )
         core2_vpmu_unset_msr_bitmap(v);
     release_pmu_ownership(PMU_OWNER_HVM);
     vpmu_clear(vpmu);
-- 
2.25.1