|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v2 3/3] x86/hvm: create hvm_funcs for {svm,vmx}_{set,clear}_msr_intercept()
Add hvm_funcs hooks for {set,clear}_msr_intercept(), so that the common vPMU
code can control MSR interception without calling SVM/VMX-specific functions
directly.
No functional change intended.
Signed-off-by: Xenia Ragiadakou <xenia.ragiadakou@xxxxxxx>
---
Changes in v2:
- change the parameter types to unsigned int
xen/arch/x86/cpu/vpmu_amd.c | 10 ++++-----
xen/arch/x86/cpu/vpmu_intel.c | 24 ++++++++++----------
xen/arch/x86/hvm/svm/svm.c | 7 +++---
xen/arch/x86/hvm/vmx/vmcs.c | 8 +++----
xen/arch/x86/hvm/vmx/vmx.c | 2 ++
xen/arch/x86/include/asm/hvm/hvm.h | 30 +++++++++++++++++++++++++
xen/arch/x86/include/asm/hvm/svm/vmcb.h | 8 +++----
xen/arch/x86/include/asm/hvm/vmx/vmcs.h | 8 +++----
8 files changed, 65 insertions(+), 32 deletions(-)
diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index da8e906972..77dee08588 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -154,9 +154,9 @@ static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
for ( i = 0; i < num_counters; i++ )
{
- svm_clear_msr_intercept(v, counters[i], MSR_RW);
- svm_set_msr_intercept(v, ctrls[i], MSR_W);
- svm_clear_msr_intercept(v, ctrls[i], MSR_R);
+ hvm_clear_msr_intercept(v, counters[i], MSR_RW);
+ hvm_set_msr_intercept(v, ctrls[i], MSR_W);
+ hvm_clear_msr_intercept(v, ctrls[i], MSR_R);
}
msr_bitmap_on(vpmu);
@@ -169,8 +169,8 @@ static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
for ( i = 0; i < num_counters; i++ )
{
- svm_set_msr_intercept(v, counters[i], MSR_RW);
- svm_set_msr_intercept(v, ctrls[i], MSR_RW);
+ hvm_set_msr_intercept(v, counters[i], MSR_RW);
+ hvm_set_msr_intercept(v, ctrls[i], MSR_RW);
}
msr_bitmap_off(vpmu);
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index 395830e803..ed32d4d754 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -219,22 +219,22 @@ static void core2_vpmu_set_msr_bitmap(struct vcpu *v)
/* Allow Read/Write PMU Counters MSR Directly. */
for ( i = 0; i < fixed_pmc_cnt; i++ )
- vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
+ hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
for ( i = 0; i < arch_pmc_cnt; i++ )
{
- vmx_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
+ hvm_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
if ( full_width_write )
- vmx_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
+ hvm_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
}
/* Allow Read PMU Non-global Controls Directly. */
for ( i = 0; i < arch_pmc_cnt; i++ )
- vmx_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
+ hvm_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
- vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
- vmx_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
+ hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
+ hvm_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
}
static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
@@ -242,21 +242,21 @@ static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
unsigned int i;
for ( i = 0; i < fixed_pmc_cnt; i++ )
- vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
+ hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
for ( i = 0; i < arch_pmc_cnt; i++ )
{
- vmx_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
+ hvm_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
if ( full_width_write )
- vmx_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
+ hvm_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
}
for ( i = 0; i < arch_pmc_cnt; i++ )
- vmx_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
+ hvm_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
- vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
- vmx_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
+ hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
+ hvm_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
}
static inline void __core2_vpmu_save(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 3ee0805ff3..cbd8eff270 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -277,7 +277,8 @@ svm_msrbit(unsigned long *msr_bitmap, uint32_t msr)
return msr_bit;
}
-void svm_set_msr_intercept(struct vcpu *v, unsigned int msr, unsigned int
flags)
+void cf_check svm_set_msr_intercept(struct vcpu *v, unsigned int msr,
+ unsigned int flags)
{
unsigned long *msr_bit = svm_msrbit(v->arch.hvm.svm.msrpm, msr);
@@ -291,8 +292,8 @@ void svm_set_msr_intercept(struct vcpu *v, unsigned int
msr, unsigned int flags)
__set_bit(msr * 2 + 1, msr_bit);
}
-void svm_clear_msr_intercept(struct vcpu *v, unsigned int msr,
- unsigned int flags)
+void cf_check svm_clear_msr_intercept(struct vcpu *v, unsigned int msr,
+ unsigned int flags)
{
unsigned long *msr_bit = svm_msrbit(v->arch.hvm.svm.msrpm, msr);
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index e7b67313a2..c051bcb91b 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -891,8 +891,8 @@ static void vmx_set_host_env(struct vcpu *v)
(unsigned long)&get_cpu_info()->guest_cpu_user_regs.error_code);
}
-void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
- unsigned int type)
+void cf_check vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
+ unsigned int type)
{
struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm.vmx.msr_bitmap;
struct domain *d = v->domain;
@@ -923,8 +923,8 @@ void vmx_clear_msr_intercept(struct vcpu *v, unsigned int
msr,
ASSERT(!"MSR out of range for interception\n");
}
-void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
- unsigned int type)
+void cf_check vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
+ unsigned int type)
{
struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm.vmx.msr_bitmap;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 8a873147a5..6a33e92b0a 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2742,6 +2742,8 @@ static struct hvm_function_table __initdata_cf_clobber
vmx_function_table = {
.nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
.update_vlapic_mode = vmx_vlapic_msr_changed,
.nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
+ .set_msr_intercept = vmx_set_msr_intercept,
+ .clear_msr_intercept = vmx_clear_msr_intercept,
.enable_msr_interception = vmx_enable_msr_interception,
.altp2m_vcpu_update_p2m = vmx_vcpu_update_eptp,
.altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h
b/xen/arch/x86/include/asm/hvm/hvm.h
index 5740a64281..96ff235614 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -213,6 +213,10 @@ struct hvm_function_table {
paddr_t *L1_gpa, unsigned int *page_order,
uint8_t *p2m_acc, struct npfec npfec);
+ void (*set_msr_intercept)(struct vcpu *v, unsigned int msr,
+ unsigned int flags);
+ void (*clear_msr_intercept)(struct vcpu *v, unsigned int msr,
+ unsigned int flags);
void (*enable_msr_interception)(struct domain *d, uint32_t msr);
/* Alternate p2m */
@@ -647,6 +651,20 @@ static inline int nhvm_hap_walk_L1_p2m(
v, L2_gpa, L1_gpa, page_order, p2m_acc, npfec);
}
+static inline void hvm_set_msr_intercept(struct vcpu *v, unsigned int msr,
+ unsigned int flags)
+{
+ if ( hvm_funcs.set_msr_intercept )
+ alternative_vcall(hvm_funcs.set_msr_intercept, v, msr, flags);
+}
+
+static inline void hvm_clear_msr_intercept(struct vcpu *v, unsigned int msr,
+ unsigned int flags)
+{
+ if ( hvm_funcs.clear_msr_intercept )
+ alternative_vcall(hvm_funcs.clear_msr_intercept, v, msr, flags);
+}
+
static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
{
alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
@@ -905,6 +923,18 @@ static inline void hvm_set_reg(struct vcpu *v, unsigned
int reg, uint64_t val)
ASSERT_UNREACHABLE();
}
+static inline void hvm_set_msr_intercept(struct vcpu *v, unsigned int msr,
+ unsigned int flags)
+{
+ ASSERT_UNREACHABLE();
+}
+
+static inline void hvm_clear_msr_intercept(struct vcpu *v, unsigned int msr,
+ unsigned int flags)
+{
+ ASSERT_UNREACHABLE();
+}
+
#define is_viridian_domain(d) ((void)(d), false)
#define is_viridian_vcpu(v) ((void)(v), false)
#define has_viridian_time_ref_count(d) ((void)(d), false)
diff --git a/xen/arch/x86/include/asm/hvm/svm/vmcb.h
b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
index 94deb0a236..5e84b4f4c1 100644
--- a/xen/arch/x86/include/asm/hvm/svm/vmcb.h
+++ b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
@@ -603,10 +603,10 @@ void svm_destroy_vmcb(struct vcpu *v);
void setup_vmcb_dump(void);
-void svm_set_msr_intercept(struct vcpu *v, unsigned int msr,
- unsigned int flags);
-void svm_clear_msr_intercept(struct vcpu *v, unsigned int msr,
- unsigned int flags);
+void cf_check svm_set_msr_intercept(struct vcpu *v, unsigned int msr,
+ unsigned int flags);
+void cf_check svm_clear_msr_intercept(struct vcpu *v, unsigned int msr,
+ unsigned int flags);
#define svm_disable_intercept_for_msr(v, msr) \
svm_clear_msr_intercept(v, msr, MSR_RW)
#define svm_enable_intercept_for_msr(v, msr) \
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
index af6a95b5d9..7f7d785977 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
@@ -633,10 +633,10 @@ static inline int vmx_write_guest_msr(struct vcpu *v,
uint32_t msr,
return 0;
}
-void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
- unsigned int type);
-void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
- unsigned int type);
+void cf_check vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
+ unsigned int type);
+void cf_check vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
+ unsigned int type);
void vmx_vmcs_switch(paddr_t from, paddr_t to);
void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
--
2.34.1
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |
servers 24x7x365 and backed by RackSpace's Fanatical Support. |