|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v2 1/4] x86/vpmu: Move vpmu_do_cpuid() handling into {pv, hvm}_cpuid()
This reduces the net complexity of CPUID handling by having all adjustments
in the same place. Remove the now-unused vpmu_do_cpuid() infrastructure.
This involves introducing a vpmu_enabled() predicate, and making the Intel
specific VPMU_CPU_HAS_* constants public.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
xen/arch/x86/cpu/vpmu.c | 10 ---------
xen/arch/x86/cpu/vpmu_intel.c | 52 -------------------------------------------
xen/arch/x86/hvm/hvm.c | 23 +++++++++++++++++++
xen/arch/x86/hvm/vmx/vmx.c | 2 --
xen/arch/x86/traps.c | 26 +++++++++++++++++-----
xen/include/asm-x86/vpmu.h | 10 ++++-----
6 files changed, 48 insertions(+), 75 deletions(-)
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index 2f9ddf6..a542f4d 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -347,16 +347,6 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
}
}
-void vpmu_do_cpuid(unsigned int input,
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- struct vpmu_struct *vpmu = vcpu_vpmu(current);
-
- if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_cpuid )
- vpmu->arch_vpmu_ops->do_cpuid(input, eax, ebx, ecx, edx);
-}
-
static void vpmu_save_force(void *arg)
{
struct vcpu *v = (struct vcpu *)arg;
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index e8049ed..e3f25c8 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -68,10 +68,6 @@
#define MSR_PMC_ALIAS_MASK (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_A_PERFCTR0))
static bool_t __read_mostly full_width_write;
-/* Intel-specific VPMU features */
-#define VPMU_CPU_HAS_DS 0x100 /* Has Debug Store */
-#define VPMU_CPU_HAS_BTS 0x200 /* Has Branch Trace Store */
-
/*
* MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
* counters. 4 bits for every counter.
@@ -782,33 +778,6 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t
*msr_content)
return 0;
}
-static void core2_vpmu_do_cpuid(unsigned int input,
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- switch ( input )
- {
- case 0x1:
-
- if ( vpmu_is_set(vcpu_vpmu(current), VPMU_CPU_HAS_DS) )
- {
- /* Switch on the 'Debug Store' feature in CPUID.EAX[1]:EDX[21] */
- *edx |= cpufeat_mask(X86_FEATURE_DS);
- if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
- *ecx |= cpufeat_mask(X86_FEATURE_DTES64);
- if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
- *ecx |= cpufeat_mask(X86_FEATURE_DSCPL);
- }
- break;
-
- case 0xa:
- /* Report at most version 3 since that's all we currently emulate */
- if ( MASK_EXTR(*eax, PMU_VERSION_MASK) > 3 )
- *eax = (*eax & ~PMU_VERSION_MASK) | MASK_INSR(3, PMU_VERSION_MASK);
- break;
- }
-}
-
/* Dump vpmu info on console, called in the context of keyhandler 'q'. */
static void core2_vpmu_dump(const struct vcpu *v)
{
@@ -900,32 +869,12 @@ struct arch_vpmu_ops core2_vpmu_ops = {
.do_wrmsr = core2_vpmu_do_wrmsr,
.do_rdmsr = core2_vpmu_do_rdmsr,
.do_interrupt = core2_vpmu_do_interrupt,
- .do_cpuid = core2_vpmu_do_cpuid,
.arch_vpmu_destroy = core2_vpmu_destroy,
.arch_vpmu_save = core2_vpmu_save,
.arch_vpmu_load = core2_vpmu_load,
.arch_vpmu_dump = core2_vpmu_dump
};
-static void core2_no_vpmu_do_cpuid(unsigned int input,
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- /*
- * As in this case the vpmu is not enabled reset some bits in the
- * architectural performance monitoring related part.
- */
- if ( input == 0xa )
- {
- *eax &= ~PMU_VERSION_MASK;
- *eax &= ~PMU_GENERAL_NR_MASK;
- *eax &= ~PMU_GENERAL_WIDTH_MASK;
-
- *edx &= ~PMU_FIXED_NR_MASK;
- *edx &= ~PMU_FIXED_WIDTH_MASK;
- }
-}
-
/*
* If its a vpmu msr set it to 0.
*/
@@ -943,7 +892,6 @@ static int core2_no_vpmu_do_rdmsr(unsigned int msr,
uint64_t *msr_content)
*/
struct arch_vpmu_ops core2_no_vpmu_ops = {
.do_rdmsr = core2_no_vpmu_do_rdmsr,
- .do_cpuid = core2_no_vpmu_do_cpuid,
};
int vmx_vpmu_initialise(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7763798..1c35dde 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3516,6 +3516,17 @@ void hvm_cpuid(unsigned int input, unsigned int *eax,
unsigned int *ebx,
if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
*edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
}
+
+ if ( vpmu_enabled(v) &&
+ vpmu_is_set(vcpu_vpmu(v), VPMU_CPU_HAS_DS) )
+ {
+ *edx |= cpufeat_mask(X86_FEATURE_DS);
+ if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
+ *ecx |= cpufeat_mask(X86_FEATURE_DTES64);
+ if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
+ *ecx |= cpufeat_mask(X86_FEATURE_DSCPL);
+ }
+
break;
case 0x7:
@@ -3646,6 +3657,18 @@ void hvm_cpuid(unsigned int input, unsigned int *eax,
unsigned int *ebx,
}
break;
+ case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
+ if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || !vpmu_enabled(v) )
+ {
+ *eax = *ebx = *ecx = *edx = 0;
+ break;
+ }
+
+ /* Report at most version 3 since that's all we currently emulate */
+ if ( (*eax & 0xff) > 3 )
+ *eax = (*eax & ~0xff) | 3;
+ break;
+
case 0x80000001:
*ecx &= hvm_featureset[FEATURESET_e1c];
*edx &= hvm_featureset[FEATURESET_e1d];
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index afde634..505cdea 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2362,8 +2362,6 @@ static void vmx_cpuid_intercept(
break;
}
- vpmu_do_cpuid(input, eax, ebx, ecx, edx);
-
HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx);
}
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index d8b68e1..48ac519 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1192,6 +1192,16 @@ void pv_cpuid(struct cpu_user_regs *regs)
}
}
+ if ( vpmu_enabled(curr) &&
+ vpmu_is_set(vcpu_vpmu(curr), VPMU_CPU_HAS_DS) )
+ {
+ d |= cpufeat_mask(X86_FEATURE_DS);
+ if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
+ c |= cpufeat_mask(X86_FEATURE_DTES64);
+ if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
+ c |= cpufeat_mask(X86_FEATURE_DSCPL);
+ }
+
c |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
break;
@@ -1224,6 +1234,16 @@ void pv_cpuid(struct cpu_user_regs *regs)
a = 0;
break;
+ case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
+ if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+ !vpmu_enabled(curr) )
+ goto unsupported;
+
+ /* Report at most version 3 since that's all we currently emulate. */
+ if ( (a & 0xff) > 3 )
+ a = (a & ~0xff) | 3;
+ break;
+
case XSTATE_CPUID:
if ( !is_control_domain(currd) && !is_hardware_domain(currd) )
@@ -1329,9 +1349,6 @@ void pv_cpuid(struct cpu_user_regs *regs)
b &= pv_featureset[FEATURESET_e8b];
break;
- case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
- break;
-
case 0x00000005: /* MONITOR/MWAIT */
case 0x0000000b: /* Extended Topology Enumeration */
case 0x8000000a: /* SVM revision and features */
@@ -1344,9 +1361,6 @@ void pv_cpuid(struct cpu_user_regs *regs)
}
out:
- /* VPMU may decide to modify some of the leaves */
- vpmu_do_cpuid(leaf, &a, &b, &c, &d);
-
regs->eax = a;
regs->ebx = b;
regs->ecx = c;
diff --git a/xen/include/asm-x86/vpmu.h b/xen/include/asm-x86/vpmu.h
index ed9ec07..d1dda4b 100644
--- a/xen/include/asm-x86/vpmu.h
+++ b/xen/include/asm-x86/vpmu.h
@@ -25,6 +25,7 @@
#define vcpu_vpmu(vcpu) (&(vcpu)->arch.vpmu)
#define vpmu_vcpu(vpmu) container_of((vpmu), struct vcpu, arch.vpmu)
+#define vpmu_enabled(vcpu) vpmu_is_set(vcpu_vpmu(vcpu), VPMU_CONTEXT_ALLOCATED)
#define MSR_TYPE_COUNTER 0
#define MSR_TYPE_CTRL 1
@@ -42,9 +43,6 @@ struct arch_vpmu_ops {
uint64_t supported);
int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content);
int (*do_interrupt)(struct cpu_user_regs *regs);
- void (*do_cpuid)(unsigned int input,
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx);
void (*arch_vpmu_destroy)(struct vcpu *v);
int (*arch_vpmu_save)(struct vcpu *v, bool_t to_guest);
int (*arch_vpmu_load)(struct vcpu *v, bool_t from_guest);
@@ -77,6 +75,10 @@ struct vpmu_struct {
/* PV(H) guests: VPMU registers are accessed by guest from shared page */
#define VPMU_CACHED 0x40
+/* Intel-specific VPMU features */
+#define VPMU_CPU_HAS_DS 0x100 /* Has Debug Store */
+#define VPMU_CPU_HAS_BTS 0x200 /* Has Branch Trace Store */
+
static inline void vpmu_set(struct vpmu_struct *vpmu, const u32 mask)
{
vpmu->flags |= mask;
@@ -103,8 +105,6 @@ void vpmu_lvtpc_update(uint32_t val);
int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
uint64_t supported, bool_t is_write);
void vpmu_do_interrupt(struct cpu_user_regs *regs);
-void vpmu_do_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx);
void vpmu_initialise(struct vcpu *v);
void vpmu_destroy(struct vcpu *v);
void vpmu_save(struct vcpu *v);
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |