
Re: [Xen-devel] [PATCH v2 2/3] x86/AMD: support further feature masking MSRs



On 07/04/14 10:43, Jan Beulich wrote:
Newer AMD CPUs also allow masking CPUID leaf 6 ECX and CPUID leaf 7
sub-leaf 0 EAX and EBX.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Aravind Gopalakrishnan <aravind.gopalakrishnan@xxxxxxx>
---
v2: Adjust for new prior patch. Apply leaf 6 mask only on Fam15.

--- 2014-03-17.orig/docs/misc/xen-command-line.markdown	2014-04-04 11:27:32.000000000 +0200
+++ 2014-03-17/docs/misc/xen-command-line.markdown	2014-03-18 14:37:31.000000000 +0100
@@ -331,24 +331,42 @@ Indicate where the responsibility for dr
 ### cpuid\_mask\_cpu (AMD only)
 > `= fam_0f_rev_c | fam_0f_rev_d | fam_0f_rev_e | fam_0f_rev_f | fam_0f_rev_g | fam_10_rev_b | fam_10_rev_c | fam_11_rev_b`
 
-If the other **cpuid\_mask\_{,ext\_}e{c,d}x** options are fully set
-(unspecified on the command line), specify a pre-canned cpuid mask to
-mask the current processor down to appear as the specified processor.
-It is important to ensure that all hosts in a pool appear the same to
-guests to allow successful live migration.
+If the other **cpuid\_mask\_{,ext\_,thermal\_,l7s0\_}e{a,b,c,d}x**
+options are fully set (unspecified on the command line), specify a
+pre-canned cpuid mask to mask the current processor down to appear as
+the specified processor. It is important to ensure that all hosts in a
+pool appear the same to guests to allow successful live migration.
 
-### cpuid\_mask\_ ecx,edx,ext\_ecx,ext\_edx,xsave_eax
+### cpuid\_mask\_{,ext\_}e{c,d}x
 > `= <integer>`
 
 > Default: `~0` (all bits set)
 
-These five command line parameters are used to specify cpuid masks to
+These four command line parameters are used to specify cpuid masks to
 help with cpuid levelling across a pool of hosts.  Setting a bit in
 the mask indicates that the feature should be enabled, while clearing
 a bit in the mask indicates that the feature should be disabled.  It
 is important to ensure that all hosts in a pool appear the same to
 guests to allow successful live migration.
 
+### cpuid\_mask\_xsave\_eax (Intel only)
+> `= <integer>`
+
+> Default: `~0` (all bits set)
+
+This command line parameter is also used to specify a cpuid mask to
+help with cpuid levelling across a pool of hosts.  See the description
+of the other respective options above.
+
+### cpuid\_mask\_{l7s0\_{eax,ebx},thermal\_ecx} (AMD only)
+> `= <integer>`
+
+> Default: `~0` (all bits set)
+
+These three command line parameters are also used to specify cpuid
+masks to help with cpuid levelling across a pool of hosts.  See the
+description of the other respective options above.
+
 ### cpuidle
 > `= <boolean>`
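
As an aside, here is a minimal sketch (not part of the patch) of how a value for one of these mask options could be put together, assuming the standard CPUID leaf 7 sub-leaf 0 EBX layout in which bit 7 is SMEP:

/* Sketch only: build a cpuid_mask_l7s0_ebx value that hides one feature.
 * Per the documentation above, a set bit leaves the feature exposed and a
 * cleared bit hides it from guests. */
#include <stdio.h>

int main(void)
{
	unsigned int mask = ~0u;      /* default: all bits set, nothing masked */

	mask &= ~(1u << 7);           /* clear bit 7 (SMEP) to hide it */
	printf("cpuid_mask_l7s0_ebx=%#010x\n", mask);
	return 0;
}

Booting with the printed value, e.g. cpuid_mask_l7s0_ebx=0xffffff7f, would then hide SMEP on hosts with this patch applied.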
 
--- 2014-03-17.orig/xen/arch/x86/cpu/amd.c	2014-04-04 11:31:51.000000000 +0200
+++ 2014-03-17/xen/arch/x86/cpu/amd.c	2014-04-04 11:35:31.000000000 +0200
@@ -30,9 +30,17 @@
  * "fam_10_rev_c"
  * "fam_11_rev_b"
  */
-static char opt_famrev[14];
+static char __initdata opt_famrev[14];
 string_param("cpuid_mask_cpu", opt_famrev);
 
+static unsigned int __initdata opt_cpuid_mask_l7s0_eax = ~0u;
+integer_param("cpuid_mask_l7s0_eax", opt_cpuid_mask_l7s0_eax);
+static unsigned int __initdata opt_cpuid_mask_l7s0_ebx = ~0u;
+integer_param("cpuid_mask_l7s0_ebx", opt_cpuid_mask_l7s0_ebx);
+
+static unsigned int __initdata opt_cpuid_mask_thermal_ecx = ~0u;
+integer_param("cpuid_mask_thermal_ecx", opt_cpuid_mask_thermal_ecx);
+
 /* 1 = allow, 0 = don't allow guest creation, -1 = don't allow boot */
 s8 __read_mostly opt_allow_unsafe;
 boolean_param("allow_unsafe", opt_allow_unsafe);
@@ -96,7 +104,11 @@ static void __devinit set_cpuidmask(cons
 {
 	static unsigned int feat_ecx, feat_edx;
 	static unsigned int extfeat_ecx, extfeat_edx;
+	static unsigned int l7s0_eax, l7s0_ebx;
+	static unsigned int thermal_ecx;
+	static bool_t skip_l7s0_eax_ebx, skip_thermal_ecx;
 	static enum { not_parsed, no_mask, set_mask } status;
+	unsigned int eax, ebx, ecx, edx;
 
 	if (status == no_mask)
 		return;
@@ -104,7 +116,7 @@ static void __devinit set_cpuidmask(cons
 	if (status == set_mask)
 		goto setmask;
 
-	ASSERT((status == not_parsed) && (smp_processor_id() == 0));
+	ASSERT((status == not_parsed) && (c == &boot_cpu_data));
 	status = no_mask;
 
 	/* Fam11 doesn't support masking at all. */
@@ -112,11 +124,16 @@ static void __devinit set_cpuidmask(cons
 		return;
 
 	if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
-	      opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx)) {
+	      opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx &
+	      opt_cpuid_mask_l7s0_eax & opt_cpuid_mask_l7s0_ebx &
+	      opt_cpuid_mask_thermal_ecx)) {
 		feat_ecx = opt_cpuid_mask_ecx;
 		feat_edx = opt_cpuid_mask_edx;
 		extfeat_ecx = opt_cpuid_mask_ext_ecx;
 		extfeat_edx = opt_cpuid_mask_ext_edx;
+		l7s0_eax = opt_cpuid_mask_l7s0_eax;
+		l7s0_ebx = opt_cpuid_mask_l7s0_ebx;
+		thermal_ecx = opt_cpuid_mask_thermal_ecx;
 	} else if (*opt_famrev == '\0') {
 		return;
 	} else if (!strcmp(opt_famrev, "fam_0f_rev_c")) {
@@ -179,11 +196,39 @@ static void __devinit set_cpuidmask(cons
 	printk("Writing CPUID extended feature mask ECX:EDX -> %08Xh:%08Xh\n", 
 	       extfeat_ecx, extfeat_edx);
 
+	if (c->cpuid_level >= 7)
+		cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);
+	else
+		ebx = eax = 0;
+	if ((eax | ebx) && ~(l7s0_eax & l7s0_ebx)) {
+		if (l7s0_eax > eax)
+			l7s0_eax = eax;
+		l7s0_ebx &= ebx;
+		printk("Writing CPUID leaf 7 subleaf 0 feature mask EAX:EBX -> %08Xh:%08Xh\n",
+		       l7s0_eax, l7s0_ebx);
+	} else
+		skip_l7s0_eax_ebx = 1;
+
+	/* Only Fam15 has the respective MSR. */
+	ecx = c->x86 == 0x15 && c->cpuid_level >= 6 ? cpuid_ecx(6) : 0;
+	if (ecx && ~thermal_ecx) {
+		thermal_ecx &= ecx;
+		printk("Writing CPUID thermal/power feature mask ECX -> %08Xh\n",
+		       thermal_ecx);
+	} else
+		skip_thermal_ecx = 1;
+
  setmask:
 	/* AMD processors prior to family 10h required a 32-bit password */
 	if (c->x86 >= 0x10) {
 		wrmsr(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx);
 		wrmsr(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx);
+		if (!skip_l7s0_eax_ebx)
+			wrmsr(MSR_AMD_L7S0_FEATURE_MASK, l7s0_ebx, l7s0_eax);
+		if (!skip_thermal_ecx) {
+			rdmsr(MSR_AMD_THRM_FEATURE_MASK, eax, edx);
+			wrmsr(MSR_AMD_THRM_FEATURE_MASK, thermal_ecx, edx);
+		}
 	} else {
 		wrmsr_amd(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx);
 		wrmsr_amd(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx);
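
One detail worth noting in the hunk above: leaf 7 sub-leaf 0 EAX reports the maximum supported sub-leaf (a count) rather than a feature bitmap, which is why the patch clamps the user-supplied value to the hardware value instead of ANDing it the way it does for EBX. Expressed as a pair of hypothetical helpers:

static inline unsigned int combine_l7s0_eax(unsigned int user, unsigned int hw)
{
	/* EAX is a count of sub-leaves, so clamp rather than AND. */
	return user > hw ? hw : user;
}

static inline unsigned int combine_l7s0_ebx(unsigned int user, unsigned int hw)
{
	/* EBX is a feature bitmap, so intersect the two sets of bits. */
	return user & hw;
}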

While editing this, can we remove this crazy split between wrmsr and wrmsr_amd?  It is safe to use wrmsr_amd in all cases where wrmsr is needed.

I already did this as part of the prep work for per-VM masking, but in combination with the above, the following should be fine:

wrmsr_amd(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx);
wrmsr_amd(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx);
if (!skip_l7s0_eax_ebx)
    wrmsr(MSR_AMD_L7S0_FEATURE_MASK, l7s0_ebx, l7s0_eax);
if (!skip_thermal_ecx) {
    rdmsr(MSR_AMD_THRM_FEATURE_MASK, eax, edx);
    wrmsr(MSR_AMD_THRM_FEATURE_MASK, thermal_ecx, edx);
}

~Andrew
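
For reference, a minimal sketch (not the in-tree helper) of what the password-carrying variant boils down to, assuming the usual 0x9c5a203a passcode in %edi; on families that do not require the password the extra register value is simply ignored, which is why the if/else split above is unnecessary:

/* Sketch only: WRMSR with the 32-bit passcode in %edi.  Pre-Fam10h parts
 * require the passcode for the K8 feature mask MSRs (per the comment in the
 * hunk above); later parts ignore %edi, so one helper works for every
 * family. */
static inline void wrmsr_amd_sketch(unsigned int msr,
				    unsigned int lo, unsigned int hi)
{
	asm volatile ("wrmsr"
		      : /* no outputs */
		      : "c" (msr), "a" (lo), "d" (hi),
			"D" (0x9c5a203a) /* assumed AMD passcode */);
}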

--- 2014-03-17.orig/xen/include/asm-x86/msr-index.h	2014-03-10 11:36:00.000000000 +0100
+++ 2014-03-17/xen/include/asm-x86/msr-index.h	2014-03-18 12:19:10.000000000 +0100
@@ -192,6 +192,8 @@
 #define MSR_AMD_FAM15H_EVNTSEL5		0xc001020a
 #define MSR_AMD_FAM15H_PERFCTR5		0xc001020b
 
+#define MSR_AMD_L7S0_FEATURE_MASK	0xc0011002
+#define MSR_AMD_THRM_FEATURE_MASK	0xc0011003
 #define MSR_K8_FEATURE_MASK		0xc0011004
 #define MSR_K8_EXT_FEATURE_MASK		0xc0011005
 




_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
