..., which adds masking of the xsave feature leaf. Also add back (and
fix it to actually do what it was supposed to do from the beginning)
the printing of which specific masking couldn't be applied when the
user requests something the hardware doesn't support.

Signed-off-by: Jan Beulich

---
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -20,10 +20,15 @@ size_param("cachesize", cachesize_overri
 static bool_t __cpuinitdata use_xsave = 1;
 boolean_param("xsave", use_xsave);
+
 unsigned int __devinitdata opt_cpuid_mask_ecx = ~0u;
 integer_param("cpuid_mask_ecx", opt_cpuid_mask_ecx);
 unsigned int __devinitdata opt_cpuid_mask_edx = ~0u;
 integer_param("cpuid_mask_edx", opt_cpuid_mask_edx);
+
+unsigned int __devinitdata opt_cpuid_mask_xsave_eax = ~0u;
+integer_param("cpuid_mask_xsave_eax", opt_cpuid_mask_xsave_eax);
+
 unsigned int __devinitdata opt_cpuid_mask_ext_ecx = ~0u;
 integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
 unsigned int __devinitdata opt_cpuid_mask_ext_edx = ~0u;
 integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
--- a/xen/arch/x86/cpu/cpu.h
+++ b/xen/arch/x86/cpu/cpu.h
@@ -22,6 +22,7 @@ struct cpu_dev {
 extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
 
 extern unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
+extern unsigned int opt_cpuid_mask_xsave_eax;
 extern unsigned int opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx;
 
 extern int get_model_name(struct cpuinfo_x86 *c);
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -57,6 +57,10 @@ void set_cpuid_faulting(bool_t enable)
  */
 static void __devinit set_cpuidmask(const struct cpuinfo_x86 *c)
 {
+	u32 eax, edx;
+	const char *extra = "";
+
 	if (!~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
-	       opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx))
+	       opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx &
+	       opt_cpuid_mask_xsave_eax))
 		return;
@@ -71,7 +75,11 @@ static void __devinit set_cpuidmask(cons
 		wrmsr(MSR_INTEL_CPUID_FEATURE_MASK,
 		      opt_cpuid_mask_ecx,
 		      opt_cpuid_mask_edx);
-		if (!~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx))
+		if (~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx))
+			extra = "extended ";
+		else if (~opt_cpuid_mask_xsave_eax)
+			extra = "xsave ";
+		else
 			return;
 		break;
 	/*
@@ -92,11 +100,25 @@ static void __devinit set_cpuidmask(cons
 		wrmsr(MSR_INTEL_CPUID80000001_FEATURE_MASK,
 		      opt_cpuid_mask_ext_ecx,
 		      opt_cpuid_mask_ext_edx);
+		if (!~opt_cpuid_mask_xsave_eax)
+			return;
+		extra = "xsave ";
+		break;
+	case 0x2a:
+		wrmsr(MSR_INTEL_CPUID1_FEATURE_MASK_V2,
+		      opt_cpuid_mask_ecx,
+		      opt_cpuid_mask_edx);
+		rdmsr(MSR_INTEL_CPUIDD_01_FEATURE_MASK, eax, edx);
+		wrmsr(MSR_INTEL_CPUIDD_01_FEATURE_MASK,
+		      opt_cpuid_mask_xsave_eax, edx);
+		wrmsr(MSR_INTEL_CPUID80000001_FEATURE_MASK_V2,
+		      opt_cpuid_mask_ext_ecx,
+		      opt_cpuid_mask_ext_edx);
 		return;
 	}
 
-	printk(XENLOG_ERR "Cannot set CPU feature mask on CPU#%d\n",
-	       smp_processor_id());
+	printk(XENLOG_ERR "Cannot set CPU %sfeature mask on CPU#%d\n",
+	       extra, smp_processor_id());
 }
 
 void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -492,6 +492,10 @@
 #define MSR_INTEL_CPUID1_FEATURE_MASK		0x00000130
 #define MSR_INTEL_CPUID80000001_FEATURE_MASK	0x00000131
 
+#define MSR_INTEL_CPUID1_FEATURE_MASK_V2	0x00000132
+#define MSR_INTEL_CPUID80000001_FEATURE_MASK_V2	0x00000133
+#define MSR_INTEL_CPUIDD_01_FEATURE_MASK	0x00000134
+
 /* Intel cpuid faulting MSRs */
 #define MSR_INTEL_PLATFORM_INFO		0x000000ce
 #define MSR_INTEL_MISC_FEATURES_ENABLES	0x00000140
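
Usage sketch (the mask value below is a hypothetical example; the
option name comes from the integer_param() added above): to hide
XSAVEOPT (bit 0 of CPUID leaf 0xD, sub-leaf 1, EAX) from guests, one
might boot Xen with

    cpuid_mask_xsave_eax=0xfffffffe

As with the pre-existing cpuid_mask_* options, the value is AND-ed
into the corresponding CPUID output, so the default of ~0 masks
nothing. On Intel models without MSR 0x134, such a request now
results in a "Cannot set CPU xsave feature mask on CPU#<n>" message
rather than being silently ignored.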