Re: [Xen-devel] [PATCH, v2] x86: enable VIA CPU support
On 21/09/2012 15:24, "Jan Beulich" <JBeulich@xxxxxxxx> wrote:

> Newer VIA CPUs have both 64-bit and VMX support. Enable them to be
> recognized for these purposes, at once stripping any 32-bit-CPU-only
> bits from the respective CPU support file.
>
> This particularly implies untying the VMX == Intel assumption in a
> few places.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Acked-by: Keir Fraser <keir@xxxxxxx>

> ---
> v2: Use cpu_has_vmx instead of comparing hvm_funcs.name, as suggested
>     by Keir. Extend this to use cpu_has_svm as well in hvm_enable(),
>     making the respective open-coded checks in start_{svm,vmx}()
>     unnecessary.
>
> Note that my testing of this functionality wasn't as wide as I would
> have hoped, since the box I was provided only survived the first few
> days - meanwhile it doesn't stay up long enough to even build the
> hypervisor and tools. Therefore, further fixes to fully support these
> CPUs may be needed once the VIA folks themselves get to test that
> code.
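For reference, the cpu_has_vmx and cpu_has_svm predicates the note above
refers to reduce to feature-bit tests on the boot CPU's cached CPUID
flags; a minimal sketch of the idea (the exact definitions in the Xen
headers may differ in form):

    /*
     * Sketch only: predicates equivalent to the open-coded checks the
     * patch removes from start_vmx()/start_svm() below.  Because they
     * test a CPUID feature bit rather than the vendor ID, they hold on
     * any CPU advertising the feature, including VIA (Centaur) parts
     * with VMX.
     */
    #define cpu_has_vmx  test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability)
    #define cpu_has_svm  test_bit(X86_FEATURE_SVM,  &boot_cpu_data.x86_capability)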
> --- a/xen/arch/x86/acpi/suspend.c
> +++ b/xen/arch/x86/acpi/suspend.c
> @@ -32,7 +32,8 @@ void save_rest_processor_state(void)
>      rdmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
>      rdmsrl(MSR_CSTAR, saved_cstar);
>      rdmsrl(MSR_LSTAR, saved_lstar);
> -    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
> +    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
> +         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
>      {
>          rdmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
>          rdmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip);
> @@ -59,7 +60,8 @@ void restore_rest_processor_state(void)
>      wrmsrl(MSR_GS_BASE, saved_gs_base);
>      wrmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
>
> -    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
> +    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
> +         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
>      {
>          /* Recover sysenter MSRs */
>          wrmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
> --- a/xen/arch/x86/cpu/Makefile
> +++ b/xen/arch/x86/cpu/Makefile
> @@ -2,10 +2,8 @@ subdir-y += mcheck
>  subdir-y += mtrr
>
>  obj-y += amd.o
> +obj-y += centaur.o
>  obj-y += common.o
>  obj-y += intel.o
>  obj-y += intel_cacheinfo.o
>  obj-y += mwait-idle.o
> -
> -# Keeping around for VIA support (JBeulich)
> -# obj-$(x86_32) += centaur.o
> --- a/xen/arch/x86/cpu/centaur.c
> +++ b/xen/arch/x86/cpu/centaur.c
> @@ -45,51 +45,25 @@ static void __init init_c3(struct cpuinf
>                 c->x86_capability[5] = cpuid_edx(0xC0000001);
>         }
>
> -       /* Cyrix III family needs CX8 & PGE explicity enabled. */
> -       if (c->x86_model >=6 && c->x86_model <= 9) {
> -               rdmsrl(MSR_VIA_FCR, msr_content);
> -               wrmsrl(MSR_VIA_FCR, msr_content | (1ULL << 1 | 1ULL << 7));
> -               set_bit(X86_FEATURE_CX8, c->x86_capability);
> +       if (c->x86 == 0x6 && c->x86_model >= 0xf) {
> +               c->x86_cache_alignment = c->x86_clflush_size * 2;
> +               set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
>         }
>
> -       /* Before Nehemiah, the C3's had 3dNOW! */
> -       if (c->x86_model >=6 && c->x86_model <9)
> -               set_bit(X86_FEATURE_3DNOW, c->x86_capability);
> -
>         get_model_name(c);
>         display_cacheinfo(c);
>  }
>
>  static void __init init_centaur(struct cpuinfo_x86 *c)
>  {
> -       /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
> -          3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
> -       clear_bit(0*32+31, c->x86_capability);
> -
>         if (c->x86 == 6)
>                 init_c3(c);
>  }
>
> -static unsigned int centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
> -{
> -       /* VIA C3 CPUs (670-68F) need further shifting. */
> -       if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
> -               size >>= 8;
> -
> -       /* VIA also screwed up Nehemiah stepping 1, and made
> -          it return '65KB' instead of '64KB'
> -          - Note, it seems this may only be in engineering samples. */
> -       if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
> -               size -=1;
> -
> -       return size;
> -}
> -
>  static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
>         .c_vendor = "Centaur",
>         .c_ident = { "CentaurHauls" },
>         .c_init = init_centaur,
> -       .c_size_cache = centaur_size_cache,
>  };
>
>  int __init centaur_init_cpu(void)
> @@ -97,5 +71,3 @@ int __init centaur_init_cpu(void)
>         cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
>         return 0;
>  }
> -
> -//early_arch_initcall(centaur_init_cpu);
> --- a/xen/arch/x86/cpu/common.c
> +++ b/xen/arch/x86/cpu/common.c
> @@ -522,6 +522,7 @@ void __init early_cpu_init(void)
>  {
>      intel_cpu_init();
>      amd_init_cpu();
> +    centaur_init_cpu();
>      early_cpu_detect();
>  }
>  /*
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -111,17 +111,10 @@ static int __init hvm_enable(void)
>  {
>      struct hvm_function_table *fns = NULL;
>
> -    switch ( boot_cpu_data.x86_vendor )
> -    {
> -    case X86_VENDOR_INTEL:
> +    if ( cpu_has_vmx )
>          fns = start_vmx();
> -        break;
> -    case X86_VENDOR_AMD:
> +    else if ( cpu_has_svm )
>          fns = start_svm();
> -        break;
> -    default:
> -        break;
> -    }
>
>      if ( fns == NULL )
>          return 0;
> --- a/xen/arch/x86/hvm/nestedhvm.c
> +++ b/xen/arch/x86/hvm/nestedhvm.c
> @@ -152,7 +152,7 @@ static int __init
>  nestedhvm_setup(void)
>  {
>      /* Same format and size as hvm_io_bitmap (Intel needs only 2 pages). */
> -    unsigned int nr = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) ? 2 : 3;
> +    unsigned nr = cpu_has_vmx ? 2 : 3;
>      unsigned int i, order = get_order_from_pages(nr);
>
>      if ( !hvm_funcs.name )
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1240,9 +1240,6 @@ struct hvm_function_table * __init start
>  {
>      bool_t printed = 0;
>
> -    if ( !test_bit(X86_FEATURE_SVM, &boot_cpu_data.x86_capability) )
> -        return NULL;
> -
>      svm_host_osvw_reset();
>
>      if ( svm_cpu_up() )
> --- a/xen/arch/x86/hvm/viridian.c
> +++ b/xen/arch/x86/hvm/viridian.c
> @@ -156,8 +156,7 @@ static void enable_hypercall_page(struct
>      *(u32 *)(p + 1) = 0x80000000;
>      *(u8 *)(p + 5) = 0x0f; /* vmcall/vmmcall */
>      *(u8 *)(p + 6) = 0x01;
> -    *(u8 *)(p + 7) = ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
> -                      ? 0xc1 : 0xd9);
> +    *(u8 *)(p + 7) = (cpu_has_vmx ? 0xc1 : 0xd9);
>      *(u8 *)(p + 8) = 0xc3; /* ret */
>      memset(p + 9, 0xcc, PAGE_SIZE - 9); /* int3, int3, ... */
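The viridian.c hunk just above patches the routine that assembles the
Viridian hypercall page stub. Decoded, the emitted bytes amount to the
following (illustrative disassembly; the leading "or" opcode byte at
p + 0 is written just before the quoted context, and only one of
vmcall/vmmcall is actually emitted, selected via cpu_has_vmx because
each of the two instructions faults on the other vendor's hardware):

    0d 00 00 00 80    or     $0x80000000, %eax  ; tag the call as Viridian
    0f 01 c1          vmcall                    ; 0xc1, on VMX hosts
    0f 01 d9          vmmcall                   ; 0xd9, on SVM hosts
    c3                ret
    cc cc cc ...      int3 padding to the end of the page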
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -1516,9 +1516,6 @@ static struct hvm_function_table __read_
>
>  struct hvm_function_table * __init start_vmx(void)
>  {
> -    if ( !test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability) )
> -        return NULL;
> -
>      set_in_cr4(X86_CR4_VMXE);
>
>      if ( vmx_cpu_up() )
> --- a/xen/arch/x86/mm/mem_event.c
> +++ b/xen/arch/x86/mm/mem_event.c
> @@ -608,7 +608,7 @@ int mem_event_domctl(struct domain *d, x
>              break;
>
>          /* Currently only EPT is supported */
> -        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
> +        if ( !cpu_has_vmx )
>              break;
>
>          rc = mem_event_enable(d, mec, med, _VPF_mem_access,
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -83,7 +83,7 @@ static void p2m_initialise(struct domain
>
>      p2m->cr3 = CR3_EADDR;
>
> -    if ( hap_enabled(d) && (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
> +    if ( hap_enabled(d) && cpu_has_vmx )
>          ept_p2m_init(p2m);
>      else
>          p2m_pt_init(p2m);
> --- a/xen/arch/x86/x86_64/traps.c
> +++ b/xen/arch/x86/x86_64/traps.c
> @@ -399,7 +399,8 @@ void __devinit subarch_percpu_traps_init
>      wrmsrl(MSR_LSTAR, (unsigned long)stack);
>      stack += write_stack_trampoline(stack, stack_bottom, FLAT_KERNEL_CS64);
>
> -    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
> +    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
> +         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
>      {
>          /* SYSENTER entry. */
>          wrmsrl(MSR_IA32_SYSENTER_ESP, (unsigned long)stack_bottom);
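As a closing note, the patch's premise is that newer VIA parts advertise
VMX through the same standard CPUID feature leaf Intel uses. A
self-contained userspace probe along the following lines (illustrative
only, not part of the patch) can confirm that on a given box:

    #include <stdio.h>
    #include <string.h>
    #include <cpuid.h>              /* GCC/clang CPUID helper */

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        char vendor[13] = "";

        /* Leaf 0: vendor string; "CentaurHauls" on VIA parts, matching
         * the c_ident string in centaur.c above. */
        if ( !__get_cpuid(0, &eax, &ebx, &ecx, &edx) )
            return 1;
        memcpy(vendor + 0, &ebx, 4);
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);

        /* Leaf 1: ECX bit 5 is the VMX feature flag (X86_FEATURE_VMXE
         * in Xen's numbering). */
        if ( !__get_cpuid(1, &eax, &ebx, &ecx, &edx) )
            return 1;

        printf("vendor=%s vmx=%u\n", vendor, (ecx >> 5) & 1);
        return 0;
    }

Note that when run under a hypervisor the leaf may be filtered, so the
result is most meaningful on bare metal.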