--- a/xen/arch/x86/acpi/suspend.c
+++ b/xen/arch/x86/acpi/suspend.c
@@ -35,7 +35,8 @@ void save_rest_processor_state(void)
     rdmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
     rdmsrl(MSR_CSTAR, saved_cstar);
     rdmsrl(MSR_LSTAR, saved_lstar);
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
     {
         rdmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
         rdmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip);
@@ -64,7 +65,8 @@ void restore_rest_processor_state(void)
     wrmsrl(MSR_GS_BASE, saved_gs_base);
     wrmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
 
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
     {
         /* Recover sysenter MSRs */
         wrmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
--- a/xen/arch/x86/cpu/Makefile
+++ b/xen/arch/x86/cpu/Makefile
@@ -2,10 +2,10 @@ subdir-y += mcheck
 subdir-y += mtrr
 
 obj-y += amd.o
+obj-y += centaur.o
 obj-y += common.o
 obj-y += intel.o
 obj-y += intel_cacheinfo.o
 
-obj-$(x86_32) += centaur.o
 obj-$(x86_32) += cyrix.o
 obj-$(x86_32) += transmeta.o
--- a/xen/arch/x86/cpu/centaur.c
+++ b/xen/arch/x86/cpu/centaur.c
@@ -45,8 +45,9 @@ static void __init init_c3(struct cpuinf
 		c->x86_capability[5] = cpuid_edx(0xC0000001);
 	}
 
+#ifdef __i386__
 	/* Cyrix III family needs CX8 & PGE explicity enabled. */
-	if (c->x86_model >=6 && c->x86_model <= 9) {
+	if (c->x86_model >= 6 && c->x86_model <= 13) {
 		rdmsrl(MSR_VIA_FCR, msr_content);
 		wrmsrl(MSR_VIA_FCR, msr_content | (1ULL << 1 | 1ULL << 7));
 		set_bit(X86_FEATURE_CX8, c->x86_capability);
@@ -55,6 +56,12 @@ static void __init init_c3(struct cpuinf
 	/* Before Nehemiah, the C3's had 3dNOW! */
 	if (c->x86_model >=6 && c->x86_model <9)
 		set_bit(X86_FEATURE_3DNOW, c->x86_capability);
+#endif
+
+	if (c->x86 == 0x6 && c->x86_model >= 0xf) {
+		c->x86_cache_alignment = c->x86_clflush_size * 2;
+		set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+	}
 
 	get_model_name(c);
 	display_cacheinfo(c);
@@ -62,14 +69,17 @@ static void __init init_c3(struct cpuinf
 
 static void __init init_centaur(struct cpuinfo_x86 *c)
 {
+#ifdef __i386__
 	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
 	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
 	clear_bit(0*32+31, c->x86_capability);
+#endif
 
 	if (c->x86 == 6)
 		init_c3(c);
 }
 
+#ifdef __i386__
 static unsigned int centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
 {
 	/* VIA C3 CPUs (670-68F) need further shifting. */
@@ -84,12 +94,15 @@ static unsigned int centaur_size_cache(s
 
 	return size;
 }
+#endif
 
 static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_vendor = "Centaur",
 	.c_ident = { "CentaurHauls" },
 	.c_init = init_centaur,
+#ifdef __i386__
 	.c_size_cache = centaur_size_cache,
+#endif
 };
 
 int __init centaur_init_cpu(void)
@@ -97,5 +110,3 @@ int __init centaur_init_cpu(void)
 	cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
 	return 0;
 }
-
-//early_arch_initcall(centaur_init_cpu);
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -114,6 +114,7 @@ static int __init hvm_enable(void)
     switch ( boot_cpu_data.x86_vendor )
     {
     case X86_VENDOR_INTEL:
+    case X86_VENDOR_CENTAUR:
        fns = start_vmx();
        break;
     case X86_VENDOR_AMD:
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -151,13 +151,15 @@ nestedhvm_is_n2(struct vcpu *v)
 static int __init
 nestedhvm_setup(void)
 {
-    /* Same format and size as hvm_io_bitmap (Intel needs only 2 pages). */
-    unsigned int nr = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) ? 2 : 3;
-    unsigned int i, order = get_order_from_pages(nr);
+    unsigned int i, nr, order;
 
     if ( !hvm_funcs.name )
         return 0;
 
+    /* Same format and size as hvm_io_bitmap (VMX needs only 2 pages). */
+    nr = !strcmp(hvm_funcs.name, "VMX") ? 2 : 3;
+    order = get_order_from_pages(nr);
+
     /* shadow_io_bitmaps can't be declared static because
      * they must fulfill hw requirements (page aligned section)
      * and doing so triggers the ASSERT(va >= XEN_VIRT_START)
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -156,8 +156,7 @@ static void enable_hypercall_page(struct
     *(u32 *)(p + 1) = 0x80000000;
     *(u8  *)(p + 5) = 0x0f; /* vmcall/vmmcall */
     *(u8  *)(p + 6) = 0x01;
-    *(u8  *)(p + 7) = ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-                       ? 0xc1 : 0xd9);
+    *(u8  *)(p + 7) = (!strcmp(hvm_funcs.name, "VMX") ? 0xc1 : 0xd9);
     *(u8  *)(p + 8) = 0xc3; /* ret */
     memset(p + 9, 0xcc, PAGE_SIZE - 9); /* int3, int3, ... */
 
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1186,7 +1186,7 @@ int vlapic_init(struct vcpu *v)
 #ifdef __i386__
     /* 32-bit VMX may be limited to 32-bit physical addresses. */
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+    if ( !strcmp(hvm_funcs.name, "VMX") )
         memflags |= MEMF_bits(32);
 #endif
 
     if (vlapic->regs_page == NULL)
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -608,7 +608,7 @@ int mem_event_domctl(struct domain *d, x
             break;
 
         /* Currently only EPT is supported */
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
+        if ( strcmp(hvm_funcs.name, "VMX") )
             break;
 
         rc = mem_event_enable(d, mec, med, _VPF_mem_access,
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -83,7 +83,7 @@ static void p2m_initialise(struct domain
 
     p2m->cr3 = CR3_EADDR;
 
-    if ( hap_enabled(d) && (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
+    if ( hap_enabled(d) && !strcmp(hvm_funcs.name, "VMX") )
         ept_p2m_init(p2m);
     else
         p2m_pt_init(p2m);
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -399,7 +399,8 @@ void __devinit subarch_percpu_traps_init
     wrmsrl(MSR_LSTAR, (unsigned long)stack);
     stack += write_stack_trampoline(stack, stack_bottom, FLAT_KERNEL_CS64);
 
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
     {
         /* SYSENTER entry. */
         wrmsrl(MSR_IA32_SYSENTER_ESP, (unsigned long)stack_bottom);
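Reviewer's note (not part of the patch): the recurring change above is the replacement of vendor checks (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) with checks on the registered HVM backend (hvm_funcs.name), because VIA/Centaur CPUs implement Intel-style VMX and "vendor is Intel" is no longer a valid proxy for "VMX in use". The sketch below is a minimal, self-contained illustration of that dispatch pattern; the stub hvm_function_table, the vendor enum, and the start_vmx()/start_svm() stubs are simplified stand-ins for Xen's real definitions, not Xen code. The backend names "VMX" and "SVM" mirror the ones the real backends register.

/* Illustrative sketch only -- compile with any C compiler. */
#include <stdio.h>
#include <string.h>

struct hvm_function_table {
    const char *name;   /* set by the active backend: "VMX" or "SVM" */
};

static struct hvm_function_table hvm_funcs;

enum vendor { VENDOR_INTEL, VENDOR_AMD, VENDOR_CENTAUR };

/* Stand-ins for Xen's start_vmx()/start_svm() backend setup. */
static void start_vmx(void) { hvm_funcs.name = "VMX"; }
static void start_svm(void) { hvm_funcs.name = "SVM"; }

int main(void)
{
    enum vendor v = VENDOR_CENTAUR;   /* e.g. a VIA Nano with VMX */

    /* Mirrors the hvm_enable() hunk: Centaur is routed to VMX too. */
    switch (v) {
    case VENDOR_INTEL:
    case VENDOR_CENTAUR:
        start_vmx();
        break;
    case VENDOR_AMD:
        start_svm();
        break;
    }

    /*
     * Before the patch, VMX-only paths (EPT, vmcall opcode, io-bitmap
     * sizing) asked "is the vendor Intel?", which breaks once a
     * non-Intel CPU runs VMX.  Asking the backend directly stays
     * correct for Centaur without adding a vendor check per call site.
     */
    if (!strcmp(hvm_funcs.name, "VMX"))
        printf("VMX backend active; VMX-only paths are reachable\n");
    return 0;
}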