[Xen-devel] [PATCH RFC 08/12] xen/x86: setup PVHv2 Dom0 CPUs
Initialize Dom0 BSP/APs and set up the memory and IO permissions.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
The logic used to set up the CPUID leaves is extremely simplistic (and
probably wrong for hardware different from mine). I'm not sure what the
best way to deal with this is: the code that currently sets the CPUID
leaves for HVM guests lives in libxc, so maybe moving it to xen/common
would be better? (A rough sketch of the kind of table-driven masking
this involves is appended after the patch, for illustration.)
---
 xen/arch/x86/domain_build.c | 103 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 103 insertions(+)

diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index df6354a..89ef59e 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -40,6 +40,7 @@
 #include <public/version.h>
 #include <public/arch-x86/hvm/start_info.h>
+#include <public/hvm/hvm_vcpu.h>
 
 static long __initdata dom0_nrpages;
 static long __initdata dom0_min_nrpages;
@@ -1948,6 +1949,101 @@ out:
     return rc;
 }
 
+static int __init hvm_setup_cpus(struct domain *d, paddr_t entry,
+                                 paddr_t start_info)
+{
+    vcpu_hvm_context_t cpu_ctx;
+    struct vcpu *v = d->vcpu[0];
+    int cpu, i, rc;
+    struct {
+        uint32_t index;
+        uint32_t count;
+    } cpuid_leaves[] = {
+        {0, XEN_CPUID_INPUT_UNUSED},
+        {1, XEN_CPUID_INPUT_UNUSED},
+        {2, XEN_CPUID_INPUT_UNUSED},
+        {4, 0},
+        {4, 1},
+        {4, 2},
+        {4, 3},
+        {4, 4},
+        {7, 0},
+        {0xa, XEN_CPUID_INPUT_UNUSED},
+        {0xd, 0},
+        {0x80000000, XEN_CPUID_INPUT_UNUSED},
+        {0x80000001, XEN_CPUID_INPUT_UNUSED},
+        {0x80000002, XEN_CPUID_INPUT_UNUSED},
+        {0x80000003, XEN_CPUID_INPUT_UNUSED},
+        {0x80000004, XEN_CPUID_INPUT_UNUSED},
+        {0x80000005, XEN_CPUID_INPUT_UNUSED},
+        {0x80000006, XEN_CPUID_INPUT_UNUSED},
+        {0x80000007, XEN_CPUID_INPUT_UNUSED},
+        {0x80000008, XEN_CPUID_INPUT_UNUSED},
+    };
+
+    printk("** Setting up BSP/APs **\n");
+
+    cpu = v->processor;
+    for ( i = 1; i < d->max_vcpus; i++ )
+    {
+        cpu = cpumask_cycle(cpu, &dom0_cpus);
+        setup_dom0_vcpu(d, i, cpu);
+    }
+
+    memset(&cpu_ctx, 0, sizeof(cpu_ctx));
+
+    cpu_ctx.mode = VCPU_HVM_MODE_32B;
+
+    cpu_ctx.cpu_regs.x86_32.ebx = start_info;
+    cpu_ctx.cpu_regs.x86_32.eip = entry;
+    cpu_ctx.cpu_regs.x86_32.cr0 = X86_CR0_PE | X86_CR0_ET;
+
+    cpu_ctx.cpu_regs.x86_32.cs_limit = ~0u;
+    cpu_ctx.cpu_regs.x86_32.ds_limit = ~0u;
+    cpu_ctx.cpu_regs.x86_32.ss_limit = ~0u;
+    cpu_ctx.cpu_regs.x86_32.tr_limit = 0x67;
+    cpu_ctx.cpu_regs.x86_32.cs_ar = 0xc9b;
+    cpu_ctx.cpu_regs.x86_32.ds_ar = 0xc93;
+    cpu_ctx.cpu_regs.x86_32.ss_ar = 0xc93;
+    cpu_ctx.cpu_regs.x86_32.tr_ar = 0x8b;
+
+    rc = arch_set_info_hvm_guest(v, &cpu_ctx);
+    if ( rc )
+    {
+        printk("Unable to setup Dom0 BSP context: %d\n", rc);
+        return rc;
+    }
+    clear_bit(_VPF_down, &v->pause_flags);
+
+    for ( i = 0; i < ARRAY_SIZE(cpuid_leaves); i++ )
+    {
+        d->arch.cpuids[i].input[0] = cpuid_leaves[i].index;
+        d->arch.cpuids[i].input[1] = cpuid_leaves[i].count;
+        if ( d->arch.cpuids[i].input[1] == XEN_CPUID_INPUT_UNUSED )
+            cpuid(d->arch.cpuids[i].input[0], &d->arch.cpuids[i].eax,
+                  &d->arch.cpuids[i].ebx, &d->arch.cpuids[i].ecx,
+                  &d->arch.cpuids[i].edx);
+        else
+            cpuid_count(d->arch.cpuids[i].input[0], d->arch.cpuids[i].input[1],
+                        &d->arch.cpuids[i].eax, &d->arch.cpuids[i].ebx,
+                        &d->arch.cpuids[i].ecx, &d->arch.cpuids[i].edx);
+        /* XXX: we need to do much more filtering here. */
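+        /*
+         * NB: only leaf 1 ECX is touched below, and only to hide VMX;
+         * a complete implementation would apply a full featureset
+         * policy (as libxc does for regular HVM guests) to every
+         * leaf in the table above.
+         */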
+        if ( d->arch.cpuids[i].input[0] == 1 )
+            d->arch.cpuids[i].ecx &= ~cpufeat_mask(X86_FEATURE_VMX);
+    }
+
+    rc = setup_permissions(d);
+    if ( rc )
+    {
+        panic("Unable to setup Dom0 permissions: %d\n", rc);
+        return rc;
+    }
+
+    update_domain_wallclock_time(d);
+
+    return 0;
+}
+
 static int __init construct_dom0_hvm(struct domain *d, const module_t *image,
                                      unsigned long image_headroom,
                                      module_t *initrd,
@@ -1982,6 +2078,13 @@ static int __init construct_dom0_hvm(struct domain *d, const module_t *image,
         return rc;
     }
 
+    rc = hvm_setup_cpus(d, entry, start_info);
+    if ( rc )
+    {
+        printk("Failed to setup Dom0 CPUs: %d\n", rc);
+        return rc;
+    }
+
     return 0;
 }
-- 
2.7.4 (Apple Git-66)
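For illustration only: a minimal, self-contained sketch of the table-driven
CPUID masking the XXX comment above refers to. The structure, helper and
mask names are made up for the example (they are not an existing Xen
interface); the real per-guest CPUID policy code for HVM guests currently
lives in libxc.

/* Hypothetical sketch: per-leaf masks applied to the host's CPUID
 * output before it is handed to Dom0.  Standalone, not Xen code. */
#include <stdint.h>

#define CPUID1_ECX_VMX  (1u << 5)  /* VT-x */
#define CPUID1_ECX_SMX  (1u << 6)  /* Safer Mode Extensions */

struct leaf_mask {
    uint32_t leaf;       /* CPUID input value in EAX */
    uint32_t ecx_clear;  /* feature bits to hide from the guest */
};

/* Example policy: hide the virtualization extensions from Dom0. */
static const struct leaf_mask dom0_masks[] = {
    { 1, CPUID1_ECX_VMX | CPUID1_ECX_SMX },
};

static uint32_t filter_leaf_ecx(uint32_t leaf, uint32_t host_ecx)
{
    unsigned int i;

    for ( i = 0; i < sizeof(dom0_masks) / sizeof(dom0_masks[0]); i++ )
        if ( dom0_masks[i].leaf == leaf )
            host_ecx &= ~dom0_masks[i].ecx_clear;

    return host_ecx;
}

The patch above effectively hardcodes the single { 1, VMX } entry of such
a table; growing the table, and covering EAX/EBX/EDX as well, is what the
XXX comment is about.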
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel