[Xen-devel] [PATCH v2] tools/pvh: set coherent MTRR state for all vCPUs
Instead of just doing it for the BSP. This requires storing the
maximum number of possible vCPUs in xc_dom_image.

This has been a latent bug so far because PVH doesn't yet support
pci-passthrough, so the effective memory cache attribute is forced
to WB by the hypervisor. Note also that even without this in place
vCPU#0 is preferred in certain scenarios in order to calculate the
memory cache attributes.

Reported-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
Changes since v1:
 - s/nr_vcpus/max_vcpus/.
 - assert max_vcpus is set.
---
 tools/libxc/include/xc_dom.h |  3 ++
 tools/libxc/xc_dom_x86.c     | 71 +++++++++++++++++++++++++-----------
 tools/libxl/libxl_dom.c      |  2 +
 3 files changed, 54 insertions(+), 22 deletions(-)

diff --git a/tools/libxc/include/xc_dom.h b/tools/libxc/include/xc_dom.h
index 0b5a632d3c..5900bbe8fa 100644
--- a/tools/libxc/include/xc_dom.h
+++ b/tools/libxc/include/xc_dom.h
@@ -230,6 +230,9 @@ struct xc_dom_image {
 #endif
 
     xen_pfn_t vuart_gfn;
+
+    /* Number of vCPUs */
+    unsigned int max_vcpus;
 };
 
 /* --- pluggable kernel loader ------------------------------------- */
diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index d77f2d6f62..77a4c6ccd0 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -955,17 +955,16 @@ static int vcpu_hvm(struct xc_dom_image *dom)
         HVM_SAVE_TYPE(HEADER) header;
         struct hvm_save_descriptor cpu_d;
         HVM_SAVE_TYPE(CPU) cpu;
-        struct hvm_save_descriptor mtrr_d;
-        HVM_SAVE_TYPE(MTRR) mtrr;
         struct hvm_save_descriptor end_d;
         HVM_SAVE_TYPE(END) end;
     } bsp_ctx;
-    const HVM_SAVE_TYPE(MTRR) *mtrr_record;
     uint8_t *full_ctx = NULL;
     int rc;
 
     DOMPRINTF_CALLED(dom->xch);
 
+    assert(dom->max_vcpus);
+
     /*
      * Get the full HVM context in order to have the header, it is not
      * possible to get the header with getcontext_partial, and crafting one
@@ -1034,35 +1033,63 @@ static int vcpu_hvm(struct xc_dom_image *dom)
     if ( dom->start_info_seg.pfn )
         bsp_ctx.cpu.rbx = dom->start_info_seg.pfn << PAGE_SHIFT;
 
-    /* Set the MTRR. */
-    bsp_ctx.mtrr_d.typecode = HVM_SAVE_CODE(MTRR);
-    bsp_ctx.mtrr_d.instance = 0;
-    bsp_ctx.mtrr_d.length = HVM_SAVE_LENGTH(MTRR);
+    /* Set the end descriptor. */
+    bsp_ctx.end_d.typecode = HVM_SAVE_CODE(END);
+    bsp_ctx.end_d.instance = 0;
+    bsp_ctx.end_d.length = HVM_SAVE_LENGTH(END);
 
-    mtrr_record = hvm_get_save_record(full_ctx, HVM_SAVE_CODE(MTRR), 0);
-    if ( !mtrr_record )
+    /* TODO: maybe this should be a firmware option instead? */
+    if ( !dom->device_model )
     {
-        xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
-                     "%s: unable to get MTRR save record", __func__);
-        goto out;
-    }
+        struct {
+            struct hvm_save_descriptor header_d;
+            HVM_SAVE_TYPE(HEADER) header;
+            struct hvm_save_descriptor mtrr_d;
+            HVM_SAVE_TYPE(MTRR) mtrr;
+            struct hvm_save_descriptor end_d;
+            HVM_SAVE_TYPE(END) end;
+        } mtrr = {
+            .header_d = bsp_ctx.header_d,
+            .header = bsp_ctx.header,
+            .mtrr_d.typecode = HVM_SAVE_CODE(MTRR),
+            .mtrr_d.length = HVM_SAVE_LENGTH(MTRR),
+            .end_d = bsp_ctx.end_d,
+            .end = bsp_ctx.end,
+        };
+        const HVM_SAVE_TYPE(MTRR) *mtrr_record =
+            hvm_get_save_record(full_ctx, HVM_SAVE_CODE(MTRR), 0);
+        unsigned int i;
+
+        if ( !mtrr_record )
+        {
+            xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+                         "%s: unable to get MTRR save record", __func__);
+            goto out;
+        }
 
-    memcpy(&bsp_ctx.mtrr, mtrr_record, sizeof(bsp_ctx.mtrr));
+        memcpy(&mtrr.mtrr, mtrr_record, sizeof(mtrr.mtrr));
 
-    /* TODO: maybe this should be a firmware option instead? */
-    if ( !dom->device_model )
         /*
          * Enable MTRR, set default type to WB.
          * TODO: add MMIO areas as UC when passthrough is supported.
          */
-        bsp_ctx.mtrr.msr_mtrr_def_type = MTRR_TYPE_WRBACK |
-                                         MTRR_DEF_TYPE_ENABLE;
+        mtrr.mtrr.msr_mtrr_def_type = MTRR_TYPE_WRBACK | MTRR_DEF_TYPE_ENABLE;
 
-    /* Set the end descriptor. */
-    bsp_ctx.end_d.typecode = HVM_SAVE_CODE(END);
-    bsp_ctx.end_d.instance = 0;
-    bsp_ctx.end_d.length = HVM_SAVE_LENGTH(END);
+        for ( i = 0; i < dom->max_vcpus; i++ )
+        {
+            mtrr.mtrr_d.instance = i;
+            rc = xc_domain_hvm_setcontext(dom->xch, dom->guest_domid,
+                                          (uint8_t *)&mtrr, sizeof(mtrr));
+            if ( rc != 0 )
+                xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+                             "%s: SETHVMCONTEXT failed (rc=%d)", __func__, rc);
+        }
+    }
 
+    /*
+     * Loading the BSP context should be done in the last call to setcontext,
+     * since each setcontext call will put all vCPUs down.
+     */
     rc = xc_domain_hvm_setcontext(dom->xch, dom->guest_domid,
                                   (uint8_t *)&bsp_ctx, sizeof(bsp_ctx));
    if ( rc != 0 )
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index 8a8a32c699..c66f3893d7 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -803,6 +803,7 @@ int libxl__build_pv(libxl__gc *gc, uint32_t domid,
     dom->xenstore_evtchn = state->store_port;
     dom->xenstore_domid = state->store_domid;
     dom->claim_enabled = libxl_defbool_val(info->claim_mode);
+    dom->max_vcpus = info->max_vcpus;
 
     if (info->num_vnuma_nodes != 0) {
         unsigned int i;
@@ -1256,6 +1257,7 @@ int libxl__build_hvm(libxl__gc *gc, uint32_t domid,
     dom->mmio_start = mmio_start;
     dom->vga_hole_size = device_model ? LIBXL_VGA_HOLE_SIZE : 0;
     dom->device_model = device_model;
+    dom->max_vcpus = info->max_vcpus;
 
     rc = libxl__domain_device_construct_rdm(gc, d_config,
                                             info->u.hvm.rdm_mem_boundary_memkb*1024,
-- 
2.19.0
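
For readers unfamiliar with the MTRR state being replicated above: the value
the per-vCPU loop stores in msr_mtrr_def_type follows the architectural layout
of MSR_MTRRdefType (0x2FF), where bits 7:0 hold the default memory type, bit 10
enables the fixed-range MTRRs, and bit 11 enables the MTRRs as a whole. Below
is a minimal standalone sketch of that composition; the two constants are
re-defined locally so it compiles on its own (in the patch they come from the
tools' MTRR headers), and the numeric values shown are the architectural ones
matching MTRR_TYPE_WRBACK and MTRR_DEF_TYPE_ENABLE.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Architectural bits of MSR_MTRRdefType (0x2FF); re-defined here so the
 * sketch is self-contained. */
#define MTRR_TYPE_WRBACK      0x06u       /* memory type 6: write-back */
#define MTRR_DEF_TYPE_ENABLE  (1u << 11)  /* MTRRdefType.E: global enable */

int main(void)
{
    /* What the patch writes into mtrr.mtrr.msr_mtrr_def_type for every
     * vCPU: MTRRs enabled, default memory type write-back. */
    uint64_t def_type = MTRR_TYPE_WRBACK | MTRR_DEF_TYPE_ENABLE;

    printf("msr_mtrr_def_type = %#" PRIx64 "\n", def_type); /* 0x806 */
    return 0;
}

One usage consequence of the patch: callers that drive the xc_dom_* interface
directly (i.e. without going through libxl) now need to fill in dom->max_vcpus
before building the domain, since vcpu_hvm() asserts it is non-zero; the two
libxl_dom.c hunks above do exactly that for the PV and HVM/PVH build paths.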