Re: [Xen-devel] [PATCH v25 08/15] x86/VPMU: When handling MSR accesses, leave fault injection to callers
On Friday, 19 June 2015, 14:44:39, Boris Ostrovsky wrote:
> The hypervisor cannot easily inject faults into PV guests from the arch-specific VPMU
> read/write MSR handlers (unlike in the case of HVM guests).
>
> With this patch vpmu_do_msr() will return an error code to indicate whether an
> error was encountered during MSR processing (instead of stating that the access
> was to a VPMU register). The caller will then decide how to deal with the error.
>
> As part of this patch we also check the validity of certain MSR accesses right
> when we determine which register is being written, as opposed to postponing this
> until later.
>
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
> ---
> Changes in v25:
> * Updated commit message to mention reason for changing vpmu_do_msr return
> values
Reviewed-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
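
As a side note for readers less familiar with the convention the commit message
describes, below is a minimal, self-contained C sketch (illustration only, not
Xen code; all demo_* names are hypothetical) of the pattern this patch adopts:
the VPMU MSR handler returns 0 on success or a negative error code on an
invalid access, and the caller, not the handler, decides whether to inject #GP.
This mirrors the "if ( vpmu_do_wrmsr(...) ) goto gpf;" hunks in the patch below.

/*
 * Illustrative sketch, not Xen code: hypothetical demo_* names showing the
 * calling convention the patch moves to.  The handler reports an error code;
 * the caller decides how to react (here it "injects" #GP itself), instead of
 * the handler injecting the fault.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for an arch-specific VPMU write handler: 0 on success, -EINVAL
 * on an invalid access (reserved high bits set, in this toy check). */
static int demo_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
{
    if ( msr_content >> 32 )
        return -EINVAL;
    return 0;
}

/* Stand-in for an HVM-style MSR write intercept: on a handler error it
 * injects #GP(0) itself (printf stands in for hvm_inject_hw_exception()). */
static void demo_msr_write_intercept(unsigned int msr, uint64_t msr_content)
{
    if ( demo_vpmu_do_wrmsr(msr, msr_content) )
        printf("MSR %#x: invalid write, caller injects #GP(0)\n", msr);
    else
        printf("MSR %#x: write accepted\n", msr);
}

int main(void)
{
    demo_msr_write_intercept(0x186, 0x41);        /* valid: low bits only */
    demo_msr_write_intercept(0x186, 1ull << 40);  /* invalid: reserved bits */
    return 0;
}

With the decision pushed to the caller, a PV entry point that cannot use the
HVM fault-injection helper can propagate the error along its own fault path,
which is the motivation given in the commit message.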
>
> xen/arch/x86/hvm/svm/svm.c        |  6 ++-
> xen/arch/x86/hvm/svm/vpmu.c       |  6 +--
> xen/arch/x86/hvm/vmx/vmx.c        | 24 ++++++++---
> xen/arch/x86/hvm/vmx/vpmu_core2.c | 86 +++++++++++++++------------------------
> 4 files changed, 57 insertions(+), 65 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 680eebe..3b5d15d 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1708,7 +1708,8 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
> case MSR_AMD_FAM15H_EVNTSEL3:
> case MSR_AMD_FAM15H_EVNTSEL4:
> case MSR_AMD_FAM15H_EVNTSEL5:
> - vpmu_do_rdmsr(msr, msr_content);
> + if ( vpmu_do_rdmsr(msr, msr_content) )
> + goto gpf;
> break;
>
> case MSR_AMD64_DR0_ADDRESS_MASK:
> @@ -1859,7 +1860,8 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
> case MSR_AMD_FAM15H_EVNTSEL3:
> case MSR_AMD_FAM15H_EVNTSEL4:
> case MSR_AMD_FAM15H_EVNTSEL5:
> - vpmu_do_wrmsr(msr, msr_content, 0);
> + if ( vpmu_do_wrmsr(msr, msr_content, 0) )
> + goto gpf;
> break;
>
> case MSR_IA32_MCx_MISC(4): /* Threshold register */
> diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
> index a8572a6..74d03a5 100644
> --- a/xen/arch/x86/hvm/svm/vpmu.c
> +++ b/xen/arch/x86/hvm/svm/vpmu.c
> @@ -305,7 +305,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
> is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu, VPMU_RUNNING) )
> {
> if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
> - return 1;
> + return 0;
> vpmu_set(vpmu, VPMU_RUNNING);
>
> if ( has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
> @@ -335,7 +335,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>
> /* Write to hw counters */
> wrmsrl(msr, msr_content);
> - return 1;
> + return 0;
> }
>
> static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
> @@ -353,7 +353,7 @@ static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
>
> rdmsrl(msr, *msr_content);
>
> - return 1;
> + return 0;
> }
>
> static void amd_vpmu_destroy(struct vcpu *v)
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 50e11dd..db1fa82 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2166,12 +2166,17 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
> *msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
> MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
> /* Perhaps vpmu will change some bits. */
> + /* FALLTHROUGH */
> + case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
> + case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
> + case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
> + case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> + case MSR_IA32_PEBS_ENABLE:
> + case MSR_IA32_DS_AREA:
> if ( vpmu_do_rdmsr(msr, msr_content) )
> - goto done;
> + goto gp_fault;
> break;
> default:
> - if ( vpmu_do_rdmsr(msr, msr_content) )
> - break;
> if ( passive_domain_do_rdmsr(msr, msr_content) )
> goto done;
> switch ( long_mode_do_msr_read(msr, msr_content) )
> @@ -2347,7 +2352,7 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
> if ( msr_content & ~supported )
> {
> /* Perhaps some other bits are supported in vpmu. */
> - if ( !vpmu_do_wrmsr(msr, msr_content, supported) )
> + if ( vpmu_do_wrmsr(msr, msr_content, supported) )
> break;
> }
> if ( msr_content & IA32_DEBUGCTLMSR_LBR )
> @@ -2375,9 +2380,16 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
> if ( !nvmx_msr_write_intercept(msr, msr_content) )
> goto gp_fault;
> break;
> + case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
> + case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(7):
> + case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
> + case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> + case MSR_IA32_PEBS_ENABLE:
> + case MSR_IA32_DS_AREA:
> + if ( vpmu_do_wrmsr(msr, msr_content, 0) )
> + goto gp_fault;
> + break;
> default:
> - if ( vpmu_do_wrmsr(msr, msr_content, 0) )
> - return X86EMUL_OKAY;
> if ( passive_domain_do_wrmsr(msr, msr_content) )
> return X86EMUL_OKAY;
>
> diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> index e7642e5..9710149 100644
> --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> @@ -454,36 +454,41 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
> IA32_DEBUGCTLMSR_BTS_OFF_USR;
> if ( !(msr_content & ~supported) &&
> vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
> - return 1;
> + return 0;
> if ( (msr_content & supported) &&
> !vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
> printk(XENLOG_G_WARNING
> "%pv: Debug Store unsupported on this CPU\n",
> current);
> }
> - return 0;
> + return -EINVAL;
> }
>
> ASSERT(!supported);
>
> + if ( type == MSR_TYPE_COUNTER &&
> + (msr_content &
> + ~((1ull << core2_get_bitwidth_fix_count()) - 1)) )
> + /* Writing unsupported bits to a fixed counter */
> + return -EINVAL;
> +
> core2_vpmu_cxt = vpmu->context;
> enabled_cntrs = vpmu->priv_context;
> switch ( msr )
> {
> case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> core2_vpmu_cxt->global_status &= ~msr_content;
> - return 1;
> + return 0;
> case MSR_CORE_PERF_GLOBAL_STATUS:
> gdprintk(XENLOG_INFO, "Can not write readonly MSR: "
> "MSR_PERF_GLOBAL_STATUS(0x38E)!\n");
> - hvm_inject_hw_exception(TRAP_gp_fault, 0);
> - return 1;
> + return -EINVAL;
> case MSR_IA32_PEBS_ENABLE:
> if ( msr_content & 1 )
> gdprintk(XENLOG_WARNING, "Guest is trying to enable PEBS, "
> "which is not supported.\n");
> core2_vpmu_cxt->pebs_enable = msr_content;
> - return 1;
> + return 0;
> case MSR_IA32_DS_AREA:
> if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) )
> {
> @@ -492,18 +497,21 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
> gdprintk(XENLOG_WARNING,
> "Illegal address for IA32_DS_AREA: %#" PRIx64 "x\n",
> msr_content);
> - hvm_inject_hw_exception(TRAP_gp_fault, 0);
> - return 1;
> + return -EINVAL;
> }
> core2_vpmu_cxt->ds_area = msr_content;
> break;
> }
> gdprintk(XENLOG_WARNING, "Guest setting of DTS is ignored.\n");
> - return 1;
> + return 0;
> case MSR_CORE_PERF_GLOBAL_CTRL:
> global_ctrl = msr_content;
> break;
> case MSR_CORE_PERF_FIXED_CTR_CTRL:
> + if ( msr_content &
> + ( ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1)) )
> + return -EINVAL;
> +
> vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
> *enabled_cntrs &= ~(((1ULL << fixed_pmc_cnt) - 1) << 32);
> if ( msr_content != 0 )
> @@ -526,6 +534,9 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
> struct xen_pmu_cntr_pair *xen_pmu_cntr_pair =
> vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
>
> + if ( msr_content & (~((1ull << 32) - 1)) )
> + return -EINVAL;
> +
> vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
>
> if ( msr_content & (1ULL << 22) )
> @@ -537,45 +548,17 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
> }
> }
>
> + if ( type != MSR_TYPE_GLOBAL )
> + wrmsrl(msr, msr_content);
> + else
> + vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
> +
> if ( (global_ctrl & *enabled_cntrs) || (core2_vpmu_cxt->ds_area != 0) )
> vpmu_set(vpmu, VPMU_RUNNING);
> else
> vpmu_reset(vpmu, VPMU_RUNNING);
>
> - if ( type != MSR_TYPE_GLOBAL )
> - {
> - u64 mask;
> - int inject_gp = 0;
> - switch ( type )
> - {
> - case MSR_TYPE_ARCH_CTRL: /* MSR_P6_EVNTSEL[0,...] */
> - mask = ~((1ull << 32) - 1);
> - if (msr_content & mask)
> - inject_gp = 1;
> - break;
> - case MSR_TYPE_CTRL: /* IA32_FIXED_CTR_CTRL */
> - if ( msr == MSR_IA32_DS_AREA )
> - break;
> - /* 4 bits per counter, currently 3 fixed counters implemented. */
> - mask = ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1);
> - if (msr_content & mask)
> - inject_gp = 1;
> - break;
> - case MSR_TYPE_COUNTER: /* IA32_FIXED_CTR[0-2] */
> - mask = ~((1ull << core2_get_bitwidth_fix_count()) - 1);
> - if (msr_content & mask)
> - inject_gp = 1;
> - break;
> - }
> - if (inject_gp)
> - hvm_inject_hw_exception(TRAP_gp_fault, 0);
> - else
> - wrmsrl(msr, msr_content);
> - }
> - else
> - vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
> -
> - return 1;
> + return 0;
> }
>
> static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
> @@ -603,19 +586,14 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
> rdmsrl(msr, *msr_content);
> }
> }
> - else
> + else if ( msr == MSR_IA32_MISC_ENABLE )
> {
> /* Extension for BTS */
> - if ( msr == MSR_IA32_MISC_ENABLE )
> - {
> - if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
> - *msr_content &= ~MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
> - }
> - else
> - return 0;
> + if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
> + *msr_content &= ~MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
> }
>
> - return 1;
> + return 0;
> }
>
> static void core2_vpmu_do_cpuid(unsigned int input,
> @@ -760,9 +738,9 @@ static int core2_no_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
> {
> int type = -1, index = -1;
> if ( !is_core2_vpmu_msr(msr, &type, &index) )
> - return 0;
> + return -EINVAL;
> *msr_content = 0;
> - return 1;
> + return 0;
> }
>
> /*
>
--
Company details: http://ts.fujitsu.com/imprint.html