[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH 1/2] x86/vMCE: adjustments to unmmap_broken_page()
There's no need for more than an assertion as to the passed in MFN's validity, as the caller's prior call to offline_page() would not have succeeded on an invalid one. There's no use in checking both is_hvm_domain() and paging_mode_hap(), as the latter implies the former. Extend the P2M manipulation that's there also to PVH Dom0, merely having it use the prior PV Dom0 related behavioral assumption when the page type cannot be changed (yet). There's no point in P2M_UNMAP_TYPES including p2m_mmio_direct. The respective comment is bogus afaict, there are no RAM pages getting mapped with that type for the purpose of becoming UC. The sole RAM page getting mapped with this attribute is the (now global) APIC access MFN. (This page, if it went bad, shouldn't have any effect on the system anyway, as it never really gets accessed; it's only its address which matters.) Make the last function parameter type-safe. Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx> --- a/xen/arch/x86/cpu/mcheck/mcaction.c +++ b/xen/arch/x86/cpu/mcheck/mcaction.c @@ -91,7 +91,7 @@ mc_memerr_dhandler(struct mca_binfo *bin ASSERT(d); gfn = get_gpfn_from_mfn((bank->mc_addr) >> PAGE_SHIFT); - if ( unmmap_broken_page(d, mfn, gfn) ) + if ( unmmap_broken_page(d, mfn, _gfn(gfn)) ) { printk("Unmap broken memory %"PRI_mfn" for DOM%d failed\n", mfn_x(mfn), d->domain_id); --- a/xen/arch/x86/cpu/mcheck/vmce.c +++ b/xen/arch/x86/cpu/mcheck/vmce.c @@ -502,11 +502,9 @@ int fill_vmsr_data(struct mcinfo_bank *m return ret; } -/* It's said some ram is setup as mmio_direct for UC cache attribute */ -#define P2M_UNMAP_TYPES (p2m_to_mask(p2m_ram_rw) \ - | p2m_to_mask(p2m_ram_logdirty) \ - | p2m_to_mask(p2m_ram_ro) \ - | p2m_to_mask(p2m_mmio_direct)) +#define P2M_UNMAP_TYPES (p2m_to_mask(p2m_ram_rw) | \ + p2m_to_mask(p2m_ram_logdirty) | \ + p2m_to_mask(p2m_ram_ro)) /* * Currently all CPUs are redenzevous at the MCE softirq handler, no @@ -515,30 +513,25 @@ int fill_vmsr_data(struct mcinfo_bank *m * XXX following 
situation missed: * PoD, Foreign mapped, Granted, Shared */ -int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn) +int unmmap_broken_page(struct domain *d, mfn_t mfn, gfn_t gfn) { - mfn_t r_mfn; p2m_type_t pt; int rc; - /* Always trust dom0's MCE handler will prevent future access */ - if ( is_hardware_domain(d) ) - return 0; - - if ( !mfn_valid(mfn) ) - return -EINVAL; - - if ( !is_hvm_domain(d) || !paging_mode_hap(d) ) - return -EOPNOTSUPP; - - rc = -1; - r_mfn = get_gfn_query(d, gfn, &pt); - if ( p2m_to_mask(pt) & P2M_UNMAP_TYPES) - { - ASSERT(mfn_eq(r_mfn, mfn)); - rc = p2m_change_type_one(d, gfn, pt, p2m_ram_broken); - } - put_gfn(d, gfn); + if ( !paging_mode_hap(d) ) + /* Always trust Dom0's MCE handler will prevent further access. */ + return is_hardware_domain(d) ? 0 : -EOPNOTSUPP; + + ASSERT(mfn_valid(mfn)); + + if ( !mfn_eq(get_gfn_query(d, gfn_x(gfn), &pt), mfn) ) + rc = -EAGAIN; + else if ( p2m_to_mask(pt) & P2M_UNMAP_TYPES ) + rc = p2m_change_type_one(d, gfn_x(gfn), pt, p2m_ram_broken); + else + /* Always trust Dom0's MCE handler will prevent further access. */ + rc = is_hardware_domain(d) ? 0 : -EOPNOTSUPP; + put_gfn(d, gfn_x(gfn)); return rc; } --- a/xen/arch/x86/cpu/mcheck/vmce.h +++ b/xen/arch/x86/cpu/mcheck/vmce.h @@ -9,7 +9,7 @@ int vmce_init(struct cpuinfo_x86 *c); (hardware_domain && \ evtchn_virq_enabled(domain_vcpu(hardware_domain, 0), VIRQ_MCA)) -int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn); +int unmmap_broken_page(struct domain *d, mfn_t mfn, gfn_t gfn); int vmce_intel_rdmsr(const struct vcpu *, uint32_t msr, uint64_t *val); int vmce_intel_wrmsr(struct vcpu *, uint32_t msr, uint64_t val);
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.