To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86 mca: unmap broken memory in EPT guest in MCA delayed handler
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 15 Sep 2010 07:40:39 -0700
Delivery-date: Wed, 15 Sep 2010 07:44:40 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1284396499 -3600
# Node ID 65010d314adb1efe0674b2c1e7337affc6dbe57b
# Parent  38b41484c599df2c75aa7d1a7207f2c0d360f030
x86 mca: unmap broken memory in EPT guest in MCA delayed handler

When memory owned by an EPT guest is broken, we change its P2M type to
the broken-memory type (p2m_ram_broken), so that any later guest access
to that memory is trapped as an EPT violation.

Signed-off-by: Jiang, Yunhong <yunhong.jiang@xxxxxxxxx>
Acked-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
 xen/arch/x86/cpu/mcheck/mce.h       |    1 +
 xen/arch/x86/cpu/mcheck/mce_intel.c |   32 +++++++++++++++---------
 xen/arch/x86/cpu/mcheck/vmce.c      |   48 ++++++++++++++++++++++++++++++++++++
 3 files changed, 69 insertions(+), 12 deletions(-)
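
To make the mechanism concrete before the patch itself, here is a
standalone toy model of the idea (this is not Xen code; every name
below, such as toy_p2m and toy_unmap_broken_page, is invented for
illustration): the delayed MCA handler retypes the broken guest frame,
provided its current type is in an eligible mask, and a later guest
access then finds the broken type and traps instead of touching the
memory.

    #include <stdio.h>

    /* Toy page types, loosely mirroring the p2m types in the patch. */
    enum toy_type { toy_ram_rw, toy_ram_ro, toy_mmio_direct, toy_ram_broken };

    #define toy_to_mask(t)  (1u << (t))

    /* Like P2M_UNMAP_TYPES: only these types are eligible for retyping. */
    #define TOY_UNMAP_TYPES (toy_to_mask(toy_ram_rw) | \
                             toy_to_mask(toy_ram_ro) | \
                             toy_to_mask(toy_mmio_direct))

    #define NR_GFNS 8
    static enum toy_type toy_p2m[NR_GFNS];  /* gfn -> type; 0 == toy_ram_rw */

    /* Delayed-handler step: retype the broken frame so later accesses trap. */
    static int toy_unmap_broken_page(unsigned long gfn)
    {
        if (gfn >= NR_GFNS)
            return -1;                  /* out of range, cf. max_mapped_pfn */
        if (!(toy_to_mask(toy_p2m[gfn]) & TOY_UNMAP_TYPES))
            return -1;                  /* type not eligible for unmapping */
        toy_p2m[gfn] = toy_ram_broken;
        return 0;
    }

    /* Guest access path: a broken frame "traps" instead of being read. */
    static void toy_access(unsigned long gfn)
    {
        if (toy_p2m[gfn] == toy_ram_broken)
            printf("gfn %lu: trap (would inject vMCE)\n", gfn);
        else
            printf("gfn %lu: normal access\n", gfn);
    }

    int main(void)
    {
        toy_access(3);                  /* normal access */
        toy_unmap_broken_page(3);       /* MCA delayed handler marks it */
        toy_access(3);                  /* now traps */
        return 0;
    }

In the real patch the eligible mask is P2M_UNMAP_TYPES, the retyping is
done by p2m_change_type(), and the trap is an actual EPT violation
rather than an explicit check on the access path.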

diff -r 38b41484c599 -r 65010d314adb xen/arch/x86/cpu/mcheck/mce.h
--- a/xen/arch/x86/cpu/mcheck/mce.h     Mon Sep 13 17:47:40 2010 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce.h     Mon Sep 13 17:48:19 2010 +0100
@@ -49,6 +49,7 @@ void amd_nonfatal_mcheck_init(struct cpu
 void amd_nonfatal_mcheck_init(struct cpuinfo_x86 *c);
 
 int is_vmce_ready(struct mcinfo_bank *bank, struct domain *d);
+int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn);
 
 u64 mce_cap_init(void);
 extern int firstbank;
diff -r 38b41484c599 -r 65010d314adb xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c       Mon Sep 13 17:47:40 2010 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c       Mon Sep 13 17:48:19 2010 +0100
@@ -654,16 +654,22 @@ static void intel_memerr_dhandler(int bn
             BUG_ON( result->owner == DOMID_COW );
             if ( result->owner != DOMID_XEN ) {
                 d = get_domain_by_id(result->owner);
+                ASSERT(d);
+                gfn = get_gpfn_from_mfn((bank->mc_addr) >> PAGE_SHIFT);
+
                 if ( !is_vmce_ready(bank, d) )
                 {
-                    /* Should not inject vMCE to guest */
-                    if ( d )
-                        put_domain(d);
-                    return;
+                    printk("DOM%d not ready for vMCE\n", d->domain_id);
+                    goto vmce_failed;
                 }
 
-                ASSERT(d);
-                gfn = get_gpfn_from_mfn((bank->mc_addr) >> PAGE_SHIFT);
+                if ( unmmap_broken_page(d, _mfn(mfn), gfn) )
+                {
+                    printk("Unmap broken memory %lx for DOM%d failed\n",
+                            mfn, d->domain_id);
+                    goto vmce_failed;
+                }
+
                 bank->mc_addr =  gfn << PAGE_SHIFT |
                   (bank->mc_addr & (PAGE_SIZE -1 ));
                 if ( fill_vmsr_data(bank, d,
@@ -671,18 +677,15 @@ static void intel_memerr_dhandler(int bn
                 {
                     mce_printk(MCE_QUIET, "Fill vMCE# data for DOM%d "
                       "failed\n", result->owner);
-                    put_domain(d);
-                    domain_crash(d);
-                    return;
+                    goto vmce_failed;
                 }
+
                 /* We will inject vMCE to DOMU*/
                 if ( inject_vmce(d) < 0 )
                 {
                     mce_printk(MCE_QUIET, "inject vMCE to DOM%d"
                       " failed\n", d->domain_id);
-                    put_domain(d);
-                    domain_crash(d);
-                    return;
+                    goto vmce_failed;
                 }
                 /* Impacted domain go on with domain's recovery job
                  * if the domain has its own MCA handler.
@@ -691,6 +694,11 @@ static void intel_memerr_dhandler(int bn
                  */
                 result->result = MCA_RECOVERED;
                 put_domain(d);
+
+                return;
+vmce_failed:
+                put_domain(d);
+                domain_crash(d);
             }
         }
     }
diff -r 38b41484c599 -r 65010d314adb xen/arch/x86/cpu/mcheck/vmce.c
--- a/xen/arch/x86/cpu/mcheck/vmce.c    Mon Sep 13 17:47:40 2010 +0100
+++ b/xen/arch/x86/cpu/mcheck/vmce.c    Mon Sep 13 17:48:19 2010 +0100
@@ -558,3 +558,51 @@ int is_vmce_ready(struct mcinfo_bank *ba
 
     return 0;
 }
+
+/* Some RAM is reportedly set up as mmio_direct for the UC cache attribute */
+#define P2M_UNMAP_TYPES (p2m_to_mask(p2m_ram_rw) \
+                                | p2m_to_mask(p2m_ram_logdirty) \
+                                | p2m_to_mask(p2m_ram_ro)       \
+                                | p2m_to_mask(p2m_mmio_direct))
+
+/*
+ * Currently all CPUs rendezvous at the MCE softirq handler, so there is
+ * no need to consider the paging p2m type.
+ * Currently only HVM guests with EPT paging mode are supported.
+ * XXX the following situations are not handled:
+ * PoD, foreign mapped, granted, shared
+ */
+int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
+{
+    mfn_t r_mfn;
+    struct p2m_domain *p2m;
+    p2m_type_t pt;
+
+    /* Always trust dom0's MCE handler will prevent future access */
+    if ( d == dom0 )
+        return 0;
+
+    if (!mfn_valid(mfn_x(mfn)))
+        return -EINVAL;
+
+    if ( !is_hvm_domain(d) || !paging_mode_hap(d) )
+        return -ENOSYS;
+
+    p2m = p2m_get_hostp2m(d);
+    ASSERT(p2m);
+
+    /* This only happens for PoD memory, which should be handled separately */
+    if (gfn > p2m->max_mapped_pfn)
+        return -EINVAL;
+
+    r_mfn = gfn_to_mfn_query(p2m, gfn, &pt);
+    if ( p2m_to_mask(pt) & P2M_UNMAP_TYPES)
+    {
+        ASSERT(mfn_x(r_mfn) == mfn_x(mfn));
+        p2m_change_type(p2m, gfn, pt, p2m_ram_broken);
+        return 0;
+    }
+
+    return -1;
+}
+
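
One structural note on the mce_intel.c hunk above: the three duplicated
put_domain()/domain_crash()/return error paths collapse into a single
vmce_failed: label. A minimal standalone sketch of that C error-handling
idiom (step_ok and handle are invented stand-ins, not Xen functions):

    #include <stdio.h>

    /* Stand-ins for the domain refcount/crash calls in the patch. */
    static void put_domain(int d)   { printf("put_domain(%d)\n", d); }
    static void domain_crash(int d) { printf("domain_crash(%d)\n", d); }

    static int step_ok(int step)    { return step != 2; /* step 2 "fails" */ }

    /* One failure label instead of repeating the cleanup at every exit. */
    static void handle(int d)
    {
        if (!step_ok(1))
            goto vmce_failed;
        if (!step_ok(2))
            goto vmce_failed;
        if (!step_ok(3))
            goto vmce_failed;

        put_domain(d);      /* success path: drop the reference and return */
        return;

    vmce_failed:
        put_domain(d);      /* failure path: drop the reference, then crash */
        domain_crash(d);
    }

    int main(void) { handle(7); return 0; }

Centralizing the failure path this way keeps the domain reference count
balanced on every exit, which is exactly what the repeated put_domain()
calls in the old code were doing by hand.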

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
