To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] M2P translation cannot be handled through flat table with only one slot per MFN
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 16 Dec 2009 22:41:00 -0800
Delivery-date: Wed, 16 Dec 2009 22:42:53 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1261031276 0
# Node ID 257bd5e90294b7e768224c89908745e66efcbcac
# Parent  34d620a47c636c0c72b1270ef6410953909a1bdc
M2P translation cannot be handled through a flat table with only one slot per
MFN when an MFN is shared. However, all existing callers can either infer the
GFN (for example, the p2m table destructor) or do not need to know the GFN for
shared pages. This patch identifies and fixes all the M2P accessors, either by
removing the translation altogether or by making the relevant modifications.
Shared MFNs have the special value SHARED_M2P_ENTRY stored in their M2P table
slot.
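
For illustration only (not part of the patch): a minimal sketch of how a
caller can classify the three kinds of M2P entries this change introduces.
m2p_classify() is a hypothetical helper; the sentinel macros mirror the ones
added to xen/include/asm-x86/mm.h below, and BITS_PER_LONG is as in Xen's
config headers.

    #define INVALID_M2P_ENTRY  (~0UL)
    #define SHARED_M2P_ENTRY   (~0UL - 1UL)
    #define VALID_M2P(_e)      (!((_e) & (1UL << (BITS_PER_LONG - 1))))
    #define SHARED_M2P(_e)     ((_e) == SHARED_M2P_ENTRY)

    /* Classify one M2P slot. Both sentinels have the top bit set, so
     * SHARED_M2P() must be tested before VALID_M2P(). */
    static unsigned long m2p_classify(const unsigned long *m2p,
                                      unsigned long mfn)
    {
        unsigned long gfn = m2p[mfn];

        if ( SHARED_M2P(gfn) )      /* shared: gfn must be inferred */
            return SHARED_M2P_ENTRY;
        if ( !VALID_M2P(gfn) )      /* not mapped by any guest */
            return INVALID_M2P_ENTRY;
        return gfn;                 /* unique backpointer */
    }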

Signed-off-by: Grzegorz Milos <Grzegorz.Milos@xxxxxxxxxx>
---
 xen/arch/x86/cpu/mcheck/mce_intel.c |    4 ++++
 xen/arch/x86/domain_build.c         |    1 +
 xen/arch/x86/mm.c                   |   17 +++++++++++++++--
 xen/arch/x86/mm/mem_sharing.c       |   17 +++++++++++------
 xen/arch/x86/mm/p2m.c               |   29 ++++++++++++++++++++++-------
 xen/arch/x86/mm/paging.c            |    2 ++
 xen/arch/x86/mm/shadow/multi.c      |    4 ++++
 xen/arch/x86/mm/shadow/private.h    |    2 ++
 xen/arch/x86/traps.c                |   18 +++++++++++++++---
 xen/common/domctl.c                 |    1 +
 xen/common/grant_table.c            |    2 ++
 xen/common/memory.c                 |   30 +++++++++++++++++++++++++++---
 xen/drivers/passthrough/iommu.c     |    1 +
 xen/include/asm-x86/mm.h            |   22 +++++++++++++++++-----
 14 files changed, 124 insertions(+), 26 deletions(-)

diff -r 34d620a47c63 -r 257bd5e90294 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c       Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c       Thu Dec 17 06:27:56 2009 +0000
@@ -356,6 +356,10 @@ static void intel_UCR_handler(struct mci
                       /* Fill vMCE# injection and vMCE# MSR virtualization
                        * related data */
                       bank->mc_domid = result->owner;
+                      /* XXX: Cannot handle shared pages yet
+                       * (this should identify all domains and gfns mapping
+                       *  to the mfn in question) */
+                      BUG_ON( result->owner == DOMID_COW );
                       if ( result->owner != DOMID_XEN ) {
                           d = get_domain_by_id(result->owner);
                           gfn =
diff -r 34d620a47c63 -r 257bd5e90294 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/arch/x86/domain_build.c       Thu Dec 17 06:27:56 2009 +0000
@@ -931,6 +931,7 @@ int __init construct_dom0(
     page_list_for_each ( page, &d->page_list )
     {
         mfn = page_to_mfn(page);
+        BUG_ON(SHARED_M2P(get_gpfn_from_mfn(mfn)));
         if ( get_gpfn_from_mfn(mfn) >= count )
         {
             BUG_ON(is_pv_32bit_domain(d));
diff -r 34d620a47c63 -r 257bd5e90294 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/arch/x86/mm.c Thu Dec 17 06:27:56 2009 +0000
@@ -2138,7 +2138,9 @@ int free_page_type(struct page_info *pag
 
         gmfn = mfn_to_gmfn(owner, page_to_mfn(page));
         ASSERT(VALID_M2P(gmfn));
-        shadow_remove_all_shadows(owner->vcpu[0], _mfn(gmfn));
+        /* Page sharing not supported for shadowed domains */
+        if ( !SHARED_M2P(gmfn) )
+            shadow_remove_all_shadows(owner->vcpu[0], _mfn(gmfn));
     }
 
     if ( !(type & PGT_partial) )
@@ -4234,12 +4236,22 @@ long arch_memory_op(int op, XEN_GUEST_HA
             spin_unlock(&d->grant_table->lock);
             break;
         case XENMAPSPACE_gmfn:
-            xatp.idx = gmfn_to_mfn(d, xatp.idx);
+        {
+            p2m_type_t p2mt;
+
+            xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt, 0));
+            /* If the page is still shared, exit early */
+            if ( p2m_is_shared(p2mt) )
+            {
+                rcu_unlock_domain(d);
+                return -ENOMEM;
+            }
             if ( !get_page_from_pagenr(xatp.idx, d) )
                 break;
             mfn = xatp.idx;
             page = mfn_to_page(mfn);
             break;
+        }
         default:
             break;
         }
@@ -4268,6 +4280,7 @@ long arch_memory_op(int op, XEN_GUEST_HA
 
         /* Unmap from old location, if any. */
         gpfn = get_gpfn_from_mfn(mfn);
+        ASSERT( gpfn != SHARED_M2P_ENTRY );
         if ( gpfn != INVALID_M2P_ENTRY )
             guest_physmap_remove_page(d, gpfn, mfn, 0);
 
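The XENMAPSPACE_gmfn hunk above introduces a pattern that recurs later in
memory.c: attempt to break sharing first, then treat a still-shared page as
an out-of-memory condition. A minimal sketch of the pattern, assuming the
gfn_to_mfn_unshare() call exactly as used in this patch (lookup_private_mfn()
is a hypothetical wrapper, not part of the change):

    /* Resolve gfn to a private mfn, unsharing if necessary. */
    static long lookup_private_mfn(struct domain *d, unsigned long gfn,
                                   unsigned long *mfn)
    {
        p2m_type_t p2mt;

        *mfn = mfn_x(gfn_to_mfn_unshare(d, gfn, &p2mt, 0));
        if ( p2m_is_shared(p2mt) )
            return -ENOMEM;  /* unsharing failed; caller bails out */
        return 0;
    }
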
diff -r 34d620a47c63 -r 257bd5e90294 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c     Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/arch/x86/mm/mem_sharing.c     Thu Dec 17 06:27:56 2009 +0000
@@ -216,6 +216,9 @@ int mem_sharing_nominate_page(struct dom
         goto out;
     }
 
+    /* Update m2p entry to SHARED_M2P_ENTRY */
+    set_gpfn_from_mfn(mfn_x(mfn), SHARED_M2P_ENTRY);
+
     ret = 0;
 
 out:
@@ -260,9 +263,11 @@ private_page_found:
         printk("Could not change p2m type.\n");
         BUG();
     }
-
-    return 0;
-}
-
-
-
+    /* Update m2p entry */
+    set_gpfn_from_mfn(mfn_x(page_to_mfn(page)), gfn);
+
+    return 0;
+}
+
+
+
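
The two mem_sharing.c hunks above bracket a shared page's M2P lifecycle:
nomination poisons the slot with the sentinel, and un-sharing restores a
real gfn. A sketch of that pairing (the function names below are
illustrative stand-ins, not the patch's own):

    /* M2P transitions implied by the mem_sharing.c hunks. */
    static void m2p_on_nominate(unsigned long mfn)
    {
        /* One slot can no longer name a unique (domain, gfn). */
        set_gpfn_from_mfn(mfn, SHARED_M2P_ENTRY);
    }

    static void m2p_on_unshare(unsigned long mfn, unsigned long gfn)
    {
        /* Private again: the flat table regains a real backpointer. */
        set_gpfn_from_mfn(mfn, gfn);
    }
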
diff -r 34d620a47c63 -r 257bd5e90294 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/arch/x86/mm/p2m.c     Thu Dec 17 06:27:56 2009 +0000
@@ -1601,6 +1601,8 @@ int p2m_alloc_table(struct domain *d,
     {
         mfn = page_to_mfn(page);
         gfn = get_gpfn_from_mfn(mfn_x(mfn));
+        /* Pages should not be shared that early */
+        ASSERT(gfn != SHARED_M2P_ENTRY);
         page_count++;
         if (
 #ifdef __x86_64__
@@ -1709,6 +1711,13 @@ static void audit_p2m(struct domain *d)
             orphans_d++;
             //P2M_PRINTK("orphaned guest page: mfn=%#lx has debug gfn\n",
             //               mfn);
+            continue;
+        }
+
+        if ( gfn == SHARED_M2P_ENTRY )
+        {
+            P2M_PRINTK("shared mfn (%lx) on domain page list!\n",
+                    mfn);
             continue;
         }
 
@@ -1803,7 +1812,9 @@ static void audit_p2m(struct domain *d)
                         for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++)
                         {
                             m2pfn = get_gpfn_from_mfn(mfn+i1);
-                            if ( m2pfn != (gfn + i1) )
+                            /* Allow shared M2Ps */
+                            if ( (m2pfn != (gfn + i1)) &&
+                                 (m2pfn != SHARED_M2P_ENTRY) )
                             {
                                 pmbad++;
                                 P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
@@ -1834,7 +1845,8 @@ static void audit_p2m(struct domain *d)
                         m2pfn = get_gpfn_from_mfn(mfn);
                         if ( m2pfn != gfn &&
                              type != p2m_mmio_direct &&
-                             !p2m_is_grant(type) )
+                             !p2m_is_grant(type) &&
+                             !p2m_is_shared(type) )
                         {
                             pmbad++;
                             printk("mismatch: gfn %#lx -> mfn %#lx"
@@ -2137,12 +2149,11 @@ void p2m_change_type_global(struct domai
     l1_pgentry_t *l1e;
     l2_pgentry_t *l2e;
     mfn_t l1mfn, l2mfn;
-    int i1, i2;
+    unsigned long i1, i2, i3;
     l3_pgentry_t *l3e;
-    int i3;
 #if CONFIG_PAGING_LEVELS == 4
     l4_pgentry_t *l4e;
-    int i4;
+    unsigned long i4;
 #endif /* CONFIG_PAGING_LEVELS == 4 */
 
     BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
@@ -2193,7 +2204,10 @@ void p2m_change_type_global(struct domai
                     if ( p2m_flags_to_type(flags) != ot )
                         continue;
                     mfn = l2e_get_pfn(l2e[i2]);
-                    gfn = get_gpfn_from_mfn(mfn);
+                    /* Do not use get_gpfn_from_mfn because it may return 
+                       SHARED_M2P_ENTRY */
+                    gfn = (i2 + (i3 + (i4 * L3_PAGETABLE_ENTRIES))
+                           * L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES; 
                     flags = p2m_type_to_flags(nt);
                     l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE);
                     paging_write_p2m_entry(d, gfn, (l1_pgentry_t *)&l2e[i2],
@@ -2210,7 +2224,8 @@ void p2m_change_type_global(struct domai
                     if ( p2m_flags_to_type(flags) != ot )
                         continue;
                     mfn = l1e_get_pfn(l1e[i1]);
-                    gfn = get_gpfn_from_mfn(mfn);
+                    gfn = i1 + (i2 + (i3 + (i4 * L3_PAGETABLE_ENTRIES))
+                           * L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES; 
                     /* create a new l1e entry with the new type */
                     flags = p2m_type_to_flags(nt);
                     l1e_content = l1e_from_pfn(mfn, flags);
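
In the p2m_change_type_global() hunks above, the gfn is reconstructed from
the walk indices rather than read back through get_gpfn_from_mfn(), which
could now return SHARED_M2P_ENTRY. A sketch of the arithmetic for the
4-level layout (gfn_from_indices() is an illustrative name; the constants
are Xen's page-table geometry):

    /* Factored-out form of the index arithmetic added above. */
    static unsigned long gfn_from_indices(unsigned long i4, unsigned long i3,
                                          unsigned long i2, unsigned long i1)
    {
        return i1 + (i2 + (i3 + i4 * L3_PAGETABLE_ENTRIES)
                          * L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES;
    }

The 2MB-superpage branch is the same computation with i1 == 0.
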
diff -r 34d620a47c63 -r 257bd5e90294 xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c  Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/arch/x86/mm/paging.c  Thu Dec 17 06:27:56 2009 +0000
@@ -280,6 +280,8 @@ void paging_mark_dirty(struct domain *d,
 
     /* We /really/ mean PFN here, even for non-translated guests. */
     pfn = get_gpfn_from_mfn(mfn_x(gmfn));
+    /* Shared MFNs should NEVER be marked dirty */
+    BUG_ON(SHARED_M2P(pfn));
 
     /*
      * Values with the MSB set denote MFNs that aren't really part of the
diff -r 34d620a47c63 -r 257bd5e90294 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Thu Dec 17 06:27:56 2009 +0000
@@ -1070,6 +1070,8 @@ static inline void shadow_vram_get_l1e(s
         return;
 
     gfn = mfn_to_gfn(d, mfn);
+    /* Page sharing not supported on shadow PTs */
+    BUG_ON(SHARED_M2P(gfn));
 
     if ( (gfn >= dirty_vram->begin_pfn) && (gfn < dirty_vram->end_pfn) )
     {
@@ -1099,6 +1101,8 @@ static inline void shadow_vram_put_l1e(s
         return;
 
     gfn = mfn_to_gfn(d, mfn);
+    /* Page sharing not supported on shadow PTs */
+    BUG_ON(SHARED_M2P(gfn));
 
     if ( (gfn >= dirty_vram->begin_pfn) && (gfn < dirty_vram->end_pfn) )
     {
diff -r 34d620a47c63 -r 257bd5e90294 xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/arch/x86/mm/shadow/private.h  Thu Dec 17 06:27:56 2009 +0000
@@ -565,6 +565,8 @@ sh_mfn_is_dirty(struct domain *d, mfn_t 
 
     /* We /really/ mean PFN here, even for non-translated guests. */
     pfn = get_gpfn_from_mfn(mfn_x(gmfn));
+    /* Page sharing not supported for shadow domains */
+    BUG_ON(SHARED_M2P(pfn));
     if ( unlikely(!VALID_M2P(pfn)) )
         return 0;
     
diff -r 34d620a47c63 -r 257bd5e90294 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/arch/x86/traps.c      Thu Dec 17 06:27:56 2009 +0000
@@ -2088,15 +2088,27 @@ static int emulate_privileged_op(struct 
             break;
             
         case 3: /* Read CR3 */
+        {
+            unsigned long mfn;
+            
             if ( !is_pv_32on64_vcpu(v) )
+            {
+                mfn = pagetable_get_pfn(v->arch.guest_table);
                 *reg = xen_pfn_to_cr3(mfn_to_gmfn(
-                    v->domain, pagetable_get_pfn(v->arch.guest_table)));
+                    v->domain, mfn));
+            }
 #ifdef CONFIG_COMPAT
             else
+            {
+                mfn = l4e_get_pfn(*(l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)));
                 *reg = compat_pfn_to_cr3(mfn_to_gmfn(
-                    v->domain, l4e_get_pfn(*(l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)))));
+                    v->domain, mfn));
+            }
 #endif
-            break;
+            /* PTs should not be shared */
+            BUG_ON(page_get_owner(mfn_to_page(mfn)) == dom_cow);
+        }
+        break;
 
         case 4: /* Read CR4 */
             *reg = v->arch.guest_context.ctrlreg[4];
diff -r 34d620a47c63 -r 257bd5e90294 xen/common/domctl.c
--- a/xen/common/domctl.c       Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/common/domctl.c       Thu Dec 17 06:27:56 2009 +0000
@@ -137,6 +137,7 @@ void getdomaininfo(struct domain *d, str
     info->tot_pages         = d->tot_pages;
     info->max_pages         = d->max_pages;
     info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);
+    BUG_ON(SHARED_M2P(info->shared_info_frame));
 
     memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
 }
diff -r 34d620a47c63 -r 257bd5e90294 xen/common/grant_table.c
--- a/xen/common/grant_table.c  Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/common/grant_table.c  Thu Dec 17 06:27:56 2009 +0000
@@ -1195,6 +1195,8 @@ gnttab_setup_table(
     for ( i = 0; i < op.nr_frames; i++ )
     {
         gmfn = gnttab_shared_gmfn(d, d->grant_table, i);
+        /* Grant tables cannot be shared */
+        BUG_ON(SHARED_M2P(gmfn));
         (void)copy_to_guest_offset(op.frame_list, i, &gmfn, 1);
     }
 
diff -r 34d620a47c63 -r 257bd5e90294 xen/common/memory.c
--- a/xen/common/memory.c       Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/common/memory.c       Thu Dec 17 06:27:56 2009 +0000
@@ -22,6 +22,7 @@
 #include <xen/tmem.h>
 #include <asm/current.h>
 #include <asm/hardirq.h>
+#include <asm/p2m.h>
 #include <xen/numa.h>
 #include <public/memory.h>
 #include <xsm/xsm.h>
@@ -151,9 +152,10 @@ int guest_remove_page(struct domain *d, 
 int guest_remove_page(struct domain *d, unsigned long gmfn)
 {
     struct page_info *page;
+    p2m_type_t p2mt;
     unsigned long mfn;
 
-    mfn = gmfn_to_mfn(d, gmfn);
+    mfn = mfn_x(gfn_to_mfn(d, gmfn, &p2mt)); 
     if ( unlikely(!mfn_valid(mfn)) )
     {
         gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
@@ -162,6 +164,15 @@ int guest_remove_page(struct domain *d, 
     }
             
     page = mfn_to_page(mfn);
+    /* If gmfn is shared, just drop the guest reference (which may or may not
+     * free the page) */
+    if ( p2m_is_shared(p2mt) )
+    {
+        put_page_and_type(page);
+        guest_physmap_remove_page(d, gmfn, mfn, 0);
+        return 1;
+    }
+
     if ( unlikely(!get_page(page, d)) )
     {
         gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
@@ -319,7 +330,15 @@ static long memory_exchange(XEN_GUEST_HA
 
             for ( k = 0; k < (1UL << exch.in.extent_order); k++ )
             {
-                mfn = gmfn_to_mfn(d, gmfn + k);
+                p2m_type_t p2mt;
+
+                /* Shared pages cannot be exchanged */
+                mfn = mfn_x(gfn_to_mfn_unshare(d, gmfn + k, &p2mt, 0));
+                if ( p2m_is_shared(p2mt) )
+                {
+                    rc = -ENOMEM;
+                    goto fail; 
+                }
                 if ( unlikely(!mfn_valid(mfn)) )
                 {
                     rc = -EINVAL;
@@ -358,10 +377,15 @@ static long memory_exchange(XEN_GUEST_HA
         /* Destroy final reference to each input page. */
         while ( (page = page_list_remove_head(&in_chunk_list)) )
         {
+            unsigned long gfn;
+
             if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
                 BUG();
             mfn = page_to_mfn(page);
-            guest_physmap_remove_page(d, mfn_to_gmfn(d, mfn), mfn, 0);
+            gfn = mfn_to_gmfn(d, mfn);
+            /* Pages were unshared above */
+            BUG_ON(SHARED_M2P(gfn));
+            guest_physmap_remove_page(d, gfn, mfn, 0);
             put_page(page);
         }
 
diff -r 34d620a47c63 -r 257bd5e90294 xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c   Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/drivers/passthrough/iommu.c   Thu Dec 17 06:27:56 2009 +0000
@@ -168,6 +168,7 @@ static int iommu_populate_page_table(str
         if ( is_hvm_domain(d) ||
             (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
         {
+            BUG_ON(SHARED_M2P(mfn_to_gmfn(d, page_to_mfn(page))));
             rc = hd->platform_ops->map_page(
                 d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page));
             if (rc)
diff -r 34d620a47c63 -r 257bd5e90294 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/include/asm-x86/mm.h  Thu Dec 17 06:27:56 2009 +0000
@@ -438,15 +438,27 @@ TYPE_SAFE(unsigned long,mfn);
 #define machine_to_phys_mapping  ((unsigned long *)RDWR_MPT_VIRT_START)
 #define INVALID_M2P_ENTRY        (~0UL)
 #define VALID_M2P(_e)            (!((_e) & (1UL<<(BITS_PER_LONG-1))))
+#define SHARED_M2P_ENTRY         (~0UL - 1UL)
+#define SHARED_M2P(_e)           ((_e) == SHARED_M2P_ENTRY)
 
 #ifdef CONFIG_COMPAT
 #define compat_machine_to_phys_mapping ((unsigned int *)RDWR_COMPAT_MPT_VIRT_START)
-#define set_gpfn_from_mfn(mfn, pfn) \
+#define set_gpfn_from_mfn(mfn, pfn) ({                         \
+    struct domain *d = page_get_owner(__mfn_to_page(mfn));     \
+    unsigned long entry = (d && (d == dom_cow)) ?              \
+        SHARED_M2P_ENTRY : (pfn);                              \
     ((void)((mfn) >= (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) / 4 || \
-            (compat_machine_to_phys_mapping[(mfn)] = (unsigned int)(pfn))), \
-     machine_to_phys_mapping[(mfn)] = (pfn))
-#else
-#define set_gpfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
+            (compat_machine_to_phys_mapping[(mfn)] = (unsigned int)(entry))), \
+     machine_to_phys_mapping[(mfn)] = (entry));                \
+    })
+#else
+#define set_gpfn_from_mfn(mfn, pfn) ({                         \
+    struct domain *d = page_get_owner(__mfn_to_page(mfn));     \
+    if ( d && (d == dom_cow) )                                 \
+        machine_to_phys_mapping[(mfn)] = SHARED_M2P_ENTRY;     \
+    else                                                       \
+        machine_to_phys_mapping[(mfn)] = (pfn);                \
+    })
 #endif
 #define get_gpfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
 
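For readability, the new non-COMPAT set_gpfn_from_mfn() above is equivalent
to the following function form (a sketch only; the patch keeps the macro /
statement-expression form so it stays header-only):

    static inline void set_gpfn_from_mfn_fn(unsigned long mfn,
                                            unsigned long pfn)
    {
        struct domain *d = page_get_owner(__mfn_to_page(mfn));

        /* Frames owned by the copy-on-write domain are shared, so the
         * ambiguous gfn backpointer is replaced by the sentinel. */
        if ( d && (d == dom_cow) )
            machine_to_phys_mapping[mfn] = SHARED_M2P_ENTRY;
        else
            machine_to_phys_mapping[mfn] = pfn;
    }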

