[Xen-devel] [PATCH 03 of 17] x86/mm/p2m: hide the current-domain fast-path inside the p2m-pt code

To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 03 of 17] x86/mm/p2m: hide the current-domain fast-path inside the p2m-pt code
From: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Date: Thu, 2 Jun 2011 13:20:13 +0100
Delivery-date: Thu, 02 Jun 2011 05:29:25 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1307017210@xxxxxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <patchbomb.1307017210@xxxxxxxxxxxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mercurial-patchbomb/1.8.3
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1307017012 -3600
# Node ID a7d8612c9ba14ae6efbf420e213c983902433942
# Parent  9344034d624b2e3cd6b0025ab2051cb89bd7e04a
x86/mm/p2m: hide the current-domain fast-path inside the p2m-pt code.

The other implementations of the p2m interface don't have this, and
it will go away entirely when 32-bit builds go away, so take it out
of the interface.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
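
For readers skimming the diff: the following is a minimal standalone sketch of the dispatch change, using stand-in types and a trimmed-down get_entry signature rather than the real Xen ones (the actual hooks also take p2m_type_t, p2m_access_t and p2m_query_t arguments, and the real lookup walks page tables instead of returning a placeholder). Before this patch the p2m interface exposed both ->get_entry and ->get_entry_current, and the generic wrapper in p2m.h chose between them; afterwards only ->get_entry remains and the p2m-pt implementation decides internally when the current-domain linear-mapping fast path applies.

#include <stdio.h>

typedef unsigned long mfn_t;

struct domain;

struct p2m_domain {
    struct domain *domain;
    /* After the patch this is the only lookup hook; the separate
     * ->get_entry_current hook no longer exists. */
    mfn_t (*get_entry)(struct p2m_domain *p2m, unsigned long gfn);
};

struct domain {
    struct p2m_domain *hostp2m;
};

/* Stand-ins for Xen's "current" and p2m_get_hostp2m(). */
static struct domain *current_domain;

static struct p2m_domain *p2m_get_hostp2m(struct domain *d)
{
    return d->hostp2m;
}

/* Slow path: map and walk the target domain's p2m pages. */
static mfn_t p2m_gfn_to_mfn_slow(struct p2m_domain *p2m, unsigned long gfn)
{
    (void)p2m;
    printf("slow path for gfn %lu\n", gfn);
    return gfn;          /* placeholder translation */
}

/* Fast path: read the current domain's p2m through the linear mapping. */
static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m, unsigned long gfn)
{
    (void)p2m;
    printf("fast path for gfn %lu\n", gfn);
    return gfn;          /* placeholder translation */
}

/* The p2m-pt ->get_entry implementation now hides the choice itself. */
static mfn_t p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn)
{
    if ( p2m == p2m_get_hostp2m(current_domain) )
        return p2m_gfn_to_mfn_current(p2m, gfn);
    return p2m_gfn_to_mfn_slow(p2m, gfn);
}

int main(void)
{
    struct p2m_domain p2m = { .get_entry = p2m_gfn_to_mfn };
    struct domain dom = { .hostp2m = &p2m };

    p2m.domain = &dom;
    current_domain = &dom;

    /* Callers always go through the single hook now and no longer pick
     * a "_current" variant themselves. */
    mfn_t mfn = p2m.get_entry(&p2m, 42);
    printf("gfn 42 -> mfn %lu\n", mfn);
    return 0;
}

Note that for EPT the removed hook was a pure pass-through (ept_get_entry_current simply called ept_get_entry), so dropping it there changes no behaviour; the linear-mapping fast path only ever existed in the page-table-based implementation.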

diff -r 9344034d624b -r a7d8612c9ba1 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Thu Jun 02 13:16:52 2011 +0100
@@ -1212,7 +1212,7 @@ int hvm_hap_nested_page_fault(unsigned l
     }
 
     p2m = p2m_get_hostp2m(v->domain);
-    mfn = gfn_to_mfn_type_current(p2m, gfn, &p2mt, &p2ma, p2m_guest);
+    mfn = gfn_to_mfn_type_p2m(p2m, gfn, &p2mt, &p2ma, p2m_guest);
 
     /* Check access permissions first, then handle faults */
     if ( access_valid && (mfn_x(mfn) != INVALID_MFN) )
diff -r 9344034d624b -r a7d8612c9ba1 xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/p2m-ept.c Thu Jun 02 13:16:52 2011 +0100
@@ -692,13 +692,6 @@ out:
     return;
 }
 
-static mfn_t ept_get_entry_current(struct p2m_domain *p2m,
-                                   unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
-                                   p2m_query_t q)
-{
-    return ept_get_entry(p2m, gfn, t, a, q);
-}
-
 /*
  * To test if the new emt type is the same with old,
  * return 1 to not to reset ept entry.
@@ -824,7 +817,6 @@ void ept_p2m_init(struct p2m_domain *p2m
 {
     p2m->set_entry = ept_set_entry;
     p2m->get_entry = ept_get_entry;
-    p2m->get_entry_current = ept_get_entry_current;
     p2m->change_entry_type_global = ept_change_entry_type_global;
 }
 
diff -r 9344034d624b -r a7d8612c9ba1 xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c  Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/p2m-pt.c  Thu Jun 02 13:16:52 2011 +0100
@@ -503,6 +503,180 @@ static int p2m_pod_check_and_populate(st
     return r;
 }
 
+/* Read the current domain's p2m table (through the linear mapping). */
+static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m, 
+                                    unsigned long gfn, p2m_type_t *t, 
+                                    p2m_access_t *a, p2m_query_t q)
+{
+    mfn_t mfn = _mfn(INVALID_MFN);
+    p2m_type_t p2mt = p2m_mmio_dm;
+    paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
+    /* XXX This is for compatibility with the old model, where anything not 
+     * XXX marked as RAM was considered to be emulated MMIO space.
+     * XXX Once we start explicitly registering MMIO regions in the p2m 
+     * XXX we will return p2m_invalid for unmapped gfns */
+
+    l1_pgentry_t l1e = l1e_empty(), *p2m_entry;
+    l2_pgentry_t l2e = l2e_empty();
+    int ret;
+#if CONFIG_PAGING_LEVELS >= 4
+    l3_pgentry_t l3e = l3e_empty();
+#endif
+
+    ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START) 
+           / sizeof(l1_pgentry_t));
+
+#if CONFIG_PAGING_LEVELS >= 4
+    /*
+     * Read & process L3
+     */
+    p2m_entry = (l1_pgentry_t *)
+        &__linear_l2_table[l2_linear_offset(RO_MPT_VIRT_START)
+                           + l3_linear_offset(addr)];
+pod_retry_l3:
+    ret = __copy_from_user(&l3e, p2m_entry, sizeof(l3e));
+
+    if ( ret != 0 || !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+    {
+        if ( (l3e_get_flags(l3e) & _PAGE_PSE) &&
+             (p2m_flags_to_type(l3e_get_flags(l3e)) == p2m_populate_on_demand) )
+        {
+            /* The read has succeeded, so we know that mapping exists */
+            if ( q != p2m_query )
+            {
+                if ( !p2m_pod_demand_populate(p2m, gfn, 18, q) )
+                    goto pod_retry_l3;
+                p2mt = p2m_invalid;
+                printk("%s: Allocate 1GB failed!\n", __func__);
+                goto out;
+            }
+            else
+            {
+                p2mt = p2m_populate_on_demand;
+                goto out;
+            }
+        }
+        goto pod_retry_l2;
+    }
+
+    if ( l3e_get_flags(l3e) & _PAGE_PSE )
+    {
+        p2mt = p2m_flags_to_type(l3e_get_flags(l3e));
+        ASSERT(l3e_get_pfn(l3e) != INVALID_MFN || !p2m_is_ram(p2mt));
+        if (p2m_is_valid(p2mt) )
+            mfn = _mfn(l3e_get_pfn(l3e) + 
+                       l2_table_offset(addr) * L1_PAGETABLE_ENTRIES + 
+                       l1_table_offset(addr));
+        else
+            p2mt = p2m_mmio_dm;
+            
+        goto out;
+    }
+#endif
+    /*
+     * Read & process L2
+     */
+    p2m_entry = &__linear_l1_table[l1_linear_offset(RO_MPT_VIRT_START)
+                                   + l2_linear_offset(addr)];
+
+pod_retry_l2:
+    ret = __copy_from_user(&l2e,
+                           p2m_entry,
+                           sizeof(l2e));
+    if ( ret != 0
+         || !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
+    {
+        if( (l2e_get_flags(l2e) & _PAGE_PSE)
+            && ( p2m_flags_to_type(l2e_get_flags(l2e))
+                 == p2m_populate_on_demand ) )
+        {
+            /* The read has succeeded, so we know that the mapping
+             * exits at this point.  */
+            if ( q != p2m_query )
+            {
+                if ( !p2m_pod_check_and_populate(p2m, gfn,
+                                                 p2m_entry, 9, q) )
+                    goto pod_retry_l2;
+
+                /* Allocate failed. */
+                p2mt = p2m_invalid;
+                printk("%s: Allocate failed!\n", __func__);
+                goto out;
+            }
+            else
+            {
+                p2mt = p2m_populate_on_demand;
+                goto out;
+            }
+        }
+
+        goto pod_retry_l1;
+    }
+        
+    if (l2e_get_flags(l2e) & _PAGE_PSE)
+    {
+        p2mt = p2m_flags_to_type(l2e_get_flags(l2e));
+        ASSERT(l2e_get_pfn(l2e) != INVALID_MFN || !p2m_is_ram(p2mt));
+
+        if ( p2m_is_valid(p2mt) )
+            mfn = _mfn(l2e_get_pfn(l2e) + l1_table_offset(addr));
+        else
+            p2mt = p2m_mmio_dm;
+
+        goto out;
+    }
+
+    /*
+     * Read and process L1
+     */
+
+    /* Need to __copy_from_user because the p2m is sparse and this
+     * part might not exist */
+pod_retry_l1:
+    p2m_entry = &phys_to_machine_mapping[gfn];
+
+    ret = __copy_from_user(&l1e,
+                           p2m_entry,
+                           sizeof(l1e));
+            
+    if ( ret == 0 ) {
+        p2mt = p2m_flags_to_type(l1e_get_flags(l1e));
+        ASSERT(l1e_get_pfn(l1e) != INVALID_MFN || !p2m_is_ram(p2mt));
+
+        if ( p2m_flags_to_type(l1e_get_flags(l1e))
+             == p2m_populate_on_demand )
+        {
+            /* The read has succeeded, so we know that the mapping
+             * exits at this point.  */
+            if ( q != p2m_query )
+            {
+                if ( !p2m_pod_check_and_populate(p2m, gfn,
+                                                 (l1_pgentry_t *)p2m_entry, 0, q) )
+                    goto pod_retry_l1;
+
+                /* Allocate failed. */
+                p2mt = p2m_invalid;
+                goto out;
+            }
+            else
+            {
+                p2mt = p2m_populate_on_demand;
+                goto out;
+            }
+        }
+
+        if ( p2m_is_valid(p2mt) || p2m_is_grant(p2mt) )
+            mfn = _mfn(l1e_get_pfn(l1e));
+        else 
+            /* XXX see above */
+            p2mt = p2m_mmio_dm;
+    }
+    
+out:
+    *t = p2mt;
+    return mfn;
+}
+
 
 static mfn_t
 p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
@@ -529,6 +703,10 @@ p2m_gfn_to_mfn(struct p2m_domain *p2m, u
         /* This pfn is higher than the highest the p2m map currently holds */
         return _mfn(INVALID_MFN);
 
+    /* Use the fast path with the linear mapping if we can */
+    if ( p2m == p2m_get_hostp2m(current->domain) )
+        return p2m_gfn_to_mfn_current(p2m, gfn, t, a, q);
+
 #if CONFIG_PAGING_LEVELS >= 4
     {
         l4_pgentry_t *l4e = map_domain_page(mfn_x(mfn));
@@ -646,185 +824,6 @@ pod_retry_l1:
     return (p2m_is_valid(*t) || p2m_is_grant(*t)) ? mfn : _mfn(INVALID_MFN);
 }
 
-/* Read the current domain's p2m table (through the linear mapping). */
-static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m,
-                                    unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
-                                    p2m_query_t q)
-{
-    mfn_t mfn = _mfn(INVALID_MFN);
-    p2m_type_t p2mt = p2m_mmio_dm;
-    paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
-    /* XXX This is for compatibility with the old model, where anything not 
-     * XXX marked as RAM was considered to be emulated MMIO space.
-     * XXX Once we start explicitly registering MMIO regions in the p2m 
-     * XXX we will return p2m_invalid for unmapped gfns */
-
-    /* Not currently implemented except for EPT */
-    *a = p2m_access_rwx;
-
-    if ( gfn <= p2m->max_mapped_pfn )
-    {
-        l1_pgentry_t l1e = l1e_empty(), *p2m_entry;
-        l2_pgentry_t l2e = l2e_empty();
-        int ret;
-#if CONFIG_PAGING_LEVELS >= 4
-        l3_pgentry_t l3e = l3e_empty();
-#endif
-
-        ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START) 
-               / sizeof(l1_pgentry_t));
-
-#if CONFIG_PAGING_LEVELS >= 4
-        /*
-         * Read & process L3
-         */
-        p2m_entry = (l1_pgentry_t *)
-            &__linear_l2_table[l2_linear_offset(RO_MPT_VIRT_START)
-                               + l3_linear_offset(addr)];
-    pod_retry_l3:
-        ret = __copy_from_user(&l3e, p2m_entry, sizeof(l3e));
-
-        if ( ret != 0 || !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
-        {
-            if ( (l3e_get_flags(l3e) & _PAGE_PSE) &&
-                 (p2m_flags_to_type(l3e_get_flags(l3e)) == p2m_populate_on_demand) )
-            {
-                /* The read has succeeded, so we know that mapping exists */
-                if ( q != p2m_query )
-                {
-                    if ( !p2m_pod_demand_populate(p2m, gfn, 18, q) )
-                        goto pod_retry_l3;
-                    p2mt = p2m_invalid;
-                    printk("%s: Allocate 1GB failed!\n", __func__);
-                    goto out;
-                }
-                else
-                {
-                    p2mt = p2m_populate_on_demand;
-                    goto out;
-                }
-            }
-            goto pod_retry_l2;
-        }
-
-        if ( l3e_get_flags(l3e) & _PAGE_PSE )
-        {
-            p2mt = p2m_flags_to_type(l3e_get_flags(l3e));
-            ASSERT(l3e_get_pfn(l3e) != INVALID_MFN || !p2m_is_ram(p2mt));
-            if (p2m_is_valid(p2mt) )
-                mfn = _mfn(l3e_get_pfn(l3e) + 
-                           l2_table_offset(addr) * L1_PAGETABLE_ENTRIES + 
-                           l1_table_offset(addr));
-            else
-                p2mt = p2m_mmio_dm;
-            
-            goto out;
-        }
-#endif
-        /*
-         * Read & process L2
-         */
-        p2m_entry = &__linear_l1_table[l1_linear_offset(RO_MPT_VIRT_START)
-                                       + l2_linear_offset(addr)];
-
-    pod_retry_l2:
-        ret = __copy_from_user(&l2e,
-                               p2m_entry,
-                               sizeof(l2e));
-        if ( ret != 0
-             || !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
-        {
-            if( (l2e_get_flags(l2e) & _PAGE_PSE)
-                && ( p2m_flags_to_type(l2e_get_flags(l2e))
-                     == p2m_populate_on_demand ) )
-            {
-                /* The read has succeeded, so we know that the mapping
-                 * exits at this point.  */
-                if ( q != p2m_query )
-                {
-                    if ( !p2m_pod_check_and_populate(p2m, gfn,
-                                                     p2m_entry, 9, q) )
-                        goto pod_retry_l2;
-
-                    /* Allocate failed. */
-                    p2mt = p2m_invalid;
-                    printk("%s: Allocate failed!\n", __func__);
-                    goto out;
-                }
-                else
-                {
-                    p2mt = p2m_populate_on_demand;
-                    goto out;
-                }
-            }
-
-            goto pod_retry_l1;
-        }
-        
-        if (l2e_get_flags(l2e) & _PAGE_PSE)
-        {
-            p2mt = p2m_flags_to_type(l2e_get_flags(l2e));
-            ASSERT(l2e_get_pfn(l2e) != INVALID_MFN || !p2m_is_ram(p2mt));
-
-            if ( p2m_is_valid(p2mt) )
-                mfn = _mfn(l2e_get_pfn(l2e) + l1_table_offset(addr));
-            else
-                p2mt = p2m_mmio_dm;
-
-            goto out;
-        }
-
-        /*
-         * Read and process L1
-         */
-
-        /* Need to __copy_from_user because the p2m is sparse and this
-         * part might not exist */
-    pod_retry_l1:
-        p2m_entry = &phys_to_machine_mapping[gfn];
-
-        ret = __copy_from_user(&l1e,
-                               p2m_entry,
-                               sizeof(l1e));
-            
-        if ( ret == 0 ) {
-            p2mt = p2m_flags_to_type(l1e_get_flags(l1e));
-            ASSERT(l1e_get_pfn(l1e) != INVALID_MFN || !p2m_is_ram(p2mt));
-
-            if ( p2m_flags_to_type(l1e_get_flags(l1e))
-                 == p2m_populate_on_demand )
-            {
-                /* The read has succeeded, so we know that the mapping
-                 * exits at this point.  */
-                if ( q != p2m_query )
-                {
-                    if ( !p2m_pod_check_and_populate(p2m, gfn,
-                                                     (l1_pgentry_t *)p2m_entry, 0, q) )
-                        goto pod_retry_l1;
-
-                    /* Allocate failed. */
-                    p2mt = p2m_invalid;
-                    goto out;
-                }
-                else
-                {
-                    p2mt = p2m_populate_on_demand;
-                    goto out;
-                }
-            }
-
-            if ( p2m_is_valid(p2mt) || p2m_is_grant(p2mt) )
-                mfn = _mfn(l1e_get_pfn(l1e));
-            else 
-                /* XXX see above */
-                p2mt = p2m_mmio_dm;
-        }
-    }
-out:
-    *t = p2mt;
-    return mfn;
-}
-
 /* Walk the whole p2m table, changing any entries of the old type
  * to the new type.  This is used in hardware-assisted paging to 
  * quickly enable or diable log-dirty tracking */
@@ -968,7 +967,6 @@ void p2m_pt_init(struct p2m_domain *p2m)
 {
     p2m->set_entry = p2m_set_entry;
     p2m->get_entry = p2m_gfn_to_mfn;
-    p2m->get_entry_current = p2m_gfn_to_mfn_current;
     p2m->change_entry_type_global = p2m_change_type_global;
     p2m->write_p2m_entry = paging_write_p2m_entry;
 }
diff -r 9344034d624b -r a7d8612c9ba1 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Thu Jun 02 13:16:52 2011 +0100
@@ -101,7 +101,6 @@ p2m_init_nestedp2m(struct domain *d)
         if (p2m == NULL)
             return -ENOMEM;
         p2m_initialise(d, p2m);
-        p2m->get_entry_current = p2m->get_entry;
         p2m->write_p2m_entry = nestedp2m_write_p2m_entry;
     }
 
@@ -956,7 +955,6 @@ p2m_flush_locked(struct p2m_domain *p2m)
 
     p2m_teardown(p2m);
     p2m_initialise(p2m->domain, p2m);
-    p2m->get_entry_current = p2m->get_entry;
     p2m->write_p2m_entry = nestedp2m_write_p2m_entry;
     return p2m_alloc_table(p2m);
 }
diff -r 9344034d624b -r a7d8612c9ba1 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/include/asm-x86/p2m.h Thu Jun 02 13:16:52 2011 +0100
@@ -218,11 +218,6 @@ struct p2m_domain {
                                        p2m_type_t *p2mt,
                                        p2m_access_t *p2ma,
                                        p2m_query_t q);
-    mfn_t              (*get_entry_current)(struct p2m_domain *p2m,
-                                            unsigned long gfn,
-                                            p2m_type_t *p2mt,
-                                            p2m_access_t *p2ma,
-                                            p2m_query_t q);
     void               (*change_entry_type_global)(struct p2m_domain *p2m,
                                                    p2m_type_t ot,
                                                    p2m_type_t nt);
@@ -362,45 +357,14 @@ struct p2m_domain *p2m_get_p2m(struct vc
         spin_unlock(&(_domain)->arch.nested_p2m_lock);                 \
     } while (0)
 
-/* Read the current domain's p2m table.  Do not populate PoD pages. */
-static inline mfn_t gfn_to_mfn_type_current(struct p2m_domain *p2m,
-                                            unsigned long gfn, p2m_type_t *t,
-                                            p2m_access_t *a,
-                                            p2m_query_t q)
-{
-    return p2m->get_entry_current(p2m, gfn, t, a, q);
-}
 
 /* Read P2M table, mapping pages as we go.
  * Do not populate PoD pages. */
 static inline mfn_t
 gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
-                              p2m_type_t *t, p2m_query_t q)
+                    p2m_type_t *t, p2m_access_t *a, p2m_query_t q)
 {
-    p2m_access_t a = 0;
-    return p2m->get_entry(p2m, gfn, t, &a, q);
-}
-
-
-/* General conversion function from gfn to mfn */
-static inline mfn_t _gfn_to_mfn_type(struct p2m_domain *p2m,
-                                     unsigned long gfn, p2m_type_t *t,
-                                     p2m_query_t q)
-{
-    mfn_t mfn;
-    p2m_access_t a;
-
-    if ( !p2m || !paging_mode_translate(p2m->domain) )
-    {
-        /* Not necessarily true, but for non-translated guests, we claim
-         * it's the most generic kind of memory */
-        *t = p2m_ram_rw;
-        mfn = _mfn(gfn);
-    }
-    else if ( likely(current->domain == p2m->domain) )
-        mfn = gfn_to_mfn_type_current(p2m, gfn, t, &a, q);
-    else
-        mfn = gfn_to_mfn_type_p2m(p2m, gfn, t, q);
+    mfn_t mfn = p2m->get_entry(p2m, gfn, t, a, q);
 
 #ifdef __x86_64__
     if (unlikely((p2m_is_broken(*t))))
@@ -415,6 +379,25 @@ static inline mfn_t _gfn_to_mfn_type(str
     return mfn;
 }
 
+
+/* General conversion function from gfn to mfn */
+static inline mfn_t _gfn_to_mfn_type(struct p2m_domain *p2m,
+                                     unsigned long gfn, p2m_type_t *t,
+                                     p2m_query_t q)
+{
+    p2m_access_t a;
+
+    if ( !p2m || !paging_mode_translate(p2m->domain) )
+    {
+        /* Not necessarily true, but for non-translated guests, we claim
+         * it's the most generic kind of memory */
+        *t = p2m_ram_rw;
+        return _mfn(gfn);
+    }
+    
+    return gfn_to_mfn_type_p2m(p2m, gfn, t, &a, q);
+}
+
 #define gfn_to_mfn(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_alloc)
 #define gfn_to_mfn_query(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_query)
 #define gfn_to_mfn_guest(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_guest)

