[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH V4 10/10] xen: Speed up set_phys_to_machine() by using read-only mappings



Instead of checking at each call of set_phys_to_machine() whether a
new p2m page has to be allocated due to writing an entry in a large
invalid or identity area, just map those areas read only and react
to a page fault on write by allocating the new page.

This change will make the common path with no allocation much
faster as it only requires a single write of the new mfn instead
of walking the address translation tables and checking for the
special cases.

Suggested-by: David Vrabel <david.vrabel@xxxxxxxxxx>
Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Reviewed-by: David Vrabel <david.vrabel@xxxxxxxxxx>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 arch/x86/xen/p2m.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 7d84473..8b5db51 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -70,6 +70,7 @@
 
 #include <asm/cache.h>
 #include <asm/setup.h>
+#include <asm/uaccess.h>
 
 #include <asm/xen/page.h>
 #include <asm/xen/hypercall.h>
@@ -316,9 +317,9 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
        paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                set_pte(p2m_missing_pte + i,
-                       pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL));
+                       pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO));
                set_pte(p2m_identity_pte + i,
-                       pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL));
+                       pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO));
        }
 
        for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
@@ -365,7 +366,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
                                p2m_missing : p2m_identity;
                        ptep = populate_extra_pte((unsigned long)(p2m + pfn));
                        set_pte(ptep,
-                               pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));
+                               pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO));
                        continue;
                }
 
@@ -624,6 +625,9 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
                return true;
        }
 
+       if (likely(!__put_user(mfn, xen_p2m_addr + pfn)))
+               return true;
+
        ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
        BUG_ON(!ptep || level != PG_LEVEL_4K);
 
@@ -633,9 +637,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
        if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
                return mfn == IDENTITY_FRAME(pfn);
 
-       xen_p2m_addr[pfn] = mfn;
-
-       return true;
+       return false;
 }
 
 bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-- 
2.1.2


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.