[Xen-devel] [PATCH 1/2] x86/mm: factor out the code for shattering an l3 PTE
map_pages_to_xen and modify_xen_mappings are performing almost exactly
the same operations when shattering an l3 PTE, the only difference
being whether we want to flush.

Signed-off-by: Hongyan Xia <hongyxia@xxxxxxxxxx>
---
 xen/arch/x86/mm.c | 85 ++++++++++++++++++++++-------------------------
 1 file changed, 40 insertions(+), 45 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 7d4dd80a85..42aaaa1083 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5151,6 +5151,43 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned long v)
         flush_area_local((const void *)v, f) : \
         flush_area_all((const void *)v, f))
 
+/* Shatter an l3 entry and populate l2. If virt is passed in, also do flush. */
+static void shatter_l3e(l3_pgentry_t *pl3e, l2_pgentry_t *l2t,
+                        unsigned long virt, bool locking)
+{
+    unsigned int i;
+    l3_pgentry_t ol3e = *pl3e;
+
+    for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
+        l2e_write(l2t + i,
+                  l2e_from_pfn(l3e_get_pfn(ol3e) +
+                               (i << PAGETABLE_ORDER),
+                               l3e_get_flags(ol3e)));
+
+    if ( locking )
+        spin_lock(&map_pgdir_lock);
+    if ( (l3e_get_flags(ol3e) & _PAGE_PRESENT) &&
+         (l3e_get_flags(ol3e) & _PAGE_PSE) )
+    {
+        l3e_write_atomic(pl3e, l3e_from_mfn(virt_to_mfn(l2t),
+                                            __PAGE_HYPERVISOR));
+        l2t = NULL;
+    }
+    if ( locking )
+        spin_unlock(&map_pgdir_lock);
+    if ( virt )
+    {
+        unsigned int flush_flags =
+            FLUSH_TLB | FLUSH_ORDER(2 * PAGETABLE_ORDER);
+
+        if ( (l3e_get_flags(ol3e) & _PAGE_GLOBAL) )
+            flush_flags |= FLUSH_TLB_GLOBAL;
+        flush_area(virt, flush_flags);
+    }
+    if ( l2t )
+        free_xen_pagetable(l2t);
+}
+
 int map_pages_to_xen(
     unsigned long virt,
     mfn_t mfn,
@@ -5244,9 +5281,6 @@ int map_pages_to_xen(
         if ( (l3e_get_flags(ol3e) & _PAGE_PRESENT) &&
              (l3e_get_flags(ol3e) & _PAGE_PSE) )
         {
-            unsigned int flush_flags =
-                FLUSH_TLB | FLUSH_ORDER(2 * PAGETABLE_ORDER);
-
             /* Skip this PTE if there is no change. */
             if ( ((l3e_get_pfn(ol3e) & ~(L2_PAGETABLE_ENTRIES *
                                          L1_PAGETABLE_ENTRIES - 1)) +
@@ -5270,30 +5304,8 @@ int map_pages_to_xen(
             pl2e = alloc_xen_pagetable();
             if ( pl2e == NULL )
                 return -ENOMEM;
-
-            for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
-                l2e_write(pl2e + i,
-                          l2e_from_pfn(l3e_get_pfn(ol3e) +
-                                       (i << PAGETABLE_ORDER),
-                                       l3e_get_flags(ol3e)));
-
-            if ( l3e_get_flags(ol3e) & _PAGE_GLOBAL )
-                flush_flags |= FLUSH_TLB_GLOBAL;
-
-            if ( locking )
-                spin_lock(&map_pgdir_lock);
-            if ( (l3e_get_flags(*pl3e) & _PAGE_PRESENT) &&
-                 (l3e_get_flags(*pl3e) & _PAGE_PSE) )
-            {
-                l3e_write_atomic(pl3e, l3e_from_mfn(virt_to_mfn(pl2e),
-                                                    __PAGE_HYPERVISOR));
-                pl2e = NULL;
-            }
-            if ( locking )
-                spin_unlock(&map_pgdir_lock);
-            flush_area(virt, flush_flags);
-            if ( pl2e )
-                free_xen_pagetable(pl2e);
+            /* Pass virt to indicate we need to flush. */
+            shatter_l3e(pl3e, pl2e, virt, locking);
         }
 
         pl2e = virt_to_xen_l2e(virt);
@@ -5581,24 +5593,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
             pl2e = alloc_xen_pagetable();
             if ( !pl2e )
                 return -ENOMEM;
-            for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
-                l2e_write(pl2e + i,
-                          l2e_from_pfn(l3e_get_pfn(*pl3e) +
-                                       (i << PAGETABLE_ORDER),
-                                       l3e_get_flags(*pl3e)));
-            if ( locking )
-                spin_lock(&map_pgdir_lock);
-            if ( (l3e_get_flags(*pl3e) & _PAGE_PRESENT) &&
-                 (l3e_get_flags(*pl3e) & _PAGE_PSE) )
-            {
-                l3e_write_atomic(pl3e, l3e_from_mfn(virt_to_mfn(pl2e),
-                                                    __PAGE_HYPERVISOR));
-                pl2e = NULL;
-            }
-            if ( locking )
-                spin_unlock(&map_pgdir_lock);
-            if ( pl2e )
-                free_xen_pagetable(pl2e);
+            shatter_l3e(pl3e, pl2e, 0, locking);
         }
 
         /*
-- 
2.17.1
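As an aside for readers less familiar with Xen's page-table macros, the
pattern the patch factors out looks roughly like the sketch below when
modelled with plain C types. Everything here (model_l3e_t,
model_shatter_l3e, the MODEL_* constants, the printf standing in for the
TLB flush) is invented for illustration and is not part of the patch or
of Xen; the real code uses l3e_write_atomic(), map_pgdir_lock and
flush_area() as shown in the diff above.

/*
 * Illustrative sketch only, not part of the patch: it models the factored-out
 * shattering pattern with plain C types in place of Xen's page-table macros.
 * All names below are invented for this example.  Compile with: cc -pthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MODEL_L2_ENTRIES  512           /* entries in the lower-level table */
#define MODEL_PRESENT     0x1u
#define MODEL_PSE         0x2u          /* entry maps a superpage directly */

typedef struct {
    uint64_t pfn;                       /* first frame covered by this entry */
    uint32_t flags;
} model_l2e_t;

typedef struct {
    uint64_t     pfn;                   /* meaningful while MODEL_PSE is set */
    model_l2e_t *l2t;                   /* meaningful once MODEL_PSE is clear */
    uint32_t     flags;
} model_l3e_t;

static pthread_mutex_t model_pgdir_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Shatter a superpage l3 entry: fill a fresh l2 table with entries covering
 * the same frames, then, under the lock, repoint the l3 entry at that table.
 * 'do_flush' stands in for the "virt was passed in, so flush" case.
 */
static int model_shatter_l3e(model_l3e_t *pl3e, bool locking, bool do_flush)
{
    model_l2e_t *l2t = calloc(MODEL_L2_ENTRIES, sizeof(*l2t));
    unsigned int i;

    if ( !l2t )
        return -1;

    for ( i = 0; i < MODEL_L2_ENTRIES; i++ )
    {
        /* Each l2 entry covers 512 frames, mirroring (i << PAGETABLE_ORDER). */
        l2t[i].pfn   = pl3e->pfn + ((uint64_t)i << 9);
        l2t[i].flags = pl3e->flags;     /* each l2 entry is still a superpage */
    }

    if ( locking )
        pthread_mutex_lock(&model_pgdir_lock);
    if ( (pl3e->flags & MODEL_PRESENT) && (pl3e->flags & MODEL_PSE) )
    {
        /* The real code does this swap with l3e_write_atomic(). */
        pl3e->l2t    = l2t;
        pl3e->flags &= ~MODEL_PSE;
        l2t = NULL;                     /* ownership moved into the l3 entry */
    }
    if ( locking )
        pthread_mutex_unlock(&model_pgdir_lock);

    if ( do_flush )
        printf("flush_area() would run here\n"); /* stand-in for the TLB flush */

    free(l2t);                          /* no-op if the swap above happened */
    return 0;
}

int main(void)
{
    model_l3e_t l3e = { .pfn = 0x200000, .flags = MODEL_PRESENT | MODEL_PSE };

    if ( model_shatter_l3e(&l3e, true, true) )
        return 1;

    printf("last l2 entry starts at pfn %#llx\n",
           (unsigned long long)l3e.l2t[MODEL_L2_ENTRIES - 1].pfn);
    free(l3e.l2t);
    return 0;
}

The detail the helper has to capture is the ownership handover: once the
swap into the l3 entry succeeds, the freshly built l2 table must not be
freed, which is why both the patch and the sketch NULL out the local
pointer under the lock and only free it when the swap did not happen.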