
[Xen-devel] [PATCH RFC 34/44] x86: Drop the PERDOMAIN mappings



With the mapcache, xlat and GDT/LDT moved over to the PERCPU mappings, there
are no remaining users of the PERDOMAIN mappings.  Drop the whole PERDOMAIN
infrastructure, and remove the PERDOMAIN slot from the virtual address layout.

Slide each of the subsequent slots back by one PML4 entry, and extend the
directmap back to its original size.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
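
The "slide back by one" above is pure PML4 slot arithmetic.  A standalone
sketch of it (PML4_ADDR is reimplemented locally under the usual x86-64
assumptions of 48-bit canonical addresses and 512GB per PML4 slot; it is
not Xen's macro):

  #include <stdint.h>
  #include <stdio.h>

  #define PML4_ENTRY_BITS   39                      /* 512GB per PML4 slot */
  #define PML4_ENTRY_BYTES  (1ULL << PML4_ENTRY_BITS)

  /* Shift the slot index into place, then sign-extend bit 47 to form the
   * canonical 48-bit address the hardware requires. */
  #define CANONICAL(a)      (((a) & (1ULL << 47)) ? ((a) | ~((1ULL << 48) - 1)) \
                                                  : (a))
  #define PML4_ADDR(slot)   CANONICAL((uint64_t)(slot) << PML4_ENTRY_BITS)

  int main(void)
  {
      /* The directmap previously began at slot 263; it now begins at 262. */
      printf("old: start %#llx, %d slots\n",
             (unsigned long long)PML4_ADDR(263), 511 - 263);
      printf("new: start %#llx, %d slots\n",
             (unsigned long long)PML4_ADDR(262), 511 - 262);
      return 0;
  }

which prints 0xffff838000000000 with 248 slots before, and
0xffff830000000000 with 249 slots after, i.e. the directmap regains exactly
one slot (512GB) of virtual address space.
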
 xen/arch/x86/domain.c        |   2 -
 xen/arch/x86/hvm/hvm.c       |   6 --
 xen/arch/x86/mm.c            | 234 -------------------------------------------
 xen/arch/x86/pv/domain.c     |  39 +-------
 xen/include/asm-x86/config.h |  36 ++-----
 xen/include/asm-x86/domain.h |   4 -
 xen/include/asm-x86/mm.h     |  10 --
 7 files changed, 8 insertions(+), 323 deletions(-)
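
One sanity check on the config.h hunk below: MAX_VIRT_CPUS was previously
derived from the size of the GDT/LDT shadow area and becomes the literal
8192.  Recomputing the old formula standalone (with the usual x86 values
PAGETABLE_ORDER=9 and PAGE_SHIFT=12 assumed) shows the value is unchanged:

  #include <stdio.h>

  #define PAGETABLE_ORDER        9
  #define PAGE_SHIFT             12
  #define PML4_ENTRY_BYTES       (1ULL << 39)
  /* 2^39 >> 29 = 1024MB of GDT/LDT space per domain. */
  #define PERDOMAIN_SLOT_MBYTES  (PML4_ENTRY_BYTES >> (20 + PAGETABLE_ORDER))
  #define GDT_LDT_VCPU_SHIFT     5    /* 32 pages, i.e. 128KB, per vCPU */
  #define GDT_LDT_VCPU_VA_SHIFT  (GDT_LDT_VCPU_SHIFT + PAGE_SHIFT)
  #define GDT_LDT_MBYTES         PERDOMAIN_SLOT_MBYTES
  #define OLD_MAX_VIRT_CPUS      (GDT_LDT_MBYTES << (20 - GDT_LDT_VCPU_VA_SHIFT))

  int main(void)
  {
      printf("%llu\n", OLD_MAX_VIRT_CPUS);  /* prints 8192 */
      return 0;
  }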

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 2d665c6..eeca01d 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -568,7 +568,6 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
     xfree(d->arch.msr);
     if ( paging_initialised )
         paging_final_teardown(d);
-    free_perdomain_mappings(d);
 
     return rc;
 }
@@ -590,7 +589,6 @@ void arch_domain_destroy(struct domain *d)
 
     if ( is_pv_domain(d) )
         pv_domain_destroy(d);
-    free_perdomain_mappings(d);
 
     free_xenheap_page(d->shared_info);
     cleanup_domain_irq_mapping(d);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 5836269..85447dd 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -583,10 +583,6 @@ int hvm_domain_initialise(struct domain *d, unsigned long domcr_flags,
     INIT_LIST_HEAD(&d->arch.hvm_domain.write_map.list);
     INIT_LIST_HEAD(&d->arch.hvm_domain.g2m_ioport_list);
 
-    rc = create_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0, NULL, NULL);
-    if ( rc )
-        goto fail;
-
     hvm_init_cacheattr_region_list(d);
 
     rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
@@ -670,8 +666,6 @@ int hvm_domain_initialise(struct domain *d, unsigned long domcr_flags,
     xfree(d->arch.hvm_domain.irq);
  fail0:
     hvm_destroy_cacheattr_region_list(d);
-    destroy_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0);
- fail:
     return rc;
 }
 
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 8b925b3..933bd67 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1594,13 +1594,6 @@ void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
         mfn_eq(sl4mfn, INVALID_MFN) ? l4e_empty() :
         l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR_RW);
 
-    /* Slot 261: Per-domain mappings (if applicable). */
-    l4t[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        d ? l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW)
-          : l4e_empty();
-
-    /* !!! WARNING - TEMPORARILY STALE BELOW !!! */
-
     /* Slot 261-: text/data/bss, RW M2P, vmap, frametable, directmap. */
 #ifndef NDEBUG
     if ( short_directmap &&
@@ -5257,233 +5250,6 @@ void __iomem *ioremap(paddr_t pa, size_t len)
     return (void __force __iomem *)va;
 }
 
-int create_perdomain_mapping(struct domain *d, unsigned long va,
-                             unsigned int nr, l1_pgentry_t **pl1tab,
-                             struct page_info **ppg)
-{
-    struct page_info *pg;
-    l3_pgentry_t *l3tab;
-    l2_pgentry_t *l2tab;
-    l1_pgentry_t *l1tab;
-    int rc = 0;
-
-    ASSERT(va >= PERDOMAIN_VIRT_START &&
-           va < PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS));
-
-    if ( !d->arch.perdomain_l3_pg )
-    {
-        pg = alloc_domheap_page(d, MEMF_no_owner);
-        if ( !pg )
-            return -ENOMEM;
-        l3tab = __map_domain_page(pg);
-        clear_page(l3tab);
-        d->arch.perdomain_l3_pg = pg;
-        if ( !nr )
-        {
-            unmap_domain_page(l3tab);
-            return 0;
-        }
-    }
-    else if ( !nr )
-        return 0;
-    else
-        l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
-
-    ASSERT(!l3_table_offset(va ^ (va + nr * PAGE_SIZE - 1)));
-
-    if ( !(l3e_get_flags(l3tab[l3_table_offset(va)]) & _PAGE_PRESENT) )
-    {
-        pg = alloc_domheap_page(d, MEMF_no_owner);
-        if ( !pg )
-        {
-            unmap_domain_page(l3tab);
-            return -ENOMEM;
-        }
-        l2tab = __map_domain_page(pg);
-        clear_page(l2tab);
-        l3tab[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR_RW);
-    }
-    else
-        l2tab = map_l2t_from_l3e(l3tab[l3_table_offset(va)]);
-
-    unmap_domain_page(l3tab);
-
-    if ( !pl1tab && !ppg )
-    {
-        unmap_domain_page(l2tab);
-        return 0;
-    }
-
-    for ( l1tab = NULL; !rc && nr--; )
-    {
-        l2_pgentry_t *pl2e = l2tab + l2_table_offset(va);
-
-        if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
-        {
-            if ( pl1tab && !IS_NIL(pl1tab) )
-            {
-                l1tab = alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
-                if ( !l1tab )
-                {
-                    rc = -ENOMEM;
-                    break;
-                }
-                ASSERT(!pl1tab[l2_table_offset(va)]);
-                pl1tab[l2_table_offset(va)] = l1tab;
-                pg = virt_to_page(l1tab);
-            }
-            else
-            {
-                pg = alloc_domheap_page(d, MEMF_no_owner);
-                if ( !pg )
-                {
-                    rc = -ENOMEM;
-                    break;
-                }
-                l1tab = __map_domain_page(pg);
-            }
-            clear_page(l1tab);
-            *pl2e = l2e_from_page(pg, __PAGE_HYPERVISOR_RW);
-        }
-        else if ( !l1tab )
-            l1tab = map_l1t_from_l2e(*pl2e);
-
-        if ( ppg &&
-             !(l1e_get_flags(l1tab[l1_table_offset(va)]) & _PAGE_PRESENT) )
-        {
-            pg = alloc_domheap_page(d, MEMF_no_owner);
-            if ( pg )
-            {
-                clear_domain_page(page_to_mfn(pg));
-                if ( !IS_NIL(ppg) )
-                    *ppg++ = pg;
-                l1tab[l1_table_offset(va)] =
-                    l1e_from_page(pg, __PAGE_HYPERVISOR_RW | _PAGE_AVAIL0);
-                l2e_add_flags(*pl2e, _PAGE_AVAIL0);
-            }
-            else
-                rc = -ENOMEM;
-        }
-
-        va += PAGE_SIZE;
-        if ( rc || !nr || !l1_table_offset(va) )
-        {
-            /* Note that this is a no-op for the alloc_xenheap_page() case. */
-            unmap_domain_page(l1tab);
-            l1tab = NULL;
-        }
-    }
-
-    ASSERT(!l1tab);
-    unmap_domain_page(l2tab);
-
-    return rc;
-}
-
-void destroy_perdomain_mapping(struct domain *d, unsigned long va,
-                               unsigned int nr)
-{
-    const l3_pgentry_t *l3tab, *pl3e;
-
-    ASSERT(va >= PERDOMAIN_VIRT_START &&
-           va < PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS));
-    ASSERT(!l3_table_offset(va ^ (va + nr * PAGE_SIZE - 1)));
-
-    if ( !d->arch.perdomain_l3_pg )
-        return;
-
-    l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
-    pl3e = l3tab + l3_table_offset(va);
-
-    if ( l3e_get_flags(*pl3e) & _PAGE_PRESENT )
-    {
-        const l2_pgentry_t *l2tab = map_l2t_from_l3e(*pl3e);
-        const l2_pgentry_t *pl2e = l2tab + l2_table_offset(va);
-        unsigned int i = l1_table_offset(va);
-
-        while ( nr )
-        {
-            if ( l2e_get_flags(*pl2e) & _PAGE_PRESENT )
-            {
-                l1_pgentry_t *l1tab = map_l1t_from_l2e(*pl2e);
-
-                for ( ; nr && i < L1_PAGETABLE_ENTRIES; --nr, ++i )
-                {
-                    if ( (l1e_get_flags(l1tab[i]) &
-                          (_PAGE_PRESENT | _PAGE_AVAIL0)) ==
-                         (_PAGE_PRESENT | _PAGE_AVAIL0) )
-                        free_domheap_page(l1e_get_page(l1tab[i]));
-                    l1tab[i] = l1e_empty();
-                }
-
-                unmap_domain_page(l1tab);
-            }
-            else if ( nr + i < L1_PAGETABLE_ENTRIES )
-                break;
-            else
-                nr -= L1_PAGETABLE_ENTRIES - i;
-
-            ++pl2e;
-            i = 0;
-        }
-
-        unmap_domain_page(l2tab);
-    }
-
-    unmap_domain_page(l3tab);
-}
-
-void free_perdomain_mappings(struct domain *d)
-{
-    l3_pgentry_t *l3tab;
-    unsigned int i;
-
-    if ( !d->arch.perdomain_l3_pg )
-        return;
-
-    l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
-
-    for ( i = 0; i < PERDOMAIN_SLOTS; ++i)
-        if ( l3e_get_flags(l3tab[i]) & _PAGE_PRESENT )
-        {
-            struct page_info *l2pg = l3e_get_page(l3tab[i]);
-            l2_pgentry_t *l2tab = __map_domain_page(l2pg);
-            unsigned int j;
-
-            for ( j = 0; j < L2_PAGETABLE_ENTRIES; ++j )
-                if ( l2e_get_flags(l2tab[j]) & _PAGE_PRESENT )
-                {
-                    struct page_info *l1pg = l2e_get_page(l2tab[j]);
-
-                    if ( l2e_get_flags(l2tab[j]) & _PAGE_AVAIL0 )
-                    {
-                        l1_pgentry_t *l1tab = __map_domain_page(l1pg);
-                        unsigned int k;
-
-                        for ( k = 0; k < L1_PAGETABLE_ENTRIES; ++k )
-                            if ( (l1e_get_flags(l1tab[k]) &
-                                  (_PAGE_PRESENT | _PAGE_AVAIL0)) ==
-                                 (_PAGE_PRESENT | _PAGE_AVAIL0) )
-                                free_domheap_page(l1e_get_page(l1tab[k]));
-
-                        unmap_domain_page(l1tab);
-                    }
-
-                    if ( is_xen_heap_page(l1pg) )
-                        free_xenheap_page(page_to_virt(l1pg));
-                    else
-                        free_domheap_page(l1pg);
-                }
-
-            unmap_domain_page(l2tab);
-            free_domheap_page(l2pg);
-        }
-
-    unmap_domain_page(l3tab);
-    free_domheap_page(d->arch.perdomain_l3_pg);
-    d->arch.perdomain_l3_pg = NULL;
-}
-
 #ifdef MEMORY_GUARD
 
 static void __memguard_change_range(void *p, unsigned long l, int guard)
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index 60a88bd..cce7541 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -91,26 +91,11 @@ int switch_compat(struct domain *d)
     return rc;
 }
 
-static int pv_create_gdt_ldt_l1tab(struct vcpu *v)
-{
-    return create_perdomain_mapping(v->domain, GDT_VIRT_START(v),
-                                    1U << GDT_LDT_VCPU_SHIFT,
-                                    v->domain->arch.pv_domain.gdt_ldt_l1tab,
-                                    NULL);
-}
-
-static void pv_destroy_gdt_ldt_l1tab(struct vcpu *v)
-{
-    destroy_perdomain_mapping(v->domain, GDT_VIRT_START(v),
-                              1U << GDT_LDT_VCPU_SHIFT);
-}
-
 void pv_vcpu_destroy(struct vcpu *v)
 {
     if ( is_pv_32bit_vcpu(v) )
         release_compat_l4(v);
 
-    pv_destroy_gdt_ldt_l1tab(v);
     xfree(v->arch.pv_vcpu.trap_ctxt);
     v->arch.pv_vcpu.trap_ctxt = NULL;
 }
@@ -122,10 +107,6 @@ int pv_vcpu_initialise(struct vcpu *v)
 
     ASSERT(!is_idle_domain(d));
 
-    rc = pv_create_gdt_ldt_l1tab(v);
-    if ( rc )
-        return rc;
-
     BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv_vcpu.trap_ctxt) >
                  PAGE_SIZE);
     v->arch.pv_vcpu.trap_ctxt = xzalloc_array(struct trap_info,
@@ -147,6 +128,8 @@ int pv_vcpu_initialise(struct vcpu *v)
             goto done;
     }
 
+    rc = 0; /* Success */
+
  done:
     if ( rc )
         pv_vcpu_destroy(v);
@@ -155,14 +138,8 @@ int pv_vcpu_initialise(struct vcpu *v)
 
 void pv_domain_destroy(struct domain *d)
 {
-    destroy_perdomain_mapping(d, GDT_LDT_VIRT_START,
-                              GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
-
     xfree(d->arch.pv_domain.cpuidmasks);
     d->arch.pv_domain.cpuidmasks = NULL;
-
-    free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
-    d->arch.pv_domain.gdt_ldt_l1tab = NULL;
 }
 
 
@@ -176,12 +153,6 @@ int pv_domain_initialise(struct domain *d, unsigned int domcr_flags,
     };
     int rc = -ENOMEM;
 
-    d->arch.pv_domain.gdt_ldt_l1tab =
-        alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
-    if ( !d->arch.pv_domain.gdt_ldt_l1tab )
-        goto fail;
-    clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
-
     if ( levelling_caps & ~LCAP_faulting )
     {
         d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
@@ -190,12 +161,6 @@ int pv_domain_initialise(struct domain *d, unsigned int domcr_flags,
         *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
     }
 
-    rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
-                                  GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
-                                  NULL, NULL);
-    if ( rc )
-        goto fail;
-
     d->arch.ctxt_switch = &pv_csw;
 
     /* 64-bit PV guest by default. */
diff --git a/xen/include/asm-x86/config.h b/xen/include/asm-x86/config.h
index 62549a8..cf6f1be 100644
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -131,9 +131,6 @@ extern unsigned char boot_edid_info[128];
  *    Guest linear page table.
  *  0xffff820000000000 - 0xffff827fffffffff [512GB, 2^39 bytes, PML4:260]
  *    Shadow linear page table.
- *
- *                !!! WARNING - TEMPORARILY STALE BELOW !!!
- *
  *  0xffff828000000000 - 0xffff82bfffffffff [256GB, 2^38 bytes, PML4:261]
  *    Machine-to-phys translation table.
  *  0xffff82c000000000 - 0xffff82cfffffffff [64GB,  2^36 bytes, PML4:261]
@@ -207,17 +204,8 @@ extern unsigned char boot_edid_info[128];
 /* Slot 260: linear page table (shadow table). */
 #define SH_LINEAR_PT_VIRT_START (PML4_ADDR(260))
 #define SH_LINEAR_PT_VIRT_END   (SH_LINEAR_PT_VIRT_START + PML4_ENTRY_BYTES)
-/* Slot 261: per-domain mappings (including map cache). */
-#define PERDOMAIN_VIRT_START    (PML4_ADDR(261))
-#define PERDOMAIN_SLOT_MBYTES   (PML4_ENTRY_BYTES >> (20 + PAGETABLE_ORDER))
-#define PERDOMAIN_SLOTS         3
-#define PERDOMAIN_VIRT_SLOT(s)  (PERDOMAIN_VIRT_START + (s) * \
-                                 (PERDOMAIN_SLOT_MBYTES << 20))
-/*
- *                !!! WARNING - TEMPORARILY STALE BELOW !!!
- */
 /* Slot 261: machine-to-phys conversion table (256GB). */
-#define RDWR_MPT_VIRT_START     (PML4_ADDR(262))
+#define RDWR_MPT_VIRT_START     (PML4_ADDR(261))
 #define RDWR_MPT_VIRT_END       (RDWR_MPT_VIRT_START + MPT_VIRT_SIZE)
 /* Slot 261: vmap()/ioremap()/fixmap area (64GB). */
 #define VMAP_VIRT_START         RDWR_MPT_VIRT_END
@@ -245,12 +233,12 @@ extern unsigned char boot_edid_info[128];
 
 #ifndef CONFIG_BIGMEM
 /* Slot 262-271/510: A direct 1:1 mapping of all of physical memory. */
-#define DIRECTMAP_VIRT_START    (PML4_ADDR(263))
-#define DIRECTMAP_SIZE          (PML4_ENTRY_BYTES * (511 - 263))
+#define DIRECTMAP_VIRT_START    (PML4_ADDR(262))
+#define DIRECTMAP_SIZE          (PML4_ENTRY_BYTES * (511 - 262))
 #else
 /* Slot 265-271/510: A direct 1:1 mapping of all of physical memory. */
-#define DIRECTMAP_VIRT_START    (PML4_ADDR(266))
-#define DIRECTMAP_SIZE          (PML4_ENTRY_BYTES * (511 - 266))
+#define DIRECTMAP_VIRT_START    (PML4_ADDR(265))
+#define DIRECTMAP_SIZE          (PML4_ENTRY_BYTES * (511 - 265))
 #endif
 #define DIRECTMAP_VIRT_END      (DIRECTMAP_VIRT_START + DIRECTMAP_SIZE)
 
@@ -308,19 +296,7 @@ extern unsigned long xen_phys_start;
 #define PERCPU_LDT_MAPPING       (PERCPU_LINEAR_START + MB(11))
 #define PERCPU_LDT_MAPPING_END   (PERCPU_LDT_MAPPING + 0x10000)
 
-/* GDT/LDT shadow mapping area. The first per-domain-mapping sub-area. */
-#define GDT_LDT_VCPU_SHIFT       5
-#define GDT_LDT_VCPU_VA_SHIFT    (GDT_LDT_VCPU_SHIFT + PAGE_SHIFT)
-#define GDT_LDT_MBYTES           PERDOMAIN_SLOT_MBYTES
-#define MAX_VIRT_CPUS            (GDT_LDT_MBYTES << (20-GDT_LDT_VCPU_VA_SHIFT))
-#define GDT_LDT_VIRT_START       PERDOMAIN_VIRT_SLOT(0)
-#define GDT_LDT_VIRT_END         (GDT_LDT_VIRT_START + (GDT_LDT_MBYTES << 20))
-
-/* The address of a particular VCPU's GDT or LDT. */
-#define GDT_VIRT_START(v)    \
-    (PERDOMAIN_VIRT_START + ((v)->vcpu_id << GDT_LDT_VCPU_VA_SHIFT))
-#define LDT_VIRT_START(v)    \
-    (GDT_VIRT_START(v) + (64*1024))
+#define MAX_VIRT_CPUS            8192
 
 #define NATIVE_VM_ASSIST_VALID   ((1UL << VMASST_TYPE_4gb_segments)        | \
                                   (1UL << VMASST_TYPE_4gb_segments_notify) | \
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 108b3a4..ac75248 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -211,8 +211,6 @@ struct time_scale {
 
 struct pv_domain
 {
-    l1_pgentry_t **gdt_ldt_l1tab;
-
     atomic_t nr_l4_pages;
 
     struct cpuidmasks *cpuidmasks;
@@ -235,8 +233,6 @@ struct monitor_write_data {
 
 struct arch_domain
 {
-    struct page_info *perdomain_l3_pg;
-
     unsigned int hv_compat_vstart;
 
     /* Maximum physical-address bitwidth supported by this guest. */
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 54b7499..22c2809 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -591,16 +591,6 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
 int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void));
 int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));
 
-#define NIL(type) ((type *)-sizeof(type))
-#define IS_NIL(ptr) (!((uintptr_t)(ptr) + sizeof(*(ptr))))
-
-int create_perdomain_mapping(struct domain *, unsigned long va,
-                             unsigned int nr, l1_pgentry_t **,
-                             struct page_info **);
-void destroy_perdomain_mapping(struct domain *, unsigned long va,
-                               unsigned int nr);
-void free_perdomain_mappings(struct domain *);
-
 extern int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm);
 
 void domain_set_alloc_bitsize(struct domain *d);
-- 
2.1.4


 

