[Xen-changelog] [xen-unstable] p2m: move phystable into p2m

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] p2m: move phystable into p2m
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 21 May 2010 08:25:13 -0700
Delivery-date: Fri, 21 May 2010 08:27:04 -0700
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1274365849 -3600
# Node ID e2a5a8bfeea7c605a752c1b5d3fc82187dc9e149
# Parent  c2155204075d14fd5d56de2b6394044298626d69
p2m: move phystable into p2m

Moves phys_table from struct domain to struct p2m_domain, adds the
p2m_get_hostp2m() and p2m_get_pagetable() accessors, and converts all
users to them.

Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
Acked-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
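The shape of the change, as a minimal before/after sketch. The old_way/new_way
wrappers are illustrative only; the accessor macros and pagetable_get_paddr()
are the ones actually used in the hunks below:

    /* Before: the p2m root pagetable hung directly off struct domain. */
    paddr_t old_way(struct vcpu *v)
    {
        return pagetable_get_paddr(v->domain->arch.phys_table);
    }

    /* After: it lives in struct p2m_domain, reached through two new
     * macros from xen/include/asm-x86/p2m.h:
     *   p2m_get_hostp2m(d)      -> (d)->arch.p2m
     *   p2m_get_pagetable(p2m)  -> (p2m)->phys_table
     */
    paddr_t new_way(struct vcpu *v)
    {
        struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);

        return pagetable_get_paddr(p2m_get_pagetable(p2m));
    }
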
 xen/arch/x86/hvm/svm/svm.c       |    3 ++-
 xen/arch/x86/hvm/svm/vmcb.c      |    4 ++--
 xen/arch/x86/hvm/vmx/vmx.c       |    2 +-
 xen/arch/x86/mm/hap/hap.c        |    7 ++++---
 xen/arch/x86/mm/hap/p2m-ept.c    |   17 ++++++++++-------
 xen/arch/x86/mm/p2m.c            |   29 +++++++++++++++--------------
 xen/arch/x86/mm/shadow/common.c  |    2 +-
 xen/arch/x86/mm/shadow/multi.c   |    6 +++---
 xen/include/asm-ia64/p2m_entry.h |    2 ++
 xen/include/asm-x86/domain.h     |    3 ---
 xen/include/asm-x86/p2m.h        |    9 +++++++++
 11 files changed, 49 insertions(+), 35 deletions(-)

diff -r c2155204075d -r e2a5a8bfeea7 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu May 20 14:12:14 2010 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu May 20 15:30:49 2010 +0100
@@ -217,6 +217,7 @@ static int svm_vmcb_restore(struct vcpu 
     unsigned long mfn = 0;
     p2m_type_t p2mt;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
 
     if ( c->pending_valid &&
          ((c->pending_type == 1) || (c->pending_type > 6) ||
@@ -262,7 +263,7 @@ static int svm_vmcb_restore(struct vcpu 
     {
         vmcb->np_enable = 1;
         vmcb->g_pat = MSR_IA32_CR_PAT_RESET; /* guest PAT */
-        vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
+        vmcb->h_cr3 = pagetable_get_paddr(p2m_get_pagetable(p2m));
     }
 
     if ( c->pending_valid ) 
diff -r c2155204075d -r e2a5a8bfeea7 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Thu May 20 14:12:14 2010 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Thu May 20 15:30:49 2010 +0100
@@ -26,7 +26,7 @@
 #include <asm/cpufeature.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
-#include <asm/paging.h>
+#include <asm/p2m.h>
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/io.h>
 #include <asm/hvm/support.h>
@@ -232,7 +232,7 @@ static int construct_vmcb(struct vcpu *v
     {
         vmcb->np_enable = 1; /* enable nested paging */
         vmcb->g_pat = MSR_IA32_CR_PAT_RESET; /* guest PAT */
-        vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
+        vmcb->h_cr3 = pagetable_get_paddr(p2m_get_pagetable(p2m_get_hostp2m(v->domain)));
 
         /* No point in intercepting CR3 reads/writes. */
         vmcb->cr_intercepts &= ~(CR_INTERCEPT_CR3_READ|CR_INTERCEPT_CR3_WRITE);
diff -r c2155204075d -r e2a5a8bfeea7 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu May 20 14:12:14 2010 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu May 20 15:30:49 2010 +0100
@@ -81,7 +81,7 @@ static int vmx_domain_initialise(struct 
     d->arch.hvm_domain.vmx.ept_control.etmt = EPT_DEFAULT_MT;
     d->arch.hvm_domain.vmx.ept_control.gaw  = EPT_DEFAULT_GAW;
     d->arch.hvm_domain.vmx.ept_control.asr  =
-        pagetable_get_pfn(d->arch.phys_table);
+        pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
 
 
     if ( (rc = vmx_alloc_vlapic_mapping(d)) != 0 )
diff -r c2155204075d -r e2a5a8bfeea7 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Thu May 20 14:12:14 2010 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Thu May 20 15:30:49 2010 +0100
@@ -410,7 +410,7 @@ static void hap_install_xen_entries_in_l
 
     /* Install the domain-specific P2M table */
     l4e[l4_table_offset(RO_MPT_VIRT_START)] =
-        l4e_from_pfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)),
+        l4e_from_pfn(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))),
                      __PAGE_HYPERVISOR);
 
     hap_unmap_domain_page(l4e);
@@ -421,6 +421,7 @@ static void hap_install_xen_entries_in_l
 static void hap_install_xen_entries_in_l2h(struct vcpu *v, mfn_t l2hmfn)
 {
     struct domain *d = v->domain;
+    struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
     l2_pgentry_t *l2e;
     l3_pgentry_t *p2m;
     int i;
@@ -446,8 +447,8 @@ static void hap_install_xen_entries_in_l
             l2e_empty();
 
     /* Install the domain-specific p2m table */
-    ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
-    p2m = hap_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
+    ASSERT(pagetable_get_pfn(p2m_get_pagetable(hostp2m)) != 0);
+    p2m = hap_map_domain_page(pagetable_get_mfn(p2m_get_pagetable(hostp2m)));
     for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
     {
         l2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
diff -r c2155204075d -r e2a5a8bfeea7 xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c     Thu May 20 14:12:14 2010 +0100
+++ b/xen/arch/x86/mm/hap/p2m-ept.c     Thu May 20 15:30:49 2010 +0100
@@ -242,12 +242,13 @@ ept_set_entry(struct domain *d, unsigned
     int direct_mmio = (p2mt == p2m_mmio_direct);
     uint8_t ipat = 0;
     int need_modify_vtd_table = 1;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     if (  order != 0 )
         if ( (gfn & ((1UL << order) - 1)) )
             return 1;
 
-    table = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+    table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
 
     ASSERT(table != NULL);
 
@@ -370,7 +371,7 @@ static mfn_t ept_get_entry(struct domain
                            p2m_query_t q)
 {
     ept_entry_t *table =
-        map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+        map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
     unsigned long gfn_remainder = gfn;
     ept_entry_t *ept_entry;
     u32 index;
@@ -464,7 +465,7 @@ static ept_entry_t ept_get_entry_content
 static ept_entry_t ept_get_entry_content(struct domain *d, unsigned long gfn, int *level)
 {
     ept_entry_t *table =
-        map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+        map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
     unsigned long gfn_remainder = gfn;
     ept_entry_t *ept_entry;
     ept_entry_t content = { .epte = 0 };
@@ -499,7 +500,7 @@ void ept_walk_table(struct domain *d, un
 void ept_walk_table(struct domain *d, unsigned long gfn)
 {
     ept_entry_t *table =
-        map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+        map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
     unsigned long gfn_remainder = gfn;
 
     int i;
@@ -639,12 +640,12 @@ static void ept_change_entry_type_global
     int i2;
     int i1;
 
-    if ( pagetable_get_pfn(d->arch.phys_table) == 0 )
+    if ( pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) == 0 )
         return;
 
     BUG_ON(EPT_DEFAULT_GAW != 3);
 
-    l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+    l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
     for (i4 = 0; i4 < EPT_PAGETABLE_ENTRIES; i4++ )
     {
         if ( !l4e[i4].epte )
@@ -739,12 +740,14 @@ static void ept_dump_p2m_table(unsigned 
     unsigned long index;
     unsigned long gfn, gfn_remainder;
     unsigned long record_counter = 0;
+    struct p2m_domain *p2m;
 
     for_each_domain(d)
     {
         if ( !(is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled) )
             continue;
 
+        p2m = p2m_get_hostp2m(d);
         printk("\ndomain%d EPT p2m table: \n", d->domain_id);
 
         for ( gfn = 0; gfn <= d->arch.p2m->max_mapped_pfn; gfn += (1 << order) )
@@ -752,7 +755,7 @@ static void ept_dump_p2m_table(unsigned 
             gfn_remainder = gfn;
             mfn = _mfn(INVALID_MFN);
             table =
-                map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+                map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
 
             for ( i = EPT_DEFAULT_GAW; i > 0; i-- )
             {
diff -r c2155204075d -r e2a5a8bfeea7 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu May 20 14:12:14 2010 +0100
+++ b/xen/arch/x86/mm/p2m.c     Thu May 20 15:30:49 2010 +0100
@@ -1253,7 +1253,7 @@ p2m_set_entry(struct domain *d, unsigned
               unsigned int page_order, p2m_type_t p2mt)
 {
     // XXX -- this might be able to be faster iff current->domain == d
-    mfn_t table_mfn = pagetable_get_mfn(d->arch.phys_table);
+    mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
     void *table =map_domain_page(mfn_x(table_mfn));
     unsigned long i, gfn_remainder = gfn;
     l1_pgentry_t *p2m_entry;
@@ -1408,7 +1408,7 @@ p2m_gfn_to_mfn(struct domain *d, unsigne
      * XXX we will return p2m_invalid for unmapped gfns */
     *t = p2m_mmio_dm;
 
-    mfn = pagetable_get_mfn(d->arch.phys_table);
+    mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
 
     if ( gfn > d->arch.p2m->max_mapped_pfn )
         /* This pfn is higher than the highest the p2m map currently holds */
@@ -1798,11 +1798,11 @@ int p2m_alloc_table(struct domain *d,
     struct page_info *page, *p2m_top;
     unsigned int page_count = 0;
     unsigned long gfn = -1UL;
-    struct p2m_domain *p2m = d->arch.p2m;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     p2m_lock(p2m);
 
-    if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
+    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
     {
         P2M_ERROR("p2m already allocated for this domain\n");
         p2m_unlock(p2m);
@@ -1828,7 +1828,7 @@ int p2m_alloc_table(struct domain *d,
         return -ENOMEM;
     }
 
-    d->arch.phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));
+    p2m->phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));
 
     P2M_PRINTK("populating p2m table\n");
 
@@ -1872,7 +1872,7 @@ void p2m_teardown(struct domain *d)
  * We know we don't have any extra mappings to these pages */
 {
     struct page_info *pg;
-    struct p2m_domain *p2m = d->arch.p2m;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     unsigned long gfn;
     p2m_type_t t;
     mfn_t mfn;
@@ -1884,7 +1884,7 @@ void p2m_teardown(struct domain *d)
         if(mfn_valid(mfn) && (t == p2m_ram_shared))
             BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN));
     }
-    d->arch.phys_table = pagetable_null();
+    p2m->phys_table = pagetable_null();
 
     while ( (pg = page_list_remove_head(&p2m->pages)) )
         p2m->free_page(d, pg);
@@ -1995,7 +1995,7 @@ static void audit_p2m(struct domain *d)
     spin_unlock(&d->page_alloc_lock);
 
     /* Audit part two: walk the domain's p2m table, checking the entries. */
-    if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
+    if ( pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) != 0 )
     {
         l2_pgentry_t *l2e;
         l1_pgentry_t *l1e;
@@ -2005,11 +2005,11 @@ static void audit_p2m(struct domain *d)
         l4_pgentry_t *l4e;
         l3_pgentry_t *l3e;
         int i3, i4;
-        l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+        l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
 #else /* CONFIG_PAGING_LEVELS == 3 */
         l3_pgentry_t *l3e;
         int i3;
-        l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+        l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
 #endif
 
         gfn = 0;
@@ -2421,22 +2421,23 @@ void p2m_change_type_global(struct domai
     l4_pgentry_t *l4e;
     unsigned long i4;
 #endif /* CONFIG_PAGING_LEVELS == 4 */
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
 
     if ( !paging_mode_translate(d) )
         return;
 
-    if ( pagetable_get_pfn(d->arch.phys_table) == 0 )
+    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) == 0 )
         return;
 
     ASSERT(p2m_locked_by_me(d->arch.p2m));
 
 #if CONFIG_PAGING_LEVELS == 4
-    l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+    l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
 #else /* CONFIG_PAGING_LEVELS == 3 */
-    l3mfn = _mfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
-    l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+    l3mfn = _mfn(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+    l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
 #endif
 
 #if CONFIG_PAGING_LEVELS >= 4
diff -r c2155204075d -r e2a5a8bfeea7 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Thu May 20 14:12:14 2010 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Thu May 20 15:30:49 2010 +0100
@@ -3177,7 +3177,7 @@ int shadow_enable(struct domain *d, u32 
  out_locked:
     shadow_unlock(d);
  out_unlocked:
-    if ( rv != 0 && !pagetable_is_null(d->arch.phys_table) )
+    if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m_get_hostp2m(d))) )
         p2m_teardown(d);
     if ( rv != 0 && pg != NULL )
         shadow_free_p2m_page(d, pg);
diff -r c2155204075d -r e2a5a8bfeea7 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Thu May 20 14:12:14 2010 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Thu May 20 15:30:49 2010 +0100
@@ -1476,7 +1476,7 @@ void sh_install_xen_entries_in_l4(struct
     {
         /* install domain-specific P2M table */
         sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
-            shadow_l4e_from_mfn(pagetable_get_mfn(d->arch.phys_table),
+            shadow_l4e_from_mfn(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))),
                                 __PAGE_HYPERVISOR);
     }
 
@@ -1535,8 +1535,8 @@ static void sh_install_xen_entries_in_l2
     {
         /* Install the domain-specific p2m table */
         l3_pgentry_t *p2m;
-        ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
-        p2m = sh_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
+        ASSERT(pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) != 0);
+        p2m = sh_map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))));
         for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
         {
             sl2e[shadow_l2_table_offset(RO_MPT_VIRT_START) + i] =
diff -r c2155204075d -r e2a5a8bfeea7 xen/include/asm-ia64/p2m_entry.h
--- a/xen/include/asm-ia64/p2m_entry.h  Thu May 20 14:12:14 2010 +0100
+++ b/xen/include/asm-ia64/p2m_entry.h  Thu May 20 15:30:49 2010 +0100
@@ -63,6 +63,8 @@ p2m_entry_retry(struct p2m_entry* entry)
 #endif
 }
 
+#define p2m_get_hostp2m(d) (d)
+
 #endif // __ASM_P2M_ENTRY_H__
 
 /*
diff -r c2155204075d -r e2a5a8bfeea7 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Thu May 20 14:12:14 2010 +0100
+++ b/xen/include/asm-x86/domain.h      Thu May 20 15:30:49 2010 +0100
@@ -246,9 +246,6 @@ struct arch_domain
 
     struct paging_domain paging;
     struct p2m_domain *p2m;
-
-    /* Shadow translated domain: P2M mapping */
-    pagetable_t phys_table;
 
     /* NB. protected by d->event_lock and by irq_desc[irq].lock */
     int *irq_pirq;
diff -r c2155204075d -r e2a5a8bfeea7 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu May 20 14:12:14 2010 +0100
+++ b/xen/include/asm-x86/p2m.h Thu May 20 15:30:49 2010 +0100
@@ -29,6 +29,7 @@
 #include <xen/config.h>
 #include <xen/paging.h>
 #include <asm/mem_sharing.h>
+#include <asm/page.h>    /* for pagetable_t */
 
 /*
  * The phys_to_machine_mapping maps guest physical frame numbers 
@@ -165,6 +166,9 @@ struct p2m_domain {
     spinlock_t         lock;
     int                locker;   /* processor which holds the lock */
     const char        *locker_function; /* Func that took it */
+
+    /* Shadow translated domain: p2m mapping */
+    pagetable_t        phys_table;
 
     /* Pages used to construct the p2m */
     struct page_list_head pages;
@@ -215,6 +219,11 @@ struct p2m_domain {
     } pod;
 };
 
+/* get host p2m table */
+#define p2m_get_hostp2m(d)      ((d)->arch.p2m)
+
+#define p2m_get_pagetable(p2m)  ((p2m)->phys_table)
+
 /*
  * The P2M lock.  This protects all updates to the p2m table.
  * Updates are expected to be safe against concurrent reads,
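
With both accessors in place, call sites no longer need to know where the p2m
root pagetable is stored. A hedged usage sketch, mirroring the "!= 0" checks
converted in this patch; the helper name p2m_table_present is hypothetical,
not part of the change:

    #include <asm/p2m.h>

    /* Illustrative only: report whether p2m_alloc_table() has populated
     * this domain's host p2m root (a zero PFN means "not allocated"). */
    static int p2m_table_present(struct domain *d)
    {
        struct p2m_domain *p2m = p2m_get_hostp2m(d);

        return pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0;
    }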

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
