
To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] HL2's are now filled in on demand, rather than by doing the entire thing
From: BitKeeper Bot <riel@xxxxxxxxxxx>
Date: Tue, 22 Mar 2005 18:02:32 +0000
Delivery-date: Tue, 05 Apr 2005 16:16:09 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: Xen Development List <xen-devel@xxxxxxxxxxxxxxxxxxxxx>
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
ChangeSet 1.1254, 2005/03/22 18:02:32+00:00, mafetter@xxxxxxxxxxxxxxxx

        HL2 tables are now filled in on demand, rather than by populating
        the entire table at creation.  Also fixed a bug in HL2 ref
        counting: HL2 entries don't take a writable ref to the guest
        pages, as they are Xen mappings, not guest mappings.  Also fixed
        a TLB flushing bug with HL2 entries.
        
        Bug fix for shadow table ref counting.  In theory, CR3's shadow
        table could get released while CR3 was still pointing at it.
        Fixed.
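
        The shape of the fix, reduced to its core (field and helper names
        are as in the mm.c hunk below):

            if ( ed->arch.monitor_shadow_ref )
                put_shadow_ref(ed->arch.monitor_shadow_ref);  /* unpin the old shadow */
            ed->arch.monitor_shadow_ref =
                pagetable_val(ed->arch.monitor_table) >> PAGE_SHIFT;
            get_shadow_ref(ed->arch.monitor_shadow_ref);      /* pin the new one */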
        
        Bug fix for the shadow code's handling of TLB flushes from
        hypercalls.
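
        Concretely, every flush site in mm.c now syncs the shadows before
        touching the hardware TLB; the rationale in the comments below is
        an editorial gloss:

            if ( shadow_mode_enabled(d) )
                shadow_sync_all(d);  /* bring the shadows up to date first... */
            local_flush_tlb();       /* ...then flush, so the TLB cannot hold
                                      * translations from pre-sync entries.   */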
        
        Signed-off-by: michael.fetterman@xxxxxxxxxxxx



 arch/x86/audit.c         |    2 +
 arch/x86/mm.c            |   20 +++++++++++---
 arch/x86/shadow.c        |   25 ++++-------------
 include/asm-x86/domain.h |    2 +
 include/asm-x86/shadow.h |   66 ++++++++++++++++++++++++++++++++++-------------
 include/xen/perfc_defn.h |    1 +
 6 files changed, 76 insertions(+), 40 deletions(-)


diff -Nru a/xen/arch/x86/audit.c b/xen/arch/x86/audit.c
--- a/xen/arch/x86/audit.c      2005-04-05 12:16:13 -04:00
+++ b/xen/arch/x86/audit.c      2005-04-05 12:16:13 -04:00
@@ -411,6 +411,8 @@
                     if ( pagetable_val(ed->arch.shadow_table) )
                         adjust(&frame_table[pagetable_val(ed->arch.shadow_table)
                                             >> PAGE_SHIFT], 0);
+                    if ( ed->arch.monitor_shadow_ref )
+                        adjust(&frame_table[ed->arch.monitor_shadow_ref], 0);
                 }
             }
     }
diff -Nru a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c 2005-04-05 12:16:13 -04:00
+++ b/xen/arch/x86/mm.c 2005-04-05 12:16:13 -04:00
@@ -1271,6 +1271,18 @@
             put_page(&frame_table[old_base_mfn]);
         else
             put_page_and_type(&frame_table[old_base_mfn]);
+
+        // CR3 holds its own ref to its shadow...
+        //
+        if ( shadow_mode_enabled(d) )
+        {
+            if ( ed->arch.monitor_shadow_ref )
+                put_shadow_ref(ed->arch.monitor_shadow_ref);
+            ed->arch.monitor_shadow_ref =
+                pagetable_val(ed->arch.monitor_table) >> PAGE_SHIFT;
+            ASSERT(page_get_owner(&frame_table[ed->arch.monitor_shadow_ref]) == NULL);
+            get_shadow_ref(ed->arch.monitor_shadow_ref);
+        }
     }
     else
     {
@@ -1386,9 +1398,9 @@
         break;
     
     case MMUEXT_INVLPG:
-        __flush_tlb_one(ptr);
         if ( shadow_mode_enabled(d) )
             shadow_invlpg(ed, ptr);
+        __flush_tlb_one(ptr);
         break;
 
     case MMUEXT_FLUSH_CACHE:
@@ -1940,9 +1952,9 @@
 
     if ( deferred_ops & DOP_FLUSH_TLB )
     {
-        local_flush_tlb();
         if ( shadow_mode_enabled(d) )
             shadow_sync_all(d);
+        local_flush_tlb();
     }
         
     if ( deferred_ops & DOP_RELOAD_LDT )
@@ -2072,15 +2084,15 @@
     if ( unlikely(deferred_ops & DOP_FLUSH_TLB) || 
          unlikely(flags & UVMF_FLUSH_TLB) )
     {
-        local_flush_tlb();
         if ( unlikely(shadow_mode_enabled(d)) )
             shadow_sync_all(d);
+        local_flush_tlb();
     }
     else if ( unlikely(flags & UVMF_INVLPG) )
     {
-        __flush_tlb_one(va);
         if ( unlikely(shadow_mode_enabled(d)) )
             shadow_invlpg(current, va);
+        __flush_tlb_one(va);
     }
 
     if ( unlikely(deferred_ops & DOP_RELOAD_LDT) )
diff -Nru a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     2005-04-05 12:16:13 -04:00
+++ b/xen/arch/x86/shadow.c     2005-04-05 12:16:14 -04:00
@@ -62,10 +62,14 @@
     if ( unlikely(page_is_page_table(page)) )
         return 1;
 
-    FSH_LOG("shadow_promote gpfn=%p gmfn=%p nt=%p", gpfn, gmfn, new_type);
+    FSH_LOG("%s: gpfn=%p gmfn=%p nt=%p", __func__, gpfn, gmfn, new_type);
 
     if ( !shadow_remove_all_write_access(d, gpfn, gmfn) )
+    {
+        FSH_LOG("%s: couldn't find/remove all write accesses, gpfn=%p gmfn=%p\n",
+                __func__, gpfn, gmfn);
         return 0;
+    }
 
     // To convert this page to use as a page table, the writable count
     // should now be zero.  Test this by grabbing the page as an page table,
@@ -1236,8 +1240,7 @@
 {
     unsigned long hl2mfn;
     l1_pgentry_t *hl2;
-    l2_pgentry_t *gl2;
-    int i, limit;
+    int limit;
 
     ASSERT(PGT_base_page_table == PGT_l2_page_table);
 
@@ -1249,7 +1252,6 @@
 
     perfc_incrc(shadow_hl2_table_count);
 
-    gl2 = map_domain_mem(gmfn << PAGE_SHIFT);
     hl2 = map_domain_mem(hl2mfn << PAGE_SHIFT);
 
     if ( shadow_mode_external(d) )
@@ -1257,19 +1259,7 @@
     else
         limit = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
 
-    for ( i = 0; i < limit; i++ )
-    {
-        unsigned long gl2e = l2_pgentry_val(gl2[i]);
-        unsigned long hl2e;
-
-        hl2e_propagate_from_guest(d, gl2e, &hl2e);
-
-        if ( (hl2e & _PAGE_PRESENT) &&
-             !get_page(pfn_to_page(hl2e >> PAGE_SHIFT), d) )
-            hl2e = 0;
-
-        hl2[i] = mk_l1_pgentry(hl2e);
-    }
+    memset(hl2, 0, limit * sizeof(l1_pgentry_t));
 
     if ( !shadow_mode_external(d) )
     {
@@ -1287,7 +1277,6 @@
     }
 
     unmap_domain_mem(hl2);
-    unmap_domain_mem(gl2);
 
     return hl2mfn;
 }
diff -Nru a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      2005-04-05 12:16:13 -04:00
+++ b/xen/include/asm-x86/domain.h      2005-04-05 12:16:13 -04:00
@@ -122,6 +122,8 @@
     l2_pgentry_t *monitor_vtable;              /* virtual address of monitor_table */
     l1_pgentry_t *hl2_vtable;                  /* virtual address of hl2_table */
 
+    unsigned long monitor_shadow_ref;
+
     /* Virtual CR2 value. Can be read/written by guest. */
     unsigned long guest_cr2;
 
diff -Nru a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      2005-04-05 12:16:13 -04:00
+++ b/xen/include/asm-x86/shadow.h      2005-04-05 12:16:13 -04:00
@@ -67,6 +67,7 @@
 
 static inline unsigned long __shadow_status(
     struct domain *d, unsigned long gpfn, unsigned long stype);
+static inline void update_hl2e(struct exec_domain *ed, unsigned long va);
 
 extern void vmx_shadow_clear_state(struct domain *);
 
@@ -121,6 +122,11 @@
         //
         __shadow_sync_all(ed->domain);
     }
+
+    // Also make sure the HL2 is up-to-date for this address.
+    //
+    if ( unlikely(shadow_mode_translate(ed->domain)) )
+        update_hl2e(ed, va);
 }
 
 static void inline
@@ -314,34 +320,58 @@
 __guest_set_l2e(
     struct exec_domain *ed, unsigned long va, unsigned long value)
 {
+    ed->arch.guest_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
+
     if ( unlikely(shadow_mode_translate(ed->domain)) )
-    {
-        unsigned long mfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
-        unsigned long old_hl2e =
-            l1_pgentry_val(ed->arch.hl2_vtable[l2_table_offset(va)]);
-        unsigned long new_hl2e =
-            (VALID_MFN(mfn) ? ((mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR) : 0);
+        update_hl2e(ed, va);
+}
 
-        // only do the ref counting if something important changed.
-        //
-        if ( (old_hl2e ^ new_hl2e) & (PAGE_MASK | _PAGE_PRESENT) )
+static inline void
+update_hl2e(struct exec_domain *ed, unsigned long va)
+{
+    int index = l2_table_offset(va);
+    unsigned long gl2e = l2_pgentry_val(ed->arch.guest_vtable[index]);
+    unsigned long mfn;
+    unsigned long old_hl2e, new_hl2e;
+    int need_flush = 0;
+
+    ASSERT(shadow_mode_translate(ed->domain));
+
+    old_hl2e = l1_pgentry_val(ed->arch.hl2_vtable[index]);
+
+    if ( (gl2e & _PAGE_PRESENT) &&
+         VALID_MFN(mfn = phys_to_machine_mapping(gl2e >> PAGE_SHIFT)) )
+        new_hl2e = (mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR;
+    else
+        new_hl2e = 0;
+
+    // only do the ref counting if something important changed.
+    //
+    if ( (old_hl2e ^ new_hl2e) & (PAGE_MASK | _PAGE_PRESENT) )
+    {
+        if ( (new_hl2e & _PAGE_PRESENT) &&
+             !get_page(pfn_to_page(new_hl2e >> PAGE_SHIFT), ed->domain) )
+            new_hl2e = 0;
+        if ( old_hl2e & _PAGE_PRESENT )
         {
-            if ( (new_hl2e & _PAGE_PRESENT) &&
-                 !shadow_get_page_from_l1e(mk_l1_pgentry(new_hl2e), ed->domain) )
-                new_hl2e = 0;
-            if ( old_hl2e & _PAGE_PRESENT )
-                put_page_from_l1e(mk_l1_pgentry(old_hl2e), ed->domain);
+            put_page(pfn_to_page(old_hl2e >> PAGE_SHIFT));
+            need_flush = 1;
         }
-
-        ed->arch.hl2_vtable[l2_table_offset(va)] = mk_l1_pgentry(new_hl2e);
     }
 
-    ed->arch.guest_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
+    ed->arch.hl2_vtable[l2_table_offset(va)] = mk_l1_pgentry(new_hl2e);
+
+    if ( need_flush )
+    {
+        perfc_incrc(update_hl2e_invlpg);
+        __flush_tlb_one(&linear_pg_table[l1_linear_offset(va)]);
+    }
 }
 
+
 /************************************************************************/
 
-//#define MFN3_TO_WATCH 0x1ff6e
+//#define MFN3_TO_WATCH 0x8575
 #ifdef MFN3_TO_WATCH

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
