WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] [XEN] Allow log-dirty mode to be enabled on already-shadowed domains.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [XEN] Allow log-dirty mode to be enabled on already-shadowed domains.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Mon, 26 Feb 2007 15:45:37 -0800
Delivery-date: Mon, 26 Feb 2007 15:46:40 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Date 1172498161 0
# Node ID 720afbf740018c55ce984c09bbc61dbeb4f0d74e
# Parent  bfd4fad0f0529449a1204426770c226824c97e2e
[XEN] Allow log-dirty mode to be enabled on already-shadowed domains.
and catch a few missing mark_dirty() calls
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c           |    2 +
 xen/arch/x86/hvm/io.c            |   10 ++++++++
 xen/arch/x86/mm/shadow/common.c  |   47 ++++++++++++++-------------------------
 xen/arch/x86/mm/shadow/multi.c   |   34 ++++------------------------
 xen/arch/x86/mm/shadow/private.h |    3 --
 xen/include/asm-x86/shadow.h     |    5 ++--
 6 files changed, 39 insertions(+), 62 deletions(-)

diff -r bfd4fad0f052 -r 720afbf74001 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Mon Feb 26 11:53:35 2007 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Mon Feb 26 13:56:01 2007 +0000
@@ -398,6 +398,8 @@ static int __hvm_copy(void *buf, paddr_t
             memcpy(buf, p, count); /* dir == FALSE: *from guest */
 
         unmap_domain_page(p);
+        
+        mark_dirty(current->domain, mfn);
 
         addr += count;
         buf  += count;
diff -r bfd4fad0f052 -r 720afbf74001 xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c     Mon Feb 26 11:53:35 2007 +0000
+++ b/xen/arch/x86/hvm/io.c     Mon Feb 26 13:56:01 2007 +0000
@@ -33,6 +33,8 @@
 #include <asm/msr.h>
 #include <asm/apic.h>
 #include <asm/paging.h>
+#include <asm/shadow.h>
+#include <asm/p2m.h>
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/support.h>
 #include <asm/hvm/vpt.h>
@@ -739,6 +741,7 @@ void hvm_io_assist(struct vcpu *v)
     ioreq_t *p;
     struct cpu_user_regs *regs;
     struct hvm_io_op *io_opp;
+    unsigned long gmfn;
 
     io_opp = &v->arch.hvm_vcpu.io_op;
     regs   = &io_opp->io_context;
@@ -763,6 +766,13 @@ void hvm_io_assist(struct vcpu *v)
     /* Copy register changes back into current guest state. */
     hvm_load_cpu_guest_regs(v, regs);
     memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
+
+    /* Has memory been dirtied? */
+    if ( p->dir == IOREQ_READ && p->data_is_ptr ) 
+    {
+        gmfn = get_mfn_from_gpfn(paging_gva_to_gfn(v, p->data));
+        mark_dirty(v->domain, gmfn);
+    }
 }
 
 /*
diff -r bfd4fad0f052 -r 720afbf74001 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Mon Feb 26 11:53:35 2007 +0000
+++ b/xen/arch/x86/mm/shadow/common.c   Mon Feb 26 13:56:01 2007 +0000
@@ -981,7 +981,6 @@ mfn_t shadow_alloc(struct domain *d,
         INIT_LIST_HEAD(&sp[i].list);
         sp[i].type = shadow_type;
         sp[i].pinned = 0;
-        sp[i].logdirty = 0;
         sp[i].count = 0;
         sp[i].backpointer = backpointer;
         sp[i].next_shadow = NULL;
@@ -1230,7 +1229,6 @@ static unsigned int sh_set_allocation(st
             {
                 sp[j].type = 0;  
                 sp[j].pinned = 0;
-                sp[j].logdirty = 0;
                 sp[j].count = 0;
                 sp[j].mbz = 0;
                 sp[j].tlbflush_timestamp = 0; /* Not in any TLB */
@@ -2558,7 +2556,7 @@ static int shadow_one_bit_enable(struct 
     ASSERT(shadow_locked_by_me(d));
 
     /* Sanity check the call */
-    if ( d == current->domain || (d->arch.paging.mode & mode) )
+    if ( d == current->domain || (d->arch.paging.mode & mode) == mode )
     {
         return -EINVAL;
     }
@@ -2589,7 +2587,7 @@ static int shadow_one_bit_disable(struct
     ASSERT(shadow_locked_by_me(d));
 
     /* Sanity check the call */
-    if ( d == current->domain || !(d->arch.paging.mode & mode) )
+    if ( d == current->domain || !((d->arch.paging.mode & mode) == mode) )
     {
         return -EINVAL;
     }
@@ -2646,17 +2644,7 @@ static int shadow_test_enable(struct dom
 
     domain_pause(d);
     shadow_lock(d);
-
-    if ( shadow_mode_enabled(d) )
-    {
-        SHADOW_ERROR("Don't support enabling test mode"
-                      " on already shadowed doms\n");
-        ret = -EINVAL;
-        goto out;
-    }
-
     ret = shadow_one_bit_enable(d, PG_SH_enable);
- out:
     shadow_unlock(d);
     domain_unpause(d);
 
@@ -2722,10 +2710,10 @@ static int shadow_log_dirty_enable(struc
 
     if ( shadow_mode_enabled(d) )
     {
-        SHADOW_ERROR("Don't (yet) support enabling log-dirty"
-                      " on already shadowed doms\n");
-        ret = -EINVAL;
-        goto out;
+        /* This domain already has some shadows: need to clear them out 
+         * of the way to make sure that all references to guest memory are 
+         * properly write-protected */
+        shadow_blow_tables(d);
     }
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
@@ -2917,11 +2905,17 @@ void sh_mark_dirty(struct domain *d, mfn
 void sh_mark_dirty(struct domain *d, mfn_t gmfn)
 {
     unsigned long pfn;
-
-    ASSERT(shadow_locked_by_me(d));
+    int do_locking;
 
     if ( !shadow_mode_log_dirty(d) || !mfn_valid(gmfn) )
         return;
+
+    /* Although this is an externally visible function, we do not know
+     * whether the shadow lock will be held when it is called (since it
+     * can be called from __hvm_copy during emulation).
+     * If the lock isn't held, take it for the duration of the call. */
+    do_locking = !shadow_locked_by_me(d);
+    if ( do_locking ) shadow_lock(d);
 
     ASSERT(d->arch.paging.shadow.dirty_bitmap != NULL);
 
@@ -2962,13 +2956,8 @@ void sh_mark_dirty(struct domain *d, mfn
                        mfn_to_page(gmfn)->count_info, 
                        mfn_to_page(gmfn)->u.inuse.type_info);
     }
-}
-
-void shadow_mark_dirty(struct domain *d, mfn_t gmfn)
-{
-    shadow_lock(d);
-    sh_mark_dirty(d, gmfn);
-    shadow_unlock(d);
+
+    if ( do_locking ) shadow_unlock(d);
 }
 
 /**************************************************************************/
@@ -2992,9 +2981,7 @@ int shadow_domctl(struct domain *d,
         if ( shadow_mode_log_dirty(d) )
             if ( (rc = shadow_log_dirty_disable(d)) != 0 ) 
                 return rc;
-        if ( is_hvm_domain(d) )
-            return -EINVAL;
-        if ( d->arch.paging.mode & PG_SH_enable )
+        if ( d->arch.paging.mode == PG_SH_enable )
             if ( (rc = shadow_test_disable(d)) != 0 ) 
                 return rc;
         return 0;
diff -r bfd4fad0f052 -r 720afbf74001 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Mon Feb 26 11:53:35 2007 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Mon Feb 26 13:56:01 2007 +0000
@@ -101,14 +101,6 @@ get_fl1_shadow_status(struct vcpu *v, gf
 /* Look for FL1 shadows in the hash table */
 {
     mfn_t smfn = shadow_hash_lookup(v, gfn_x(gfn), SH_type_fl1_shadow);
-
-    if ( unlikely(shadow_mode_log_dirty(v->domain) && mfn_valid(smfn)) )
-    {
-        struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
-        if ( !(sp->logdirty) )
-            shadow_convert_to_log_dirty(v, smfn);
-    }
-
     return smfn;
 }
 
@@ -118,14 +110,6 @@ get_shadow_status(struct vcpu *v, mfn_t 
 {
     mfn_t smfn = shadow_hash_lookup(v, mfn_x(gmfn), shadow_type);
     perfc_incrc(shadow_get_shadow_status);
-
-    if ( unlikely(shadow_mode_log_dirty(v->domain) && mfn_valid(smfn)) )
-    {
-        struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
-        if ( !(sp->logdirty) )
-            shadow_convert_to_log_dirty(v, smfn);
-    }
-
     return smfn;
 }
 
@@ -136,12 +120,6 @@ set_fl1_shadow_status(struct vcpu *v, gf
     SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%05lx\n",
                    gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn));
 
-    if ( unlikely(shadow_mode_log_dirty(v->domain)) )
-        // mark this shadow as a log dirty shadow...
-        mfn_to_shadow_page(smfn)->logdirty = 1;
-    else
-        mfn_to_shadow_page(smfn)->logdirty = 0;
-
     shadow_hash_insert(v, gfn_x(gfn), SH_type_fl1_shadow, smfn);
 }
 
@@ -155,12 +133,6 @@ set_shadow_status(struct vcpu *v, mfn_t 
     SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx, type=%08x, smfn=%05lx\n",
                    d->domain_id, v->vcpu_id, mfn_x(gmfn),
                    shadow_type, mfn_x(smfn));
-
-    if ( unlikely(shadow_mode_log_dirty(d)) )
-        // mark this shadow as a log dirty shadow...
-        mfn_to_shadow_page(smfn)->logdirty = 1;
-    else
-        mfn_to_shadow_page(smfn)->logdirty = 0;
 
 #ifdef CONFIG_COMPAT
     if ( !IS_COMPAT(d) || shadow_type != SH_type_l4_64_shadow )
@@ -3994,6 +3966,8 @@ sh_x86_emulate_write(struct vcpu *v, uns
     /* If we are writing zeros to this page, might want to unshadow */
     if ( likely(bytes >= 4) && (*(u32 *)addr == 0) && is_lo_pte(vaddr) )
         check_for_early_unshadow(v, mfn);
+    
+    sh_mark_dirty(v->domain, mfn);
 
     sh_unmap_domain_page(addr);
     shadow_audit_tables(v);
@@ -4047,6 +4021,8 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     if ( likely(bytes >= 4) && (*(u32 *)addr == 0) && is_lo_pte(vaddr) )
         check_for_early_unshadow(v, mfn);
 
+    sh_mark_dirty(v->domain, mfn);
+
     sh_unmap_domain_page(addr);
     shadow_audit_tables(v);
     return rv;
@@ -4086,6 +4062,8 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
     /* If we are writing zeros to this page, might want to unshadow */
     if ( *(u32 *)addr == 0 )
         check_for_early_unshadow(v, mfn);
+
+    sh_mark_dirty(v->domain, mfn);
 
     sh_unmap_domain_page(addr);
     shadow_audit_tables(v);
diff -r bfd4fad0f052 -r 720afbf74001 xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Mon Feb 26 11:53:35 2007 +0000
+++ b/xen/arch/x86/mm/shadow/private.h  Mon Feb 26 13:56:01 2007 +0000
@@ -229,8 +229,7 @@ struct shadow_page_info
     struct {
         unsigned int type:4;      /* What kind of shadow is this? */
         unsigned int pinned:1;    /* Is the shadow pinned? */
-        unsigned int logdirty:1;  /* Was it made in log-dirty mode? */
-        unsigned int count:26;    /* Reference count */
+        unsigned int count:27;    /* Reference count */
         u32 mbz;                  /* Must be zero: this is where the owner 
                                    * field lives in a non-shadow page */
     } __attribute__((packed));
diff -r bfd4fad0f052 -r 720afbf74001 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Mon Feb 26 11:53:35 2007 +0000
+++ b/xen/include/asm-x86/shadow.h      Mon Feb 26 13:56:01 2007 +0000
@@ -87,12 +87,13 @@ void shadow_final_teardown(struct domain
 
 /* Mark a page as dirty in the log-dirty bitmap: called when Xen 
  * makes changes to guest memory on its behalf. */
-void shadow_mark_dirty(struct domain *d, mfn_t gmfn);
+void sh_mark_dirty(struct domain *d, mfn_t gmfn);
 /* Cleaner version so we don't pepper shadow_mode tests all over the place */
 static inline void mark_dirty(struct domain *d, unsigned long gmfn)
 {
     if ( unlikely(shadow_mode_log_dirty(d)) )
-        shadow_mark_dirty(d, _mfn(gmfn));
+        /* See the comment about locking in sh_mark_dirty */
+        sh_mark_dirty(d, _mfn(gmfn));
 }
 
 /* Update all the things that are derived from the guest's CR0/CR3/CR4.

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] [xen-unstable] [XEN] Allow log-dirty mode to be enabled on already-shadowed domains., Xen patchbot-unstable <=