WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86 hvm: move dirty_vram into struct hvm_domain
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 05 Jun 2009 19:15:23 -0700
Delivery-date: Fri, 05 Jun 2009 19:16:13 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1244207043 -3600
# Node ID 8dd5c3cae086fc9eeb066117fbe131d4d4718438
# Parent  6eff3fe96aff468487a9205a7cd344aaf7d4b5e1
x86 hvm: move dirty_vram into struct hvm_domain

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 xen/arch/x86/mm/hap/hap.c        |   66 ++++++++++++++++++---------------
 xen/arch/x86/mm/shadow/common.c  |   76 ++++++++++++++++++++-------------------
 xen/arch/x86/mm/shadow/multi.c   |   49 +++++++++++++------------
 xen/include/asm-x86/hvm/domain.h |    3 +
 xen/include/xen/sched.h          |    3 -
 5 files changed, 104 insertions(+), 93 deletions(-)
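
In brief, the change moves the dirty_vram pointer out of struct domain and
into the HVM-specific struct hvm_domain: every user now reads it through
d->arch.hvm_domain.dirty_vram and caches it in a local variable at function
entry. A minimal sketch of the new access pattern, distilled from the first
hap.c hunk below (illustrative only, not part of the applied patch; the
function name carries a _sketch suffix to mark it as hypothetical):

    /* Sketch of the post-patch access pattern, mirroring the new body of
     * hap_enable_vram_tracking() shown in the diff. */
    int hap_enable_vram_tracking_sketch(struct domain *d)
    {
        int i;
        /* The pointer now lives in the HVM-specific arch state and is
         * cached in a local before use. */
        struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;

        if ( !dirty_vram )          /* tracking not set up for this domain */
            return -EINVAL;

        /* Make the tracked VRAM range read-only in the P2M so that guest
         * writes fault and can be logged. */
        for ( i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++ )
            p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty);

        flush_tlb_mask(&d->domain_dirty_cpumask);
        return 0;
    }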

diff -r 6eff3fe96aff -r 8dd5c3cae086 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Fri Jun 05 09:32:03 2009 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Fri Jun 05 14:04:03 2009 +0100
@@ -58,8 +58,9 @@ int hap_enable_vram_tracking(struct doma
 int hap_enable_vram_tracking(struct domain *d)
 {
     int i;
-
-    if ( !d->dirty_vram )
+    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
+
+    if ( !dirty_vram )
         return -EINVAL;
 
     /* turn on PG_log_dirty bit in paging mode */
@@ -68,7 +69,7 @@ int hap_enable_vram_tracking(struct doma
     hap_unlock(d);
 
     /* set l1e entries of P2M table to be read-only. */
-    for (i = d->dirty_vram->begin_pfn; i < d->dirty_vram->end_pfn; i++)
+    for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
         p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty);
 
     flush_tlb_mask(&d->domain_dirty_cpumask);
@@ -78,8 +79,9 @@ int hap_disable_vram_tracking(struct dom
 int hap_disable_vram_tracking(struct domain *d)
 {
     int i;
-
-    if ( !d->dirty_vram )
+    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
+
+    if ( !dirty_vram )
         return -EINVAL;
 
     hap_lock(d);
@@ -87,7 +89,7 @@ int hap_disable_vram_tracking(struct dom
     hap_unlock(d);
 
     /* set l1e entries of P2M table with normal mode */
-    for (i = d->dirty_vram->begin_pfn; i < d->dirty_vram->end_pfn; i++)
+    for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
         p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty);
 
     flush_tlb_mask(&d->domain_dirty_cpumask);
@@ -97,12 +99,13 @@ void hap_clean_vram_tracking(struct doma
 void hap_clean_vram_tracking(struct domain *d)
 {
     int i;
-
-    if ( !d->dirty_vram )
+    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
+
+    if ( !dirty_vram )
         return;
 
     /* set l1e entries of P2M table to be read-only. */
-    for (i = d->dirty_vram->begin_pfn; i < d->dirty_vram->end_pfn; i++)
+    for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
         p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty);
 
     flush_tlb_mask(&d->domain_dirty_cpumask);
@@ -121,30 +124,32 @@ int hap_track_dirty_vram(struct domain *
                          XEN_GUEST_HANDLE_64(uint8) dirty_bitmap)
 {
     long rc = 0;
+    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
 
     if ( nr )
     {
-        if ( paging_mode_log_dirty(d) && d->dirty_vram )
-        {
-            if ( begin_pfn != d->dirty_vram->begin_pfn ||
-                 begin_pfn + nr != d->dirty_vram->end_pfn )
+        if ( paging_mode_log_dirty(d) && dirty_vram )
+        {
+            if ( begin_pfn != dirty_vram->begin_pfn ||
+                 begin_pfn + nr != dirty_vram->end_pfn )
             {
                 paging_log_dirty_disable(d);
-                d->dirty_vram->begin_pfn = begin_pfn;
-                d->dirty_vram->end_pfn = begin_pfn + nr;
+                dirty_vram->begin_pfn = begin_pfn;
+                dirty_vram->end_pfn = begin_pfn + nr;
                 rc = paging_log_dirty_enable(d);
                 if (rc != 0)
                     goto param_fail;
             }
         }
-        else if ( !paging_mode_log_dirty(d) && !d->dirty_vram )
+        else if ( !paging_mode_log_dirty(d) && !dirty_vram )
         {
             rc = -ENOMEM;
-            if ( (d->dirty_vram = xmalloc(struct sh_dirty_vram)) == NULL )
+            if ( (dirty_vram = xmalloc(struct sh_dirty_vram)) == NULL )
                 goto param_fail;
 
-            d->dirty_vram->begin_pfn = begin_pfn;
-            d->dirty_vram->end_pfn = begin_pfn + nr;
+            dirty_vram->begin_pfn = begin_pfn;
+            dirty_vram->end_pfn = begin_pfn + nr;
+            d->arch.hvm_domain.dirty_vram = dirty_vram;
             hap_vram_tracking_init(d);
             rc = paging_log_dirty_enable(d);
             if (rc != 0)
@@ -152,7 +157,7 @@ int hap_track_dirty_vram(struct domain *
         }
         else
         {
-            if ( !paging_mode_log_dirty(d) && d->dirty_vram )
+            if ( !paging_mode_log_dirty(d) && dirty_vram )
                 rc = -EINVAL;
             else
                 rc = -ENODATA;
@@ -163,10 +168,10 @@ int hap_track_dirty_vram(struct domain *
     }
     else
     {
-        if ( paging_mode_log_dirty(d) && d->dirty_vram ) {
+        if ( paging_mode_log_dirty(d) && dirty_vram ) {
             rc = paging_log_dirty_disable(d);
-            xfree(d->dirty_vram);
-            d->dirty_vram = NULL;
+            xfree(dirty_vram);
+            dirty_vram = d->arch.hvm_domain.dirty_vram = NULL;
         } else
             rc = 0;
     }
@@ -174,10 +179,10 @@ int hap_track_dirty_vram(struct domain *
     return rc;
 
 param_fail:
-    if ( d->dirty_vram )
-    {
-        xfree(d->dirty_vram);
-        d->dirty_vram = NULL;
+    if ( dirty_vram )
+    {
+        xfree(dirty_vram);
+        dirty_vram = d->arch.hvm_domain.dirty_vram = NULL;
     }
     return rc;
 }
@@ -220,11 +225,12 @@ void hap_clean_dirty_bitmap(struct domai
 
 void hap_logdirty_init(struct domain *d)
 {
-    if ( paging_mode_log_dirty(d) && d->dirty_vram )
+    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
+    if ( paging_mode_log_dirty(d) && dirty_vram )
     {
         paging_log_dirty_disable(d);
-        xfree(d->dirty_vram);
-        d->dirty_vram = NULL;
+        xfree(dirty_vram);
+        dirty_vram = d->arch.hvm_domain.dirty_vram = NULL;
     }
 
     /* Reinitialize logdirty mechanism */
diff -r 6eff3fe96aff -r 8dd5c3cae086 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Fri Jun 05 09:32:03 2009 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Fri Jun 05 14:04:03 2009 +0100
@@ -3259,11 +3259,11 @@ void shadow_teardown(struct domain *d)
      * calls now that we've torn down the bitmap */
     d->arch.paging.mode &= ~PG_log_dirty;
 
-    if (d->dirty_vram) {
-        xfree(d->dirty_vram->sl1ma);
-        xfree(d->dirty_vram->dirty_bitmap);
-        xfree(d->dirty_vram);
-        d->dirty_vram = NULL;
+    if (d->arch.hvm_domain.dirty_vram) {
+        xfree(d->arch.hvm_domain.dirty_vram->sl1ma);
+        xfree(d->arch.hvm_domain.dirty_vram->dirty_bitmap);
+        xfree(d->arch.hvm_domain.dirty_vram);
+        d->arch.hvm_domain.dirty_vram = NULL;
     }
 
     shadow_unlock(d);
@@ -3583,6 +3583,7 @@ int shadow_track_dirty_vram(struct domai
     int flush_tlb = 0;
     unsigned long i;
     p2m_type_t t;
+    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
 
     if (end_pfn < begin_pfn
             || begin_pfn > d->arch.p2m->max_mapped_pfn
@@ -3591,16 +3592,16 @@ int shadow_track_dirty_vram(struct domai
 
     shadow_lock(d);
 
-    if ( d->dirty_vram && (!nr ||
-             ( begin_pfn != d->dirty_vram->begin_pfn
-            || end_pfn   != d->dirty_vram->end_pfn )) )
+    if ( dirty_vram && (!nr ||
+             ( begin_pfn != dirty_vram->begin_pfn
+            || end_pfn   != dirty_vram->end_pfn )) )
     {
         /* Different tracking, tear the previous down. */
-        gdprintk(XENLOG_INFO, "stopping tracking VRAM %lx - %lx\n", d->dirty_vram->begin_pfn, d->dirty_vram->end_pfn);
-        xfree(d->dirty_vram->sl1ma);
-        xfree(d->dirty_vram->dirty_bitmap);
-        xfree(d->dirty_vram);
-        d->dirty_vram = NULL;
+        gdprintk(XENLOG_INFO, "stopping tracking VRAM %lx - %lx\n", dirty_vram->begin_pfn, dirty_vram->end_pfn);
+        xfree(dirty_vram->sl1ma);
+        xfree(dirty_vram->dirty_bitmap);
+        xfree(dirty_vram);
+        dirty_vram = d->arch.hvm_domain.dirty_vram = NULL;
     }
 
     if ( !nr )
@@ -3611,7 +3612,7 @@ int shadow_track_dirty_vram(struct domai
 
     /* This should happen seldomly (Video mode change),
      * no need to be careful. */
-    if ( !d->dirty_vram )
+    if ( !dirty_vram )
     {
         /* Just recount from start. */
         for ( i = begin_pfn; i < end_pfn; i++ ) {
@@ -3623,29 +3624,30 @@ int shadow_track_dirty_vram(struct domai
         gdprintk(XENLOG_INFO, "tracking VRAM %lx - %lx\n", begin_pfn, end_pfn);
 
         rc = -ENOMEM;
-        if ( (d->dirty_vram = xmalloc(struct sh_dirty_vram)) == NULL )
+        if ( (dirty_vram = xmalloc(struct sh_dirty_vram)) == NULL )
             goto out;
-        d->dirty_vram->begin_pfn = begin_pfn;
-        d->dirty_vram->end_pfn = end_pfn;
-
-        if ( (d->dirty_vram->sl1ma = xmalloc_array(paddr_t, nr)) == NULL )
+        dirty_vram->begin_pfn = begin_pfn;
+        dirty_vram->end_pfn = end_pfn;
+        d->arch.hvm_domain.dirty_vram = dirty_vram;
+
+        if ( (dirty_vram->sl1ma = xmalloc_array(paddr_t, nr)) == NULL )
             goto out_dirty_vram;
-        memset(d->dirty_vram->sl1ma, ~0, sizeof(paddr_t) * nr);
-
-        if ( (d->dirty_vram->dirty_bitmap = xmalloc_array(uint8_t, dirty_size)) == NULL )
+        memset(dirty_vram->sl1ma, ~0, sizeof(paddr_t) * nr);
+
+        if ( (dirty_vram->dirty_bitmap = xmalloc_array(uint8_t, dirty_size)) == NULL )
             goto out_sl1ma;
-        memset(d->dirty_vram->dirty_bitmap, 0, dirty_size);
-
-        d->dirty_vram->last_dirty = NOW();
+        memset(dirty_vram->dirty_bitmap, 0, dirty_size);
+
+        dirty_vram->last_dirty = NOW();
 
         /* Tell the caller that this time we could not track dirty bits. */
         rc = -ENODATA;
     }
-    else if (d->dirty_vram->last_dirty == -1)
+    else if (dirty_vram->last_dirty == -1)
     {
         /* still completely clean, just copy our empty bitmap */
         rc = -EFAULT;
-        if ( copy_to_guest(dirty_bitmap, d->dirty_vram->dirty_bitmap, dirty_size) == 0 )
+        if ( copy_to_guest(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size) == 0 )
             rc = 0;
     }
     else
@@ -3660,7 +3662,7 @@ int shadow_track_dirty_vram(struct domai
             mfn_t mfn = gfn_to_mfn(d, begin_pfn + i, &t);
             struct page_info *page;
             int dirty = 0;
-            paddr_t sl1ma = d->dirty_vram->sl1ma[i];
+            paddr_t sl1ma = dirty_vram->sl1ma[i];
 
             if (mfn_x(mfn) == INVALID_MFN)
             {
@@ -3724,8 +3726,8 @@ int shadow_track_dirty_vram(struct domai
 
             if ( dirty )
             {
-                d->dirty_vram->dirty_bitmap[i / 8] |= 1 << (i % 8);
-                d->dirty_vram->last_dirty = NOW();
+                dirty_vram->dirty_bitmap[i / 8] |= 1 << (i % 8);
+                dirty_vram->last_dirty = NOW();
             }
         }
 
@@ -3735,9 +3737,9 @@ int shadow_track_dirty_vram(struct domai
 #endif
 
         rc = -EFAULT;
-        if ( copy_to_guest(dirty_bitmap, d->dirty_vram->dirty_bitmap, dirty_size) == 0 ) {
-            memset(d->dirty_vram->dirty_bitmap, 0, dirty_size);
-            if (d->dirty_vram->last_dirty + SECONDS(2) < NOW())
+        if ( copy_to_guest(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size) == 0 ) {
+            memset(dirty_vram->dirty_bitmap, 0, dirty_size);
+            if (dirty_vram->last_dirty + SECONDS(2) < NOW())
             {
                 /* was clean for more than two seconds, try to disable guest
                  * write access */
@@ -3746,7 +3748,7 @@ int shadow_track_dirty_vram(struct domai
                     if (mfn_x(mfn) != INVALID_MFN)
                         flush_tlb |= sh_remove_write_access(d->vcpu[0], mfn, 1, 0);
                 }
-                d->dirty_vram->last_dirty = -1;
+                dirty_vram->last_dirty = -1;
             }
             rc = 0;
         }
@@ -3756,10 +3758,10 @@ int shadow_track_dirty_vram(struct domai
     goto out;
 
 out_sl1ma:
-    xfree(d->dirty_vram->sl1ma);
+    xfree(dirty_vram->sl1ma);
 out_dirty_vram:
-    xfree(d->dirty_vram);
-    d->dirty_vram = NULL;
+    xfree(dirty_vram);
+    dirty_vram = d->arch.hvm_domain.dirty_vram = NULL;
 
 out:
     shadow_unlock(d);
diff -r 6eff3fe96aff -r 8dd5c3cae086 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Fri Jun 05 09:32:03 2009 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Fri Jun 05 14:04:03 2009 +0100
@@ -475,6 +475,7 @@ _sh_propagate(struct vcpu *v,
     guest_l1e_t guest_entry = { guest_intpte };
     shadow_l1e_t *sp = shadow_entry_ptr;
     struct domain *d = v->domain;
+    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
     gfn_t target_gfn = guest_l1e_get_gfn(guest_entry);
     u32 pass_thru_flags;
     u32 gflags, sflags;
@@ -615,13 +616,13 @@ _sh_propagate(struct vcpu *v,
         }
     }
 
-    if ( unlikely((level == 1) && d->dirty_vram
-            && d->dirty_vram->last_dirty == -1
-            && gfn_x(target_gfn) >= d->dirty_vram->begin_pfn
-            && gfn_x(target_gfn) < d->dirty_vram->end_pfn) )
+    if ( unlikely((level == 1) && dirty_vram
+            && dirty_vram->last_dirty == -1
+            && gfn_x(target_gfn) >= dirty_vram->begin_pfn
+            && gfn_x(target_gfn) < dirty_vram->end_pfn) )
     {
         if ( ft & FETCH_TYPE_WRITE )
-            d->dirty_vram->last_dirty = NOW();
+            dirty_vram->last_dirty = NOW();
         else
             sflags &= ~_PAGE_RW;
     }
@@ -1042,22 +1043,23 @@ static inline void shadow_vram_get_l1e(s
     mfn_t mfn = shadow_l1e_get_mfn(new_sl1e);
     int flags = shadow_l1e_get_flags(new_sl1e);
     unsigned long gfn;
-
-    if ( !d->dirty_vram         /* tracking disabled? */
+    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
+
+    if ( !dirty_vram         /* tracking disabled? */
          || !(flags & _PAGE_RW) /* read-only mapping? */
          || !mfn_valid(mfn) )   /* mfn can be invalid in mmio_direct */
         return;
 
     gfn = mfn_to_gfn(d, mfn);
 
-    if ( (gfn >= d->dirty_vram->begin_pfn) && (gfn < d->dirty_vram->end_pfn) )
-    {
-        unsigned long i = gfn - d->dirty_vram->begin_pfn;
+    if ( (gfn >= dirty_vram->begin_pfn) && (gfn < dirty_vram->end_pfn) )
+    {
+        unsigned long i = gfn - dirty_vram->begin_pfn;
         struct page_info *page = mfn_to_page(mfn);
         
         if ( (page->u.inuse.type_info & PGT_count_mask) == 1 )
             /* Initial guest reference, record it */
-            d->dirty_vram->sl1ma[i] = pfn_to_paddr(mfn_x(sl1mfn))
+            dirty_vram->sl1ma[i] = pfn_to_paddr(mfn_x(sl1mfn))
                 | ((unsigned long)sl1e & ~PAGE_MASK);
     }
 }
@@ -1070,17 +1072,18 @@ static inline void shadow_vram_put_l1e(s
     mfn_t mfn = shadow_l1e_get_mfn(old_sl1e);
     int flags = shadow_l1e_get_flags(old_sl1e);
     unsigned long gfn;
-
-    if ( !d->dirty_vram         /* tracking disabled? */
+    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
+
+    if ( !dirty_vram         /* tracking disabled? */
          || !(flags & _PAGE_RW) /* read-only mapping? */
          || !mfn_valid(mfn) )   /* mfn can be invalid in mmio_direct */
         return;
 
     gfn = mfn_to_gfn(d, mfn);
 
-    if ( (gfn >= d->dirty_vram->begin_pfn) && (gfn < d->dirty_vram->end_pfn) )
-    {
-        unsigned long i = gfn - d->dirty_vram->begin_pfn;
+    if ( (gfn >= dirty_vram->begin_pfn) && (gfn < dirty_vram->end_pfn) )
+    {
+        unsigned long i = gfn - dirty_vram->begin_pfn;
         struct page_info *page = mfn_to_page(mfn);
         int dirty = 0;
         paddr_t sl1ma = pfn_to_paddr(mfn_x(sl1mfn))
@@ -1089,14 +1092,14 @@ static inline void shadow_vram_put_l1e(s
         if ( (page->u.inuse.type_info & PGT_count_mask) == 1 )
         {
             /* Last reference */
-            if ( d->dirty_vram->sl1ma[i] == INVALID_PADDR ) {
+            if ( dirty_vram->sl1ma[i] == INVALID_PADDR ) {
                 /* We didn't know it was that one, let's say it is dirty */
                 dirty = 1;
             }
             else
             {
-                ASSERT(d->dirty_vram->sl1ma[i] == sl1ma);
-                d->dirty_vram->sl1ma[i] = INVALID_PADDR;
+                ASSERT(dirty_vram->sl1ma[i] == sl1ma);
+                dirty_vram->sl1ma[i] = INVALID_PADDR;
                 if ( flags & _PAGE_DIRTY )
                     dirty = 1;
             }
@@ -1106,10 +1109,10 @@ static inline void shadow_vram_put_l1e(s
             /* We had more than one reference, just consider the page dirty. */
             dirty = 1;
             /* Check that it's not the one we recorded. */
-            if ( d->dirty_vram->sl1ma[i] == sl1ma )
+            if ( dirty_vram->sl1ma[i] == sl1ma )
             {
                 /* Too bad, we remembered the wrong one... */
-                d->dirty_vram->sl1ma[i] = INVALID_PADDR;
+                dirty_vram->sl1ma[i] = INVALID_PADDR;
             }
             else
             {
@@ -1119,8 +1122,8 @@ static inline void shadow_vram_put_l1e(s
         }
         if ( dirty )
         {
-            d->dirty_vram->dirty_bitmap[i / 8] |= 1 << (i % 8);
-            d->dirty_vram->last_dirty = NOW();
+            dirty_vram->dirty_bitmap[i / 8] |= 1 << (i % 8);
+            dirty_vram->last_dirty = NOW();
         }
     }
 }
diff -r 6eff3fe96aff -r 8dd5c3cae086 xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h  Fri Jun 05 09:32:03 2009 +0100
+++ b/xen/include/asm-x86/hvm/domain.h  Fri Jun 05 14:04:03 2009 +0100
@@ -66,6 +66,9 @@ struct hvm_domain {
     /* Memory ranges with pinned cache attributes. */
     struct list_head       pinned_cacheattr_ranges;
 
+    /* VRAM dirty support. */
+    struct sh_dirty_vram *dirty_vram;
+
     /* If one of vcpus of this domain is in no_fill_mode or
      * mtrr/pat between vcpus is not the same, set is_in_uc_mode
      */
diff -r 6eff3fe96aff -r 8dd5c3cae086 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Fri Jun 05 09:32:03 2009 +0100
+++ b/xen/include/xen/sched.h   Fri Jun 05 14:04:03 2009 +0100
@@ -268,9 +268,6 @@ struct domain
      */
     spinlock_t hypercall_deadlock_mutex;
 
-    /* VRAM dirty support. */
-    struct sh_dirty_vram *dirty_vram;
-
     /* transcendent memory, auto-allocated on first tmem op by each domain */
     void *tmem;
 };

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
