Hi all,
currently HAP systems suffer a significant performance loss when a VNC
client is connected or the SDL interface is used, because HAP lacks an
implementation of track_dirty_vram.
As a consequence qemu always updates the whole screen, because it has
no way of knowing which areas of the screen the guest has modified.
This patch implements track_dirty_vram for HAP, enabling the logdirty
mechanism only in a specific gfn range and adding a
paging_log_dirty_range function that returns the logdirty bitmap for a
requested range.
paging_log_dirty_range differs from paging_log_dirty_op in that it
operates on a range and does not pause the domain. In order not to
lose any updates, I moved clean_dirty_bitmap to the beginning of the
function, before the logdirty bitmap is evaluated.
The bitmap is still safe because it is protected by the logdirty lock.
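
For reference, this is roughly how a display frontend such as qemu is
expected to consume the interface. The sketch below is only
illustrative: it assumes the existing xc_hvm_track_dirty_vram libxc
wrapper, and xc_handle, domid and vram_first_pfn are placeholder names
rather than code taken from qemu:

    /* Poll the dirty bitmap for a 4MB VRAM area (1024 pfns). */
    #define VRAM_PAGES 1024
    unsigned long dirty[VRAM_PAGES / (8 * sizeof(unsigned long))];
    int i;

    memset(dirty, 0, sizeof(dirty));
    if ( xc_hvm_track_dirty_vram(xc_handle, domid, vram_first_pfn,
                                 VRAM_PAGES, dirty) == 0 )
    {
        for ( i = 0; i < VRAM_PAGES; i++ )
            if ( dirty[i / (8 * sizeof(unsigned long))] &
                 (1UL << (i % (8 * sizeof(unsigned long)))) )
                ; /* redraw only the screen area backed by pfn i */
    }

Calling the hypercall with nr == 0 stops the tracking and frees the
dirty_vram state, as hap_track_dirty_vram below implements.
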
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
diff -r 50cf07f42fdd xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Thu Jun 04 10:57:39 2009 +0100
+++ b/xen/arch/x86/hvm/hvm.c Thu Jun 04 18:47:04 2009 +0100
@@ -34,6 +34,7 @@
#include <xen/event.h>
#include <xen/paging.h>
#include <asm/shadow.h>
+#include <asm/hap.h>
#include <asm/current.h>
#include <asm/e820.h>
#include <asm/io.h>
@@ -2653,12 +2654,13 @@
            goto param_fail2;

        rc = -EINVAL;
-       if ( !shadow_mode_enabled(d))
-           goto param_fail2;
        if ( d->vcpu[0] == NULL )
            goto param_fail2;

-       rc = shadow_track_dirty_vram(d, a.first_pfn, a.nr, a.dirty_bitmap);
+       if ( shadow_mode_enabled(d) )
+           rc = shadow_track_dirty_vram(d, a.first_pfn, a.nr, a.dirty_bitmap);
+       else
+           rc = hap_track_dirty_vram(d, a.first_pfn, a.nr, a.dirty_bitmap);

    param_fail2:
        rcu_unlock_domain(d);
diff -r 50cf07f42fdd xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Thu Jun 04 10:57:39 2009 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Thu Jun 04 18:47:04 2009 +0100
@@ -52,8 +52,140 @@
#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
/************************************************/
+/* HAP VRAM TRACKING SUPPORT */
+/************************************************/
+
+int hap_enable_vram_tracking(struct domain *d)
+{
+    int i;
+
+    if ( !d->dirty_vram )
+        return -EINVAL;
+
+    /* turn on PG_log_dirty bit in paging mode */
+    hap_lock(d);
+    d->arch.paging.mode |= PG_log_dirty;
+    hap_unlock(d);
+
+    /* set l1e entries of P2M table to be read-only. */
+    for ( i = d->dirty_vram->begin_pfn; i < d->dirty_vram->end_pfn; i++ )
+        p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty);
+
+    flush_tlb_mask(&d->domain_dirty_cpumask);
+    return 0;
+}
+
+int hap_disable_vram_tracking(struct domain *d)
+{
+    int i;
+
+    if ( !d->dirty_vram )
+        return -EINVAL;
+
+    hap_lock(d);
+    d->arch.paging.mode &= ~PG_log_dirty;
+    hap_unlock(d);
+
+    /* set l1e entries of P2M table back to normal mode */
+    for ( i = d->dirty_vram->begin_pfn; i < d->dirty_vram->end_pfn; i++ )
+        p2m_change_type(d, i, p2m_ram_logdirty, p2m_ram_rw);
+
+    flush_tlb_mask(&d->domain_dirty_cpumask);
+    return 0;
+}
+
+void hap_clean_vram_tracking(struct domain *d)
+{
+    int i;
+
+    if ( !d->dirty_vram )
+        return;
+
+    /* set l1e entries of P2M table to be read-only. */
+    for ( i = d->dirty_vram->begin_pfn; i < d->dirty_vram->end_pfn; i++ )
+        p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty);
+
+    flush_tlb_mask(&d->domain_dirty_cpumask);
+}
+
+void hap_vram_tracking_init(struct domain *d)
+{
+    paging_log_dirty_init(d, hap_enable_vram_tracking,
+                          hap_disable_vram_tracking,
+                          hap_clean_vram_tracking);
+}
+
+int hap_track_dirty_vram(struct domain *d,
+                         unsigned long begin_pfn,
+                         unsigned long nr,
+                         XEN_GUEST_HANDLE_64(uint8) dirty_bitmap)
+{
+    long rc = 0;
+
+    if ( nr )
+    {
+        if ( paging_mode_log_dirty(d) && d->dirty_vram )
+        {
+            /* already tracking: restart if the range has changed */
+            if ( begin_pfn != d->dirty_vram->begin_pfn ||
+                 begin_pfn + nr != d->dirty_vram->end_pfn )
+            {
+                paging_log_dirty_disable(d);
+                d->dirty_vram->begin_pfn = begin_pfn;
+                d->dirty_vram->end_pfn = begin_pfn + nr;
+                rc = paging_log_dirty_enable(d);
+                if ( rc != 0 )
+                    goto param_fail;
+            }
+        }
+        else if ( !paging_mode_log_dirty(d) && !d->dirty_vram )
+        {
+            rc = -ENOMEM;
+            if ( (d->dirty_vram = xmalloc(struct sh_dirty_vram)) == NULL )
+                goto param_fail;
+
+            d->dirty_vram->begin_pfn = begin_pfn;
+            d->dirty_vram->end_pfn = begin_pfn + nr;
+            hap_vram_tracking_init(d);
+            rc = paging_log_dirty_enable(d);
+            if ( rc != 0 )
+                goto param_fail;
+        }
+        else
+        {
+            if ( !paging_mode_log_dirty(d) && d->dirty_vram )
+                rc = -EINVAL;
+            else
+                rc = -ENODATA;
+            goto param_fail;
+        }
+        /* get the bitmap */
+        rc = paging_log_dirty_range(d, begin_pfn, nr, dirty_bitmap);
+    }
+    else
+    {
+        /* nr == 0 means stop VRAM tracking */
+        if ( paging_mode_log_dirty(d) && d->dirty_vram )
+        {
+            rc = paging_log_dirty_disable(d);
+            xfree(d->dirty_vram);
+            d->dirty_vram = NULL;
+        }
+        else
+            rc = 0;
+    }
+
+    return rc;
+
+param_fail:
+    if ( d->dirty_vram )
+    {
+        xfree(d->dirty_vram);
+        d->dirty_vram = NULL;
+    }
+    return rc;
+}
+
+/************************************************/
/* HAP LOG DIRTY SUPPORT */
/************************************************/
+
/* hap code to call when log_dirty is enable. return 0 if no problem found. */
int hap_enable_log_dirty(struct domain *d)
{
@@ -84,6 +216,21 @@
    /* set l1e entries of P2M table to be read-only. */
    p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
    flush_tlb_mask(&d->domain_dirty_cpumask);
+}
+
+void hap_logdirty_init(struct domain *d)
+{
+    if ( paging_mode_log_dirty(d) && d->dirty_vram )
+    {
+        paging_log_dirty_disable(d);
+        xfree(d->dirty_vram);
+        d->dirty_vram = NULL;
+    }
+
+    /* Reinitialize logdirty mechanism */
+    paging_log_dirty_init(d, hap_enable_log_dirty,
+                          hap_disable_log_dirty,
+                          hap_clean_dirty_bitmap);
 }

 /************************************************/
@@ -390,10 +537,6 @@
 {
    hap_lock_init(d);
    INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist);
-
-    /* This domain will use HAP for log-dirty mode */
-    paging_log_dirty_init(d, hap_enable_log_dirty, hap_disable_log_dirty,
-                          hap_clean_dirty_bitmap);
 }

 /* return 0 for success, -errno for failure */
diff -r 50cf07f42fdd xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c Thu Jun 04 10:57:39 2009 +0100
+++ b/xen/arch/x86/mm/paging.c Thu Jun 04 18:47:04 2009 +0100
@@ -453,6 +453,157 @@
    return rv;
 }

+int paging_log_dirty_range(struct domain *d,
+                           unsigned long begin_pfn,
+                           unsigned long nr,
+                           XEN_GUEST_HANDLE_64(uint8) dirty_bitmap)
+{
+    int rv = 0;
+    unsigned long pages = 0;
+    mfn_t *l4, *l3, *l2;
+    unsigned long *l1;
+    int b1, b2, b3, b4;
+    int i2, i3, i4;
+
+    d->arch.paging.log_dirty.clean_dirty_bitmap(d);
+    log_dirty_lock(d);
+
+    PAGING_DEBUG(LOGDIRTY, "log-dirty-range: dom %u faults=%u dirty=%u\n",
+                 d->domain_id,
+                 d->arch.paging.log_dirty.fault_count,
+                 d->arch.paging.log_dirty.dirty_count);
+
+    if ( !mfn_valid(d->arch.paging.log_dirty.top) )
+    {
+        rv = -EINVAL; /* perhaps should be ENOMEM? */
+        goto out;
+    }
+
+    if ( unlikely(d->arch.paging.log_dirty.failed_allocs) )
+    {
+        printk("%s: %d failed page allocs while logging dirty pages\n",
+               __FUNCTION__, d->arch.paging.log_dirty.failed_allocs);
+        rv = -ENOMEM;
+        goto out;
+    }
+
+    if ( !d->arch.paging.log_dirty.fault_count &&
+         !d->arch.paging.log_dirty.dirty_count )
+    {
+        int size = (nr + BITS_PER_LONG - 1) / BITS_PER_LONG;
+        unsigned long zeroes[size];
+        memset(zeroes, 0x00, size * BYTES_PER_LONG);
+        rv = 0;
+        if ( copy_to_guest_offset(dirty_bitmap, 0, (uint8_t *) zeroes,
+                                  size * BYTES_PER_LONG) != 0 )
+            rv = -EFAULT;
+        goto out;
+    }
+    d->arch.paging.log_dirty.fault_count = 0;
+    d->arch.paging.log_dirty.dirty_count = 0;
+
+    b1 = L1_LOGDIRTY_IDX(begin_pfn);
+    b2 = L2_LOGDIRTY_IDX(begin_pfn);
+    b3 = L3_LOGDIRTY_IDX(begin_pfn);
+    b4 = L4_LOGDIRTY_IDX(begin_pfn);
+    l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
+
+    for ( i4 = b4;
+          (pages < nr) && (i4 < LOGDIRTY_NODE_ENTRIES);
+          i4++ )
+    {
+        l3 = mfn_valid(l4[i4]) ? map_domain_page(mfn_x(l4[i4])) : NULL;
+        for ( i3 = b3;
+              (pages < nr) && (i3 < LOGDIRTY_NODE_ENTRIES);
+              i3++ )
+        {
+            l2 = ((l3 && mfn_valid(l3[i3])) ?
+                  map_domain_page(mfn_x(l3[i3])) : NULL);
+            for ( i2 = b2;
+                  (pages < nr) && (i2 < LOGDIRTY_NODE_ENTRIES);
+                  i2++ )
+            {
+                static unsigned long zeroes[PAGE_SIZE/BYTES_PER_LONG];
+                unsigned int bytes = PAGE_SIZE;
+                uint8_t *s;
+                l1 = ((l2 && mfn_valid(l2[i2])) ?
+                      map_domain_page(mfn_x(l2[i2])) : zeroes);
+
+                s = ((uint8_t*)l1) + (b1 >> 3);
+                bytes -= b1 >> 3;
+
+                if ( likely(((nr - pages + 7) >> 3) < bytes) )
+                    bytes = (unsigned int)((nr - pages + 7) >> 3);
+
+                /* begin_pfn is not 32K aligned, hence we have to bit
+                 * shift the bitmap */
+                if ( b1 & 0x7 )
+                {
+                    int i, j;
+                    uint32_t *l = (uint32_t*) s;
+                    int bits = b1 & 0x7;
+                    int bitmask = (1 << bits) - 1;
+                    int size = (bytes + BYTES_PER_LONG - 1) / BYTES_PER_LONG;
+                    unsigned long bitmap[size];
+                    static unsigned long printed = 0;
+
+                    if ( printed != begin_pfn )
+                    {
+                        dprintk(XENLOG_DEBUG,
+                                "%s: begin_pfn %lx is not 32K aligned!\n",
+                                __FUNCTION__, begin_pfn);
+                        printed = begin_pfn;
+                    }
+
+                    for ( i = 0; i < size - 1; i++, l++ )
+                    {
+                        bitmap[i] = ((*l) >> bits) |
+                            (((*((uint8_t*)(l + 1))) & bitmask) <<
+                             (sizeof(*l) * 8 - bits));
+                    }
+                    s = (uint8_t*) l;
+                    size = BYTES_PER_LONG - ((b1 >> 3) & 0x3);
+                    bitmap[i] = 0;
+                    for ( j = 0; j < size; j++, s++ )
+                        bitmap[i] |= (*s) << (j * 8);
+                    bitmap[i] = (bitmap[i] >> bits) |
+                        (bitmask << (size * 8 - bits));
+                    if ( copy_to_guest_offset(dirty_bitmap, (pages >> 3),
+                                              (uint8_t*) bitmap, bytes) != 0 )
+                    {
+                        rv = -EFAULT;
+                        goto out;
+                    }
+                }
+                else
+                {
+                    if ( copy_to_guest_offset(dirty_bitmap, pages >> 3,
+                                              s, bytes) != 0 )
+                    {
+                        rv = -EFAULT;
+                        goto out;
+                    }
+                }
+
+                if ( l1 != zeroes )
+                {
+                    clear_page(l1);
+                    unmap_domain_page(l1);
+                }
+                pages += bytes << 3;
+                b1 = b1 & 0x7;
+            }
+            b2 = 0;
+            if ( l2 )
+                unmap_domain_page(l2);
+        }
+        b3 = 0;
+        if ( l3 )
+            unmap_domain_page(l3);
+    }
+    unmap_domain_page(l4);
+
+    log_dirty_unlock(d);
+
+    return rv;
+
+ out:
+    log_dirty_unlock(d);
+    return rv;
+}

/* Note that this function takes three function pointers. Callers must supply
 * these functions for log dirty code to call. This function usually is
@@ -554,11 +705,17 @@
    switch ( sc->op )
    {
    case XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
+        if ( hap_enabled(d) )
+            hap_logdirty_init(d);
        return paging_log_dirty_enable(d);

    case XEN_DOMCTL_SHADOW_OP_ENABLE:
        if ( sc->mode & XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY )
+        {
+            if ( hap_enabled(d) )
+                hap_logdirty_init(d);
            return paging_log_dirty_enable(d);
+        }

    case XEN_DOMCTL_SHADOW_OP_OFF:
        if ( paging_mode_log_dirty(d) )
diff -r 50cf07f42fdd xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h Thu Jun 04 10:57:39 2009 +0100
+++ b/xen/arch/x86/mm/shadow/private.h Thu Jun 04 18:47:04 2009 +0100
@@ -590,17 +590,6 @@
}
/**************************************************************************/
-/* VRAM dirty tracking support */
-
-struct sh_dirty_vram {
-    unsigned long begin_pfn;
-    unsigned long end_pfn;
-    paddr_t *sl1ma;
-    uint8_t *dirty_bitmap;
-    s_time_t last_dirty;
-};
-
-/**************************************************************************/
/* Shadow-page refcounting. */
void sh_destroy_shadow(struct vcpu *v, mfn_t smfn);
diff -r 50cf07f42fdd xen/include/asm-x86/hap.h
--- a/xen/include/asm-x86/hap.h Thu Jun 04 10:57:39 2009 +0100
+++ b/xen/include/asm-x86/hap.h Thu Jun 04 18:47:04 2009 +0100
@@ -91,6 +91,11 @@
void hap_final_teardown(struct domain *d);
void hap_teardown(struct domain *d);
void hap_vcpu_init(struct vcpu *v);
+void hap_logdirty_init(struct domain *d);
+int hap_track_dirty_vram(struct domain *d,
+                         unsigned long begin_pfn,
+                         unsigned long nr,
+                         XEN_GUEST_HANDLE_64(uint8) dirty_bitmap);
extern struct paging_mode hap_paging_real_mode;
extern struct paging_mode hap_paging_protected_mode;
diff -r 50cf07f42fdd xen/include/asm-x86/paging.h
--- a/xen/include/asm-x86/paging.h Thu Jun 04 10:57:39 2009 +0100
+++ b/xen/include/asm-x86/paging.h Thu Jun 04 18:47:05 2009 +0100
@@ -139,6 +139,12 @@
/* free log dirty bitmap resource */
void paging_free_log_dirty_bitmap(struct domain *d);
+/* get the dirty bitmap for a specific range of pfns */
+int paging_log_dirty_range(struct domain *d,
+                           unsigned long begin_pfn,
+                           unsigned long nr,
+                           XEN_GUEST_HANDLE_64(uint8) dirty_bitmap);
+
/* enable log dirty */
int paging_log_dirty_enable(struct domain *d);
diff -r 50cf07f42fdd xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Thu Jun 04 10:57:39 2009 +0100
+++ b/xen/include/xen/sched.h Thu Jun 04 18:47:05 2009 +0100
@@ -165,6 +165,17 @@
#define domain_unlock(d) spin_unlock_recursive(&(d)->domain_lock)
#define domain_is_locked(d) spin_is_locked(&(d)->domain_lock)
+/* VRAM dirty tracking support */
+
+struct sh_dirty_vram {
+    unsigned long begin_pfn;
+    unsigned long end_pfn;
+    paddr_t *sl1ma;
+    uint8_t *dirty_bitmap;
+    s_time_t last_dirty;
+};
+
+/**************************************************************************/
struct domain
{
    domid_t domain_id;
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel