[PATCH 03/12] libxenguest: short-circuit "all-dirty" handling
For one, it is unnecessary to fill a perhaps large chunk of memory with
all ones. Add a new parameter to send_dirty_pages() allowing callers to
indicate that every page is to be sent.

It is then further unnecessary to allocate the dirty bitmap altogether
when all that's ever going to happen is a single all-dirty run.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/tools/libs/guest/xg_sr_save.c
+++ b/tools/libs/guest/xg_sr_save.c
@@ -368,7 +368,7 @@ static int suspend_domain(struct xc_sr_c
  * Bitmap is bounded by p2m_size.
  */
 static int send_dirty_pages(struct xc_sr_context *ctx,
-                            unsigned long entries)
+                            unsigned long entries, bool all_dirty)
 {
     xc_interface *xch = ctx->xch;
     xen_pfn_t p;
@@ -379,7 +379,7 @@ static int send_dirty_pages(struct xc_sr
 
     for ( p = 0, written = 0; p < ctx->save.p2m_size; ++p )
     {
-        if ( !test_bit(p, dirty_bitmap) )
+        if ( !all_dirty && !test_bit(p, dirty_bitmap) )
             continue;
 
         rc = add_to_batch(ctx, p);
@@ -411,12 +411,7 @@ static int send_dirty_pages(struct xc_sr
  */
 static int send_all_pages(struct xc_sr_context *ctx)
 {
-    DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
-                                    &ctx->save.dirty_bitmap_hbuf);
-
-    bitmap_set(dirty_bitmap, ctx->save.p2m_size);
-
-    return send_dirty_pages(ctx, ctx->save.p2m_size);
+    return send_dirty_pages(ctx, ctx->save.p2m_size, true /* all_dirty */);
 }
 
 static int enable_logdirty(struct xc_sr_context *ctx)
@@ -508,9 +503,6 @@ static int send_memory_live(struct xc_sr
     int rc;
     int policy_decision;
 
-    DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
-                                    &ctx->save.dirty_bitmap_hbuf);
-
     precopy_policy_t precopy_policy = ctx->save.callbacks->precopy_policy;
     void *data = ctx->save.callbacks->data;
 
@@ -528,8 +520,6 @@ static int send_memory_live(struct xc_sr
     if ( precopy_policy == NULL )
         precopy_policy = simple_precopy_policy;
 
-    bitmap_set(dirty_bitmap, ctx->save.p2m_size);
-
     for ( ; ; )
     {
         policy_decision = precopy_policy(*policy_stats, data);
@@ -541,7 +531,7 @@ static int send_memory_live(struct xc_sr
             if ( rc )
                 goto out;
 
-            rc = send_dirty_pages(ctx, stats.dirty_count);
+            rc = send_dirty_pages(ctx, stats.dirty_count, x == 1);
             if ( rc )
                 goto out;
         }
@@ -687,7 +677,8 @@ static int suspend_and_send_dirty(struct
         }
     }
 
-    rc = send_dirty_pages(ctx, stats.dirty_count + ctx->save.nr_deferred_pages);
+    rc = send_dirty_pages(ctx, stats.dirty_count + ctx->save.nr_deferred_pages,
+                          false /* all_dirty */);
     if ( rc )
         goto out;
 
@@ -807,8 +798,11 @@ static int setup(struct xc_sr_context *c
     if ( rc )
        goto err;
 
-    dirty_bitmap = xc_hypercall_buffer_alloc_pages(
-        xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->save.p2m_size)));
+    dirty_bitmap = ctx->save.live || ctx->stream_type != XC_STREAM_PLAIN
+        ? xc_hypercall_buffer_alloc_pages(
+              xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->save.p2m_size)))
+        : (void *)-1L;
+
     ctx->save.batch_pfns = malloc(MAX_BATCH_SIZE *
                                   sizeof(*ctx->save.batch_pfns));
     ctx->save.deferred_pages = bitmap_alloc(ctx->save.p2m_size);
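
[Editor's note] As an illustration of the short-circuit, here is a minimal
standalone C sketch; it is not the libxenguest code, and the names
demo_send_dirty_pages, send_page and PAGES are made up for this example.
The point it shows: with all_dirty set, the bitmap is never consulted, so
the caller need not populate (or, in the single all-dirty-run case, even
allocate) it.

/*
 * Simplified sketch of the "all dirty" short-circuit. Names here are
 * illustrative only and do not exist in libxenguest.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGES 16

/* Stand-in for actually transmitting a page: just report it. */
static void send_page(unsigned long p)
{
    printf("sending page %lu\n", p);
}

static int demo_send_dirty_pages(const unsigned char *dirty_bitmap,
                                 unsigned long nr_pages, bool all_dirty)
{
    for ( unsigned long p = 0; p < nr_pages; ++p )
    {
        /* With all_dirty set the bitmap is never dereferenced. */
        if ( !all_dirty && !dirty_bitmap[p] )
            continue;
        send_page(p);
    }

    return 0;
}

int main(void)
{
    /* All-dirty path: no bitmap needed at all. */
    demo_send_dirty_pages(NULL, PAGES, true);

    /* Incremental path: only pages flagged dirty are sent. */
    unsigned char *bitmap = calloc(PAGES, 1);

    if ( !bitmap )
        return 1;
    bitmap[3] = bitmap[7] = 1;
    demo_send_dirty_pages(bitmap, PAGES, false);
    free(bitmap);

    return 0;
}

The patch itself goes one step further for the non-live plain-stream case:
since send_all_pages() is then the only consumer, setup() assigns a poison
value, (void *)-1L, instead of allocating the hypercall buffer at all.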