[PATCH v20210616 35/36] tools: use sr_bitmap for populated_pfns
Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

v02:
 - remove xg_ prefix from called functions
---
 tools/libs/saverestore/common.h          | 21 +++++++-
 tools/libs/saverestore/restore.c         | 69 ------------------------
 tools/libs/saverestore/restore_x86_hvm.c |  9 ++++
 tools/libs/saverestore/restore_x86_pv.c  |  7 +++
 4 files changed, 35 insertions(+), 71 deletions(-)

diff --git a/tools/libs/saverestore/common.h b/tools/libs/saverestore/common.h
index 43aa1a7b86..43a31f9aa5 100644
--- a/tools/libs/saverestore/common.h
+++ b/tools/libs/saverestore/common.h
@@ -403,8 +403,7 @@ struct xc_sr_context
             uint32_t xenstore_domid, console_domid;
 
             /* Bitmap of currently populated PFNs during restore. */
-            unsigned long *populated_pfns;
-            xen_pfn_t max_populated_pfn;
+            struct sr_bitmap populated_pfns;
 
             /* Sender has invoked verify mode on the stream. */
             bool verify;
@@ -629,6 +628,24 @@ static inline bool page_type_has_stream_data(uint32_t type)
     }
     return ret;
 }
+
+static inline bool pfn_is_populated(struct xc_sr_context *ctx, xen_pfn_t pfn)
+{
+    return sr_test_bit(pfn, &ctx->restore.populated_pfns);
+}
+
+static inline int pfn_set_populated(struct xc_sr_context *ctx, xen_pfn_t pfn)
+{
+    xc_interface *xch = ctx->xch;
+
+    if ( sr_set_bit(pfn, &ctx->restore.populated_pfns) == false )
+    {
+        PERROR("Failed to realloc populated_pfns bitmap");
+        errno = ENOMEM;
+        return -1;
+    }
+    return 0;
+}
 #endif
 /*
  * Local variables:
diff --git a/tools/libs/saverestore/restore.c b/tools/libs/saverestore/restore.c
index d0148606bf..8f7bce2585 100644
--- a/tools/libs/saverestore/restore.c
+++ b/tools/libs/saverestore/restore.c
@@ -71,64 +71,6 @@ static int read_headers(struct xc_sr_context *ctx)
     return 0;
 }
 
-/*
- * Is a pfn populated?
- */
-static bool pfn_is_populated(const struct xc_sr_context *ctx, xen_pfn_t pfn)
-{
-    if ( pfn > ctx->restore.max_populated_pfn )
-        return false;
-    return test_bit(pfn, ctx->restore.populated_pfns);
-}
-
-/*
- * Set a pfn as populated, expanding the tracking structures if needed. To
- * avoid realloc()ing too excessively, the size increased to the nearest power
- * of two large enough to contain the required pfn.
- */
-static int pfn_set_populated(struct xc_sr_context *ctx, xen_pfn_t pfn)
-{
-    xc_interface *xch = ctx->xch;
-
-    if ( pfn > ctx->restore.max_populated_pfn )
-    {
-        xen_pfn_t new_max;
-        size_t old_sz, new_sz;
-        unsigned long *p;
-
-        /* Round up to the nearest power of two larger than pfn, less 1. */
-        new_max = pfn;
-        new_max |= new_max >> 1;
-        new_max |= new_max >> 2;
-        new_max |= new_max >> 4;
-        new_max |= new_max >> 8;
-        new_max |= new_max >> 16;
-#ifdef __x86_64__
-        new_max |= new_max >> 32;
-#endif
-
-        old_sz = bitmap_size(ctx->restore.max_populated_pfn + 1);
-        new_sz = bitmap_size(new_max + 1);
-        p = realloc(ctx->restore.populated_pfns, new_sz);
-        if ( !p )
-        {
-            ERROR("Failed to realloc populated bitmap");
-            errno = ENOMEM;
-            return -1;
-        }
-
-        memset((uint8_t *)p + old_sz, 0x00, new_sz - old_sz);
-
-        ctx->restore.populated_pfns = p;
-        ctx->restore.max_populated_pfn = new_max;
-    }
-
-    assert(!test_bit(pfn, ctx->restore.populated_pfns));
-    set_bit(pfn, ctx->restore.populated_pfns);
-
-    return 0;
-}
-
 /*
  * Given a set of pfns, obtain memory from Xen to fill the physmap for the
  * unpopulated subset. If types is NULL, no page type checking is performed
@@ -929,16 +871,6 @@ static int setup(struct xc_sr_context *ctx)
     if ( rc )
         goto err;
 
-    ctx->restore.max_populated_pfn = (32 * 1024 / 4) - 1;
-    ctx->restore.populated_pfns = bitmap_alloc(
-        ctx->restore.max_populated_pfn + 1);
-    if ( !ctx->restore.populated_pfns )
-    {
-        ERROR("Unable to allocate memory for populated_pfns bitmap");
-        rc = -1;
-        goto err;
-    }
-
     ctx->restore.buffered_records = malloc(
         DEFAULT_BUF_RECORDS * sizeof(struct xc_sr_record));
     if ( !ctx->restore.buffered_records )
@@ -977,7 +909,6 @@ static void cleanup(struct xc_sr_context *ctx)
 
     free(ctx->restore.m);
     free(ctx->restore.buffered_records);
-    free(ctx->restore.populated_pfns);
 
     if ( ctx->restore.ops.cleanup(ctx) )
         PERROR("Failed to clean up");
diff --git a/tools/libs/saverestore/restore_x86_hvm.c b/tools/libs/saverestore/restore_x86_hvm.c
index bd63bd2818..97e7e0f48c 100644
--- a/tools/libs/saverestore/restore_x86_hvm.c
+++ b/tools/libs/saverestore/restore_x86_hvm.c
@@ -136,6 +136,7 @@ static int x86_hvm_localise_page(struct xc_sr_context *ctx,
 static int x86_hvm_setup(struct xc_sr_context *ctx)
 {
     xc_interface *xch = ctx->xch;
+    unsigned long max_pfn;
 
     if ( ctx->restore.guest_type != DHDR_TYPE_X86_HVM )
     {
@@ -161,6 +162,13 @@ static int x86_hvm_setup(struct xc_sr_context *ctx)
     }
 #endif
 
+    max_pfn = max(ctx->restore.p2m_size, ctx->dominfo.max_memkb >> (PAGE_SHIFT-10));
+    if ( !sr_bitmap_expand(&ctx->restore.populated_pfns, max_pfn) )
+    {
+        PERROR("Unable to allocate memory for populated_pfns bitmap");
+        return -1;
+    }
+
     return 0;
 }
 
@@ -241,6 +249,7 @@ static int x86_hvm_stream_complete(struct xc_sr_context *ctx)
 
 static int x86_hvm_cleanup(struct xc_sr_context *ctx)
 {
+    sr_bitmap_free(&ctx->restore.populated_pfns);
     free(ctx->x86.hvm.restore.context.ptr);
 
     free(ctx->x86.restore.cpuid.ptr);
diff --git a/tools/libs/saverestore/restore_x86_pv.c b/tools/libs/saverestore/restore_x86_pv.c
index 96608e5231..c73a3cd99f 100644
--- a/tools/libs/saverestore/restore_x86_pv.c
+++ b/tools/libs/saverestore/restore_x86_pv.c
@@ -1060,6 +1060,12 @@ static int x86_pv_setup(struct xc_sr_context *ctx)
     if ( rc )
         return rc;
 
+    if ( !sr_bitmap_expand(&ctx->restore.populated_pfns, 32 * 1024 / 4) )
+    {
+        PERROR("Unable to allocate memory for populated_pfns bitmap");
+        return -1;
+    }
+
     ctx->x86.pv.restore.nr_vcpus = ctx->dominfo.max_vcpu_id + 1;
     ctx->x86.pv.restore.vcpus = calloc(sizeof(struct xc_sr_x86_pv_restore_vcpu),
                                        ctx->x86.pv.restore.nr_vcpus);
@@ -1153,6 +1159,7 @@ static int x86_pv_stream_complete(struct xc_sr_context *ctx)
  */
 static int x86_pv_cleanup(struct xc_sr_context *ctx)
 {
+    sr_bitmap_free(&ctx->restore.populated_pfns);
     free(ctx->x86.pv.p2m);
     free(ctx->x86.pv.p2m_pfns);
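
For readers without the rest of this series at hand: the sr_bitmap helpers used
above are introduced by an earlier patch in the series. What follows is a
minimal sketch of that interface as implied by the call sites in this patch
(sr_bitmap_expand takes a bit count and returns false on allocation failure,
sr_set_bit grows the bitmap on demand, sr_test_bit treats out-of-range bits as
clear). The struct layout and function bodies here are illustrative only, not
the series' actual implementation.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct sr_bitmap {
    void *p;
    unsigned long bits;   /* number of valid bits behind p */
};

/* Grow the bitmap to hold at least 'bits' bits; false on allocation failure. */
static bool sr_bitmap_expand(struct sr_bitmap *bm, unsigned long bits)
{
    size_t old_sz = (bm->bits + 7) / 8, new_sz;
    void *p;

    if ( bits <= bm->bits )
        return true;

    new_sz = (bits + 7) / 8;
    p = realloc(bm->p, new_sz);
    if ( !p )
        return false;

    memset((char *)p + old_sz, 0, new_sz - old_sz);  /* new bits start clear */
    bm->p = p;
    bm->bits = bits;
    return true;
}

static void sr_bitmap_free(struct sr_bitmap *bm)
{
    free(bm->p);
    bm->p = NULL;
    bm->bits = 0;
}

/* Set bit 'bit', expanding on demand; false if the expansion failed. */
static bool sr_set_bit(unsigned long bit, struct sr_bitmap *bm)
{
    if ( bit >= bm->bits && !sr_bitmap_expand(bm, bit + 1) )
        return false;

    ((unsigned char *)bm->p)[bit / 8] |= 1u << (bit % 8);
    return true;
}

/* Test bit 'bit'; bits beyond the current size read as clear. */
static bool sr_test_bit(unsigned long bit, const struct sr_bitmap *bm)
{
    if ( bit >= bm->bits )
        return false;

    return ((const unsigned char *)bm->p)[bit / 8] & (1u << (bit % 8));
}

Under these assumed semantics, the net effect of the patch is that the
grow-on-demand logic moves behind sr_set_bit(), so the restore code no longer
tracks max_populated_pfn or rounds sizes to powers of two itself, and the
arch-specific setup hooks can pre-size the bitmap from p2m_size or max_memkb
in one call.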