# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1188482320 -3600
# Node ID df860912f348dce37ed4d42702d9dfe3f71b9b20
# Parent 104aba61893e1362a8ac877c34f310c681042f4a
Optimize xc_core.c implementation.
Optimize out unnecessary map/unmap of foreign domain pages
by moving the p2m/pfn table after the pages array.
This patch doesn't change the xm dump-core format.
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
tools/libxc/xc_core.c | 259 +++++++++++++++++++++++++-------------------------
tools/libxc/xc_core.h | 2
2 files changed, 136 insertions(+), 125 deletions(-)
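
Background for reviewers: in the old layout the p2m/pfn table had to be
written out before the pages, so the code first walked the memory map to
build the table (in the auto-translated case mapping and unmapping every
page just to probe whether it exists) and then walked the table again,
mapping every page a second time to copy it out. With the .xen_pages
section placed before the table, a single pass can map each page once,
copy it, and record the table entry at the same time. The following is a
standalone sketch of that loop restructuring only; map_guest_page(),
unmap_guest_page() and copy_out() are hypothetical stand-ins for
xc_map_foreign_range()/munmap()/dump_rtn(), not libxc functions.

/*
 * Minimal sketch of the restructuring, not the libxc code itself.
 * map_guest_page()/unmap_guest_page()/copy_out() are hypothetical
 * stand-ins for xc_map_foreign_range()/munmap()/dump_rtn().
 */
#include <stddef.h>

#define PAGE_SIZE 4096
#define NR_PFNS   8

static char backing[NR_PFNS][PAGE_SIZE];        /* fake guest memory */
static void *map_guest_page(unsigned long pfn)  { return backing[pfn]; }
static void unmap_guest_page(void *va)          { (void)va; }
static void copy_out(const void *buf, size_t n) { (void)buf; (void)n; }

/* Old section order (.xen_p2m before .xen_pages): the table is built in
 * a probe pass (which, for auto-translated guests, maps and unmaps each
 * page just to test it), then every page is mapped again in a second
 * pass to copy its contents out. */
static void dump_two_pass(void)
{
    unsigned long pfns[NR_PFNS];
    size_t n = 0;

    for ( unsigned long pfn = 0; pfn < NR_PFNS; pfn++ )  /* pass 1: probe */
    {
        void *va = map_guest_page(pfn);
        if ( va == NULL )
            continue;
        unmap_guest_page(va);
        pfns[n++] = pfn;
    }
    copy_out(pfns, n * sizeof(pfns[0]));                 /* table first */

    for ( size_t j = 0; j < n; j++ )                     /* pass 2: copy */
    {
        void *va = map_guest_page(pfns[j]);
        if ( va == NULL )
            continue;
        copy_out(va, PAGE_SIZE);
        unmap_guest_page(va);
    }
}

/* New section order (.xen_pages before .xen_p2m): one pass, one map per
 * page; the table entry is recorded while the page is already mapped,
 * and the table is written out after the pages. */
static void dump_single_pass(void)
{
    unsigned long pfns[NR_PFNS];
    size_t n = 0;

    for ( unsigned long pfn = 0; pfn < NR_PFNS; pfn++ )
    {
        void *va = map_guest_page(pfn);
        if ( va == NULL )
            continue;
        copy_out(va, PAGE_SIZE);                         /* pages section */
        unmap_guest_page(va);
        pfns[n++] = pfn;                                 /* table entry */
    }
    copy_out(pfns, n * sizeof(pfns[0]));                 /* table last */
}

int main(void)
{
    dump_two_pass();
    dump_single_pass();
    return 0;
}

Since the table is emitted after the pages, its contents can be
accumulated while the pages are copied, so each guest page is mapped
exactly once.
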
diff -r 104aba61893e -r df860912f348 tools/libxc/xc_core.c
--- a/tools/libxc/xc_core.c Thu Aug 30 14:57:24 2007 +0100
+++ b/tools/libxc/xc_core.c Thu Aug 30 14:58:40 2007 +0100
@@ -17,8 +17,8 @@
* | .xen_prstatus |
* | .xen_ia64_mmapped_regs if ia64 |
* | .xen_shared_info if present |
+ * | .xen_pages |
* | .xen_p2m or .xen_pfn |
- * | .xen_pages |
* +--------------------------------------------------------+
* |.note.Xen:note section |
* | "Xen" is used as note name, |
@@ -37,12 +37,12 @@
* +--------------------------------------------------------+
* |.xen_shared_info if possible |
* +--------------------------------------------------------+
+ * |.xen_pages |
+ * | page * nr_pages |
+ * +--------------------------------------------------------+
* |.xen_p2m or .xen_pfn |
* | .xen_p2m: struct xen_dumpcore_p2m[nr_pages] |
* | .xen_pfn: uint64_t[nr_pages] |
- * +--------------------------------------------------------+
- * |.xen_pages |
- * | page * nr_pages |
* +--------------------------------------------------------+
* |.shstrtab: section header string table |
* +--------------------------------------------------------+
@@ -57,21 +57,6 @@
/* number of pages to write at a time */
#define DUMP_INCREMENT (4 * 1024)
-
-static int
-copy_from_domain_page(int xc_handle,
- uint32_t domid,
- unsigned long mfn,
- void *dst_page)
-{
- void *vaddr = xc_map_foreign_range(
- xc_handle, domid, PAGE_SIZE, PROT_READ, mfn);
- if ( vaddr == NULL )
- return -1;
- memcpy(dst_page, vaddr, PAGE_SIZE);
- munmap(vaddr, PAGE_SIZE);
- return 0;
-}
/* string table */
struct xc_core_strtab {
@@ -520,47 +505,6 @@ xc_domain_dumpcore_via_callback(int xc_h
}
}
- /* create .xen_p2m or .xen_pfn */
- j = 0;
- for ( map_idx = 0; map_idx < nr_memory_map; map_idx++ )
- {
- uint64_t pfn_start;
- uint64_t pfn_end;
-
- pfn_start = memory_map[map_idx].addr >> PAGE_SHIFT;
- pfn_end = pfn_start + (memory_map[map_idx].size >> PAGE_SHIFT);
- for ( i = pfn_start; i < pfn_end; i++ )
- {
- if ( !auto_translated_physmap )
- {
- if ( p2m[i] == INVALID_P2M_ENTRY )
- continue;
- p2m_array[j].pfn = i;
- p2m_array[j].gmfn = p2m[i];
- }
- else
- {
- /* try to map page to determin wheter it has underlying page */
- void *vaddr = xc_map_foreign_range(xc_handle, domid,
- PAGE_SIZE, PROT_READ, i);
- if ( vaddr == NULL )
- continue;
- munmap(vaddr, PAGE_SIZE);
- pfn_array[j] = i;
- }
-
- j++;
- }
- }
- if ( j != nr_pages )
- {
- PERROR("j (%ld) != nr_pages (%ld)", j , nr_pages);
- /* When live dump-mode (-L option) is specified,
- * guest domain may change its mapping.
- */
- nr_pages = j;
- }
-
/* ehdr.e_shnum and ehdr.e_shstrndx aren't known here yet. fill it later*/
xc_core_ehdr_init(&ehdr);
@@ -660,48 +604,12 @@ xc_domain_dumpcore_via_callback(int xc_h
offset += filesz;
}
- /* p2m/pfn table */
- shdr = xc_core_shdr_get(sheaders);
- if ( shdr == NULL )
- {
- PERROR("Could not get section header for .xen_{p2m, pfn} table");
- goto out;
- }
- if ( !auto_translated_physmap )
- {
- filesz = nr_pages * sizeof(p2m_array[0]);
- sts = xc_core_shdr_set(shdr, strtab, XEN_DUMPCORE_SEC_P2M,
- SHT_PROGBITS,
- offset, filesz, __alignof__(p2m_array[0]),
- sizeof(p2m_array[0]));
- if ( sts != 0 )
- goto out;
- }
- else
- {
- filesz = nr_pages * sizeof(pfn_array[0]);
- sts = xc_core_shdr_set(shdr, strtab, XEN_DUMPCORE_SEC_PFN,
- SHT_PROGBITS,
- offset, filesz, __alignof__(pfn_array[0]),
- sizeof(pfn_array[0]));
- if ( sts != 0 )
- goto out;
- }
- offset += filesz;
-
- /* pages */
- shdr = xc_core_shdr_get(sheaders);
- if ( shdr == NULL )
- {
- PERROR("could not get section headers for .xen_pages");
- goto out;
- }
-
/*
- * pages are the last section to allocate section headers
+ * pages and p2m/pfn are the last sections to allocate section headers
* so that we know the number of section headers here.
+ * 2 = pages section and p2m/pfn table section
*/
- fixup = sheaders->num * sizeof(*shdr);
+ fixup = (sheaders->num + 2) * sizeof(*shdr);
/* zeroth section should have zero offset */
for ( i = 1; i < sheaders->num; i++ )
sheaders->shdrs[i].sh_offset += fixup;
@@ -709,9 +617,43 @@ xc_domain_dumpcore_via_callback(int xc_h
dummy_len = ROUNDUP(offset, PAGE_SHIFT) - offset; /* padding length */
offset += dummy_len;
+ /* pages */
+ shdr = xc_core_shdr_get(sheaders);
+ if ( shdr == NULL )
+ {
+ PERROR("could not get section headers for .xen_pages");
+ goto out;
+ }
filesz = nr_pages * PAGE_SIZE;
sts = xc_core_shdr_set(shdr, strtab, XEN_DUMPCORE_SEC_PAGES, SHT_PROGBITS,
offset, filesz, PAGE_SIZE, PAGE_SIZE);
+ if ( sts != 0 )
+ goto out;
+ offset += filesz;
+
+ /* p2m/pfn table */
+ shdr = xc_core_shdr_get(sheaders);
+ if ( shdr == NULL )
+ {
+ PERROR("Could not get section header for .xen_{p2m, pfn} table");
+ goto out;
+ }
+ if ( !auto_translated_physmap )
+ {
+ filesz = nr_pages * sizeof(p2m_array[0]);
+ sts = xc_core_shdr_set(shdr, strtab, XEN_DUMPCORE_SEC_P2M,
+ SHT_PROGBITS,
+ offset, filesz, __alignof__(p2m_array[0]),
+ sizeof(p2m_array[0]));
+ }
+ else
+ {
+ filesz = nr_pages * sizeof(pfn_array[0]);
+ sts = xc_core_shdr_set(shdr, strtab, XEN_DUMPCORE_SEC_PFN,
+ SHT_PROGBITS,
+ offset, filesz, __alignof__(pfn_array[0]),
+ sizeof(pfn_array[0]));
+ }
if ( sts != 0 )
goto out;
offset += filesz;
@@ -736,7 +678,7 @@ xc_domain_dumpcore_via_callback(int xc_h
/* elf note section: xen core header */
sts = elfnote_dump_none(args, dump_rtn);
- if ( sts != 0)
+ if ( sts != 0 )
goto out;
/* elf note section: xen core header */
@@ -772,16 +714,6 @@ xc_domain_dumpcore_via_callback(int xc_h
if ( sts != 0 )
goto out;
- /* p2m/pfn table: .xen_p2m/.xen_pfn */
- if ( !auto_translated_physmap )
- sts = dump_rtn(args, (char *)p2m_array,
- sizeof(p2m_array[0]) * nr_pages);
- else
- sts = dump_rtn(args, (char *)pfn_array,
- sizeof(pfn_array[0]) * nr_pages);
- if ( sts != 0 )
- goto out;
-
/* Pad the output data to page alignment. */
memset(dummy, 0, PAGE_SIZE);
sts = dump_rtn(args, dummy, dummy_len);
@@ -789,24 +721,99 @@ xc_domain_dumpcore_via_callback(int xc_h
goto out;
/* dump pages: .xen_pages */
- for ( dump_mem = dump_mem_start, i = 0; i < nr_pages; i++ )
- {
- uint64_t gmfn;
- if ( !auto_translated_physmap )
- gmfn = p2m_array[i].gmfn;
- else
- gmfn = pfn_array[i];
-
- copy_from_domain_page(xc_handle, domid, gmfn, dump_mem);
- dump_mem += PAGE_SIZE;
- if ( ((i + 1) % DUMP_INCREMENT == 0) || ((i + 1) == nr_pages) )
+ j = 0;
+ dump_mem = dump_mem_start;
+ for ( map_idx = 0; map_idx < nr_memory_map; map_idx++ )
+ {
+ uint64_t pfn_start;
+ uint64_t pfn_end;
+
+ pfn_start = memory_map[map_idx].addr >> PAGE_SHIFT;
+ pfn_end = pfn_start + (memory_map[map_idx].size >> PAGE_SHIFT);
+ for ( i = pfn_start; i < pfn_end; i++ )
{
- sts = dump_rtn(args, dump_mem_start, dump_mem - dump_mem_start);
+ uint64_t gmfn;
+ void *vaddr;
+
+ if ( j >= nr_pages )
+ {
+ /*
+ * When live dump-mode (-L option) is specified,
+ * guest domain may increase memory.
+ */
+ IPRINTF("exceeded nr_pages (%ld) losing pages", nr_pages);
+ goto copy_done;
+ }
+
+ if ( !auto_translated_physmap )
+ {
+ gmfn = p2m[i];
+ if ( gmfn == INVALID_P2M_ENTRY )
+ continue;
+
+ p2m_array[j].pfn = i;
+ p2m_array[j].gmfn = gmfn;
+ }
+ else
+ {
+ gmfn = i;
+ pfn_array[j] = i;
+ }
+
+ vaddr = xc_map_foreign_range(
+ xc_handle, domid, PAGE_SIZE, PROT_READ, gmfn);
+ if ( vaddr == NULL )
+ continue;
+ memcpy(dump_mem, vaddr, PAGE_SIZE);
+ munmap(vaddr, PAGE_SIZE);
+ dump_mem += PAGE_SIZE;
+ if ( (j + 1) % DUMP_INCREMENT == 0 )
+ {
+ sts = dump_rtn(
+ args, dump_mem_start, dump_mem - dump_mem_start);
+ if ( sts != 0 )
+ goto out;
+ dump_mem = dump_mem_start;
+ }
+
+ j++;
+ }
+ }
+
+copy_done:
+ sts = dump_rtn(args, dump_mem_start, dump_mem - dump_mem_start);
+ if ( sts != 0 )
+ goto out;
+ if ( j < nr_pages )
+ {
+ /* When live dump-mode (-L option) is specified,
+ * guest domain may reduce memory. pad with zero pages.
+ */
+ IPRINTF("j (%ld) != nr_pages (%ld)", j , nr_pages);
+ memset(dump_mem_start, 0, PAGE_SIZE);
+ for (; j < nr_pages; j++) {
+ sts = dump_rtn(args, dump_mem_start, PAGE_SIZE);
if ( sts != 0 )
goto out;
- dump_mem = dump_mem_start;
- }
- }
+ if ( !auto_translated_physmap )
+ {
+ p2m_array[j].pfn = XC_CORE_INVALID_PFN;
+ p2m_array[j].gmfn = XC_CORE_INVALID_GMFN;
+ }
+ else
+ pfn_array[j] = XC_CORE_INVALID_PFN;
+ }
+ }
+
+ /* p2m/pfn table: .xen_p2m/.xen_pfn */
+ if ( !auto_translated_physmap )
+ sts = dump_rtn(
+ args, (char *)p2m_array, sizeof(p2m_array[0]) * nr_pages);
+ else
+ sts = dump_rtn(
+ args, (char *)pfn_array, sizeof(pfn_array[0]) * nr_pages);
+ if ( sts != 0 )
+ goto out;
/* elf section header string table: .shstrtab */
sts = dump_rtn(args, strtab->strings, strtab->current);
@@ -816,6 +823,8 @@ xc_domain_dumpcore_via_callback(int xc_h
sts = 0;
out:
+ if ( memory_map != NULL )
+ free(memory_map);
if ( p2m != NULL )
munmap(p2m, PAGE_SIZE * P2M_FL_ENTRIES);
if ( p2m_array != NULL )
diff -r 104aba61893e -r df860912f348 tools/libxc/xc_core.h
--- a/tools/libxc/xc_core.h Thu Aug 30 14:57:24 2007 +0100
+++ b/tools/libxc/xc_core.h Thu Aug 30 14:58:40 2007 +0100
@@ -107,6 +107,8 @@ struct xen_dumpcore_elfnote_format_versi
struct xen_dumpcore_elfnote_format_version_desc format_version;
};
+#define XC_CORE_INVALID_PFN (~(uint64_t)0)
+#define XC_CORE_INVALID_GMFN (~(uint64_t)0)
struct xen_dumpcore_p2m {
uint64_t pfn;
uint64_t gmfn;
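
Side note (not part of the patch text): the two new XC_CORE_INVALID_*
markers let a dump consumer recognize the zero pages that are appended
when a live dump (-L) races with the guest shrinking its allocation. A
rough reader-side sketch, assuming the dump's p2m table has already been
loaded into memory; walk_p2m() is a hypothetical helper, not part of
libxc:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define XC_CORE_INVALID_PFN  (~(uint64_t)0)
#define XC_CORE_INVALID_GMFN (~(uint64_t)0)

struct xen_dumpcore_p2m {
    uint64_t pfn;
    uint64_t gmfn;
};

/* Entries marked invalid correspond to the zero-filled padding pages
 * appended when the guest shrank during a live dump; a reader can
 * simply skip them. */
static void walk_p2m(const struct xen_dumpcore_p2m *p2m, uint64_t nr_pages)
{
    for ( uint64_t j = 0; j < nr_pages; j++ )
    {
        if ( p2m[j].pfn == XC_CORE_INVALID_PFN &&
             p2m[j].gmfn == XC_CORE_INVALID_GMFN )
            continue; /* padding entry, no real guest page behind it */
        printf("pfn 0x%" PRIx64 " -> gmfn 0x%" PRIx64 "\n",
               p2m[j].pfn, p2m[j].gmfn);
    }
}

int main(void)
{
    struct xen_dumpcore_p2m table[] = {
        { 0x100, 0x5a0 },
        { XC_CORE_INVALID_PFN, XC_CORE_INVALID_GMFN }, /* padded slot */
    };
    walk_p2m(table, sizeof(table) / sizeof(table[0]));
    return 0;
}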