WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [PATCH 5/5] dump-core take 2: elf formatify and added PFN-GMFN table

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 5/5] dump-core take 2: elf formatify and added PFN-GMFN table
From: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Date: Thu, 18 Jan 2007 15:53:08 +0900
Cc: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>, John Levon <levon@xxxxxxxxxxxxxxxxx>, Dave Anderson <anderson@xxxxxxxxxx>, xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
Delivery-date: Wed, 17 Jan 2007 22:54:57 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
In-reply-to: <20070118065242.4302.39670.sendpatchset@xxxxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <20070118065242.4302.39670.sendpatchset@xxxxxxxxxxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1169101101 -32400
# Node ID 7da70af62b577478389d36069c824c4f2180f95e
# Parent  dae81535b77157d2bc3c3547088f0ef512c3b5d2
Use the guest's own p2m table instead of xc_get_pfn_list(), which cannot handle
PFNs with no MFN.
Dump a zeroed page for PFNs with no MFN.
Clearly deprecate xc_get_pfn_list().
Do not include a P2M table with HVM domains.
Refuse to dump HVM until we can map its pages with PFNs.

Signed-off-by: John Levon <john.levon@xxxxxxx>


ELF formatified.
added PFN-GMFN table.
HVM domain support.
experimental IA64 support.
NOTE: IA64 support is for review only. It doesn't work because
Xen/IA64 doesn't support memory map hypercall.
TODO: Xen/IA64 memory map hypercall.
PATCHNAME: xm_dump_core_elf
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>

diff -r dae81535b771 -r 7da70af62b57 tools/libxc/xc_core.c
--- a/tools/libxc/xc_core.c     Tue Jan 16 15:42:29 2007 +0900
+++ b/tools/libxc/xc_core.c     Thu Jan 18 15:18:21 2007 +0900
@@ -1,10 +1,18 @@
+/*
+ * Elf format, (pfn, gmfn) table, IA64 support.
+ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ */
+
 #include "xg_private.h"
+#include "xc_elf.h"
+#include "xc_core.h"
 #include <stdlib.h>
 #include <unistd.h>
 
 /* number of pages to write at a time */
 #define DUMP_INCREMENT (4 * 1024)
-#define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
 
 static int
 copy_from_domain_page(int xc_handle,
@@ -21,107 +29,718 @@ copy_from_domain_page(int xc_handle,
     return 0;
 }
 
+#if defined(__i386__) || defined(__x86_64__)
+#define ELF_ARCH_DATA           ELFDATA2LSB
+#if defined (__i386__)
+# define ELF_ARCH_MACHINE       EM_386
+#else
+# define ELF_ARCH_MACHINE       EM_X86_64
+#endif
+
+static int
+map_p2m(int xc_handle, xc_dominfo_t *info, xen_pfn_t **live_p2m,
+        unsigned long *pfnp)
+{
+    /* Double and single indirect references to the live P2M table */
+    xen_pfn_t *live_p2m_frame_list_list = NULL;
+    xen_pfn_t *live_p2m_frame_list = NULL;
+    shared_info_t *live_shinfo = NULL;
+    uint32_t dom = info->domid;
+    unsigned long max_pfn = 0;
+    int ret = -1;
+    int err;
+
+    /* Map the shared info frame */
+    live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+        PROT_READ, info->shared_info_frame);
+
+    if ( !live_shinfo )
+    {
+        PERROR("Couldn't map live_shinfo");
+        goto out;
+    }
+
+    max_pfn = live_shinfo->arch.max_pfn;
+
+    if ( max_pfn < info->nr_pages  )
+    {
+        ERROR("max_pfn < nr_pages (%lx < %lx)", max_pfn, info->nr_pages);
+        goto out;
+    }
+
+    live_p2m_frame_list_list =
+        xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ,
+                             live_shinfo->arch.pfn_to_mfn_frame_list_list);
+
+    if ( !live_p2m_frame_list_list )
+    {
+        PERROR("Couldn't map p2m_frame_list_list (errno %d)", errno);
+        goto out;
+    }
+
+    live_p2m_frame_list =
+        xc_map_foreign_batch(xc_handle, dom, PROT_READ,
+                             live_p2m_frame_list_list,
+                             P2M_FLL_ENTRIES);
+
+    if ( !live_p2m_frame_list )
+    {
+        PERROR("Couldn't map p2m_frame_list");
+        goto out;
+    }
+
+    *live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_READ,
+                                    live_p2m_frame_list,
+                                    P2M_FL_ENTRIES);
+
+    if ( !*live_p2m )
+    {
+        PERROR("Couldn't map p2m table");
+        goto out;
+    }
+
+    *pfnp = max_pfn;
+
+
+    ret = 0;
+
+out:
+    err = errno;
+
+    if ( live_shinfo )
+        munmap(live_shinfo, PAGE_SIZE);
+
+    if ( live_p2m_frame_list_list )
+        munmap(live_p2m_frame_list_list, PAGE_SIZE);
+
+    if ( live_p2m_frame_list )
+        munmap(live_p2m_frame_list, P2M_FLL_ENTRIES * PAGE_SIZE);
+
+    errno = err;
+    return ret;
+}
+
+#include <xen/hvm/e820.h>
+typedef struct e820entry memory_map_entry_t;
+
+static inline int
+memory_map_may_dump(const memory_map_entry_t *entry)
+{
+    return entry->type == E820_RAM && entry->size > 0;
+}
+
+static inline uint64_t
+memory_map_addr(const memory_map_entry_t *entry)
+{
+    return entry->addr;
+}
+
+static inline uint64_t
+memory_map_size(const memory_map_entry_t *entry)
+{
+    return entry->size;
+}
+
+#elif defined (__ia64__)
+#define ELF_ARCH_DATA           ELFDATA2LSB
+#define ELF_ARCH_MACHINE        EM_IA64
+
+static int
+map_p2m(int xc_handle, xc_dominfo_t *info, xen_pfn_t **live_p2m,
+        unsigned long *pfnp)
+{
+    errno = ENOSYS;
+    return -1;
+}
+
+#include "xc_efi.h"
+typedef efi_memory_desc_t memory_map_entry_t;
+
+static inline int
+memory_map_may_dump(const memory_map_entry_t *md)
+{
+    switch ( md->type )
+    {
+    case EFI_RESERVED_TYPE:
+    case EFI_LOADER_CODE:
+    case EFI_LOADER_DATA:
+    case EFI_BOOT_SERVICES_CODE:
+    case EFI_BOOT_SERVICES_DATA:
+    case EFI_RUNTIME_SERVICES_CODE:
+    case EFI_RUNTIME_SERVICES_DATA:
+    case EFI_CONVENTIONAL_MEMORY:
+    case EFI_ACPI_RECLAIM_MEMORY:
+    case EFI_ACPI_MEMORY_NVS:
+    case EFI_PAL_CODE:
+        if ( !(md->attribute & EFI_MEMORY_WB) )
+            return 0;
+        return 1;
+
+    case EFI_MEMORY_MAPPED_IO:
+    case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+    case EFI_UNUSABLE_MEMORY:
+        return 0;
+
+    default:
+        break;
+    }
+    return 0;
+}
+
+static inline uint64_t
+memory_map_addr(const memory_map_entry_t *md)
+{
+    return md->phys_addr;
+}
+
+static inline uint64_t
+memory_map_size(const memory_map_entry_t *md)
+{
+    return md->num_pages << EFI_PAGE_SHIFT;
+}
+
+#else
+# error "unsupported architecture"
+#endif
+
+#ifndef ELF_CORE_EFLAGS
+#define ELF_CORE_EFLAGS 0
+#endif
+
+#ifndef INVALID_MFN
+#define INVALID_MFN    (~0UL)
+#endif
+
+static int
+memory_map_get(int xc_handle, uint32_t domid,
+               memory_map_entry_t **entries, unsigned int *nr_entries)
+{
+    memory_map_entry_t *map;
+    int ret;
+
+    *nr_entries = 5; /* xc_hvm_builder allocates 5 entries */
+again:
+    ret = -1;
+    map = malloc(*nr_entries * sizeof(map[0]));
+    if ( map == NULL )
+    {
+        PERROR("Couldn't allocate e820 entry: nr_entries = %d", *nr_entries);
+        return ret;
+    }
+
+    ret = xc_domain_get_memmap(xc_handle, domid, map, nr_entries);
+    if ( ret != 0 )
+    {
+        if ( errno == EINVAL )
+        {
+            *nr_entries *= 2;
+            free(map);
+            goto again;
+        }
+    }
+    if ( ret == 0 )
+        *entries = map;
+    return ret;
+}
+
+static int
+get_phdr(Elf_Phdr **phdr, unsigned int *max_phdr, unsigned int *nr_phdr)
+{
+    Elf_Phdr *tmp;
+
+    (*nr_phdr)++;
+    if ( *nr_phdr < *max_phdr )
+        return 0;
+
+#define PHDR_INC        4096
+    if ( *max_phdr < PHDR_INC )
+        *max_phdr *= 2;
+    else 
+        *max_phdr += PHDR_INC;
+
+    tmp = realloc(*phdr, *max_phdr * sizeof(Elf_Phdr));
+    if ( tmp == NULL )
+        return -1;
+    *phdr = tmp;
+    return 0;
+}
+
+static void
+set_phdr(Elf_Phdr *phdr, unsigned long offset, uint64_t addr, uint64_t size)
+{
+    memset(phdr, 0, sizeof(*phdr));
+    phdr->p_type = PT_LOAD;
+    phdr->p_flags = PF_X | PF_W | PF_R;
+    phdr->p_offset = offset;
+    phdr->p_vaddr = 0;
+    phdr->p_paddr = addr;
+    phdr->p_filesz = size;
+    phdr->p_memsz = size;
+    phdr->p_align = 0;
+}
+
 int
 xc_domain_dumpcore_via_callback(int xc_handle,
                                 uint32_t domid,
                                 void *args,
                                 dumpcore_rtn_t dump_rtn)
 {
-    unsigned long nr_pages;
-    xen_pfn_t *page_array = NULL;
     xc_dominfo_t info;
-    int i, nr_vcpus = 0;
+    int nr_vcpus = 0;
     char *dump_mem, *dump_mem_start = NULL;
-    struct xc_core_header header;
     vcpu_guest_context_t  ctxt[MAX_VIRT_CPUS];
     char dummy[PAGE_SIZE];
     int dummy_len;
-    int sts;
+    int sts = -1;
+
+    unsigned long i;
+    unsigned long j;
+    unsigned long nr_pages;
+
+    memory_map_entry_t *memory_map = NULL;
+    unsigned int nr_memory_map;
+    unsigned int map_idx;
+    xen_pfn_t pfn;
+
+    int need_p2m_table; /* !XENFEAT_auto_translated_physmap */
+    xen_pfn_t *p2m = NULL;
+    unsigned long max_pfn = 0;
+    struct p2m *p2m_array = NULL;
+
+    int may_balloon;
+    unsigned long nr_pfn_array = 0;
+    xen_pfn_t *pfn_array = NULL;
+    
+    Elf_Ehdr ehdr;
+    unsigned long filesz;
+    unsigned long offset;
+    unsigned long fixup;
+#define INIT_PHDR       32
+    unsigned int max_phdr;
+    unsigned int nr_phdr;
+    Elf_Phdr *phdr;
+    struct xen_note note;
+    struct xen_core_header_desc core_header;
 
     if ( (dump_mem_start = malloc(DUMP_INCREMENT*PAGE_SIZE)) == NULL )
     {
         PERROR("Could not allocate dump_mem");
-        goto error_out;
+        goto out;
     }
 
     if ( xc_domain_getinfo(xc_handle, domid, 1, &info) != 1 )
     {
         PERROR("Could not get info for domain");
-        goto error_out;
-    }
+        goto out;
+    }
+
+#if defined(__i386__) || defined(__x86_64__)
+    need_p2m_table = 1;
+    may_balloon = 1;
+    if ( info.hvm )
+    {
+        need_p2m_table = 0;
+        may_balloon = 0;
+    }
+#elif defined (__ia64__)
+    need_p2m_table = 0;
+    may_balloon = 1;
+    if ( info.hvm )
+        may_balloon = 0;
+#else
+# error "unsupported architecture"
+#endif
 
     if ( domid != info.domid )
     {
         PERROR("Domain %d does not exist", domid);
-        goto error_out;
+        goto out;
     }
 
     for ( i = 0; i <= info.max_vcpu_id; i++ )
-        if ( xc_vcpu_getcontext(xc_handle, domid, i, &ctxt[nr_vcpus]) == 0)
+        if ( xc_vcpu_getcontext(xc_handle, domid, i, &ctxt[nr_vcpus]) == 0 )
             nr_vcpus++;
-
+    if ( nr_vcpus == 0 )
+    {
+        PERROR("No VCPU context could be grabbed");
+        goto out;
+    }
+
+    /* obtain memory map */
+    sts = memory_map_get(xc_handle, domid, &memory_map, &nr_memory_map);
+    if ( sts != 0 )
+        goto out;
+#if 0
+    for ( map_idx = 0; map_idx < nr_memory_map; map_idx++ )
+        DPRINTF("%d: addr %llx size %llx\n", map_idx,
+                memory_map_addr(&memory_map[map_idx]),
+                memory_map_size(&memory_map[map_idx]));
+#endif
+    
     nr_pages = info.nr_pages;
-
-    header.xch_magic = info.hvm ? XC_CORE_MAGIC_HVM : XC_CORE_MAGIC;
-    header.xch_nr_vcpus = nr_vcpus;
-    header.xch_nr_pages = nr_pages;
-    header.xch_ctxt_offset = sizeof(struct xc_core_header);
-    header.xch_index_offset = sizeof(struct xc_core_header) +
-        sizeof(vcpu_guest_context_t)*nr_vcpus;
-    dummy_len = (sizeof(struct xc_core_header) +
-                 (sizeof(vcpu_guest_context_t) * nr_vcpus) +
-                 (nr_pages * sizeof(xen_pfn_t)));
-    header.xch_pages_offset = round_pgup(dummy_len);
-
-    sts = dump_rtn(args, (char *)&header, sizeof(struct xc_core_header));
+    if ( need_p2m_table )
+    {
+        /* obtain p2m table */
+        p2m_array = malloc(nr_pages * sizeof(struct p2m));
+        if ( p2m_array == NULL )
+        {
+            PERROR("Could not allocate p2m array");
+            goto out;
+        }
+
+        sts = map_p2m(xc_handle, &info, &p2m, &max_pfn);
+        if ( sts != 0 )
+            goto out;
+    }
+    else
+    {
+        unsigned long total_pages = 0;
+        unsigned long pages;
+
+        max_pfn = 0;
+        for ( map_idx = 0; map_idx < nr_memory_map; map_idx++ )
+        {
+
+            if ( !memory_map_may_dump(&memory_map[map_idx]) )
+                continue;
+
+            pages = memory_map_size(&memory_map[map_idx]) >> PAGE_SHIFT;
+            pfn = (memory_map_addr(&memory_map[map_idx]) >> PAGE_SHIFT) +
+                pages;
+            if ( max_pfn < pfn )
+                max_pfn = pfn;
+            total_pages += pages;
+        }
+
+        if ( may_balloon )
+        {
+            pfn_array = malloc(total_pages * sizeof(pfn_array[0]));
+            if ( pfn_array == NULL )
+            {
+                PERROR("Could not allocate pfn array");
+                goto out;
+            }
+            nr_pfn_array = total_pages;
+
+            total_pages = 0;
+            for ( map_idx = 0; map_idx < nr_memory_map; map_idx++ )
+            {
+                if ( !memory_map_may_dump(&memory_map[map_idx]) )
+                    continue;
+
+                pages = memory_map_size(&memory_map[map_idx]) >> PAGE_SHIFT;
+                pfn = memory_map_addr(&memory_map[map_idx]) >> PAGE_SHIFT;
+                for ( i = 0; i < pages; i++ )
+                    pfn_array[total_pages + i] = pfn + i;
+                total_pages += pages;
+            }
+
+            sts = xc_domain_translate_gpfn(xc_handle, domid, total_pages,
+                                           pfn_array, pfn_array);
+            if ( sts )
+                goto out;
+        }
+        else if ( nr_pages != total_pages )
+        {
+            PERROR("nr_pages(%ld) != total_pages (%ld)",
+                   nr_pages, total_pages);
+        }
+    }
+
+    memset(&ehdr, 0, sizeof(ehdr));
+    ehdr.e_ident[EI_MAG0] = ELFMAG0;
+    ehdr.e_ident[EI_MAG1] = ELFMAG1;
+    ehdr.e_ident[EI_MAG2] = ELFMAG2;
+    ehdr.e_ident[EI_MAG3] = ELFMAG3;
+    ehdr.e_ident[EI_CLASS] = ELFCLASS;
+    ehdr.e_ident[EI_DATA] = ELF_ARCH_DATA;
+    ehdr.e_ident[EI_VERSION] = EV_CURRENT;
+    ehdr.e_ident[EI_OSABI] = ELFOSABI_SYSV; 
+    ehdr.e_ident[EI_ABIVERSION] = EV_CURRENT;
+
+    ehdr.e_type = ET_CORE;
+    ehdr.e_machine = ELF_ARCH_MACHINE;
+    ehdr.e_version = EV_CURRENT;
+    ehdr.e_entry = 0;
+    ehdr.e_phoff = sizeof(ehdr);
+    ehdr.e_shoff = 0;
+    ehdr.e_flags = ELF_CORE_EFLAGS;
+    ehdr.e_ehsize = sizeof(ehdr);
+    ehdr.e_phentsize = sizeof(Elf_Phdr);
+    /* ehdr.e_phnum isn't known here yet. fill it later */
+    ehdr.e_shentsize = 0;
+    ehdr.e_shnum = 0;
+    ehdr.e_shstrndx = 0;
+
+    /* create program header */
+    nr_phdr = 0;
+    max_phdr = INIT_PHDR;
+    phdr = malloc(max_phdr * sizeof(phdr[0]));
+    if ( phdr == NULL )
+    {
+        PERROR("Could not allocate memory");
+        goto out;
+    }
+    /* here the number of program header is unknown. fix up offset later. */
+    offset = sizeof(ehdr);
+    
+    /* note section */
+    filesz = sizeof(struct xen_core_header) + /* core header */
+        sizeof(struct xen_note) + sizeof(ctxt[0]) * nr_vcpus; /* vcpu context */
+    if ( need_p2m_table )
+        filesz += sizeof(struct xen_note_p2m) + sizeof(p2m_array[0]) * nr_pages; /* p2m table */
+
+
+    memset(&phdr[nr_phdr], 0, sizeof(phdr[0]));
+    phdr[nr_phdr].p_type = PT_NOTE;
+    phdr[nr_phdr].p_flags = 0;
+    phdr[nr_phdr].p_offset = offset;
+    phdr[nr_phdr].p_vaddr = 0;
+    phdr[nr_phdr].p_paddr = 0;
+    phdr[nr_phdr].p_filesz = filesz;
+    phdr[nr_phdr].p_memsz = filesz;
+    phdr[nr_phdr].p_align = 0;
+
+    offset += filesz;
+
+#define INVALID_PFN     (~0UL)
+#define GET_SET_PHDR(offset, addr, size)                    \
+    do {                                                    \
+        sts = get_phdr(&phdr, &max_phdr, &nr_phdr);         \
+        if ( sts )                                          \
+            goto out;                                       \
+        set_phdr(&phdr[nr_phdr], (offset), (addr), (size)); \
+        (offset) += (size);                                 \
+    } while (0)
+#define SET_PHDR_IF_NECESSARY                                   \
+    do {                                                        \
+        if ( last_pfn != INVALID_PFN && size > 0 )              \
+            GET_SET_PHDR(offset, last_pfn << PAGE_SHIFT, size); \
+                                                                \
+        last_pfn = INVALID_PFN;                                 \
+        size = 0;                                               \
+    } while (0)
+
+    if ( need_p2m_table )
+    {
+        xen_pfn_t last_pfn = INVALID_PFN;
+        uint64_t size = 0;
+
+        j = 0;
+        for ( i = 0; i < max_pfn && j < nr_pages; i++ )
+        {
+            if ( last_pfn + (size >> PAGE_SHIFT) != i )
+                SET_PHDR_IF_NECESSARY;
+            
+            if ( p2m[i] == INVALID_P2M_ENTRY )
+                continue;
+        
+            if ( last_pfn == INVALID_PFN )
+                last_pfn = i;
+            size += PAGE_SIZE;
+
+            p2m_array[j].pfn = i;
+            p2m_array[j].gmfn = p2m[i];
+            j++;
+        }
+        SET_PHDR_IF_NECESSARY;
+
+        if ( j != nr_pages )
+            PERROR("j(%ld) != nr_pages (%ld)", j, nr_pages);
+    }
+    else if ( may_balloon )
+    {
+        unsigned long total_pages = 0;
+        j = 0;
+        for ( map_idx = 0; map_idx < nr_memory_map; map_idx++ )
+        {
+            unsigned long pages;
+            xen_pfn_t last_pfn;
+            uint64_t size;
+
+            if ( !memory_map_may_dump(&memory_map[map_idx]) )
+                continue;
+
+            pages = memory_map_size(&memory_map[map_idx]) >> PAGE_SHIFT;
+            pfn = memory_map_addr(&memory_map[map_idx]) >> PAGE_SHIFT;
+            last_pfn = INVALID_PFN;
+            size = 0;
+            
+            for ( i = 0; i < pages; i++ )
+            {
+                if ( last_pfn + (size >> PAGE_SHIFT) != pfn + i )
+                    SET_PHDR_IF_NECESSARY;
+
+                if ( pfn_array[total_pages + i] == INVALID_MFN )
+                    continue;
+#ifdef __ia64__
+                /* work around until fix ia64 gmfn_to_mfn() */
+                if ( pfn_array[total_pages + i] == 0 )
+                    continue;
+#endif
+            
+                if ( last_pfn == INVALID_PFN )
+                    last_pfn = pfn + i;
+                size += PAGE_SIZE;
+
+                pfn_array[j] = pfn + i;
+                j++;
+            }
+            SET_PHDR_IF_NECESSARY;
+
+            total_pages += pages;
+        }
+        if ( j != nr_pages )
+            PERROR("j(%ld) != nr_pages (%ld)", j, nr_pages);
+    }
+    else
+    {
+        for ( map_idx = 0; map_idx < nr_memory_map; map_idx++ )
+        {
+            uint64_t addr;
+            uint64_t size;
+            if ( !memory_map_may_dump(&memory_map[map_idx]) )
+                continue;
+            addr = memory_map_addr(&memory_map[map_idx]);
+            size = memory_map_size(&memory_map[map_idx]);
+            
+            GET_SET_PHDR(offset, addr, size);
+        }
+    }
+
+    nr_phdr++;
+
+    /* write out elf header */
+    ehdr.e_phnum = nr_phdr;
+    sts = dump_rtn(args, (char*)&ehdr, sizeof(ehdr));
     if ( sts != 0 )
-        goto error_out;
-
+        goto out;
+
+    fixup = nr_phdr * sizeof(phdr[0]);
+    /* fix up offset for note section */
+    phdr[0].p_offset += fixup;
+
+    dummy_len = ROUNDUP(offset + fixup, PAGE_SHIFT) - (offset + fixup); /* padding length */
+    fixup += dummy_len;
+    /* fix up offset for pages */
+    for ( i = 1; i < nr_phdr; i++ )
+        phdr[i].p_offset += fixup;
+    /* write out program header */
+    sts = dump_rtn(args, (char*)phdr, nr_phdr * sizeof(phdr[0]));
+    if ( sts != 0 )
+        goto out;
+
+    /* note section */
+    memset(&note, 0, sizeof(note));
+    note.namesz = strlen(XEN_NOTES) + 1;
+    strncpy(note.name, XEN_NOTES, sizeof(note.name));
+    
+    /* note section:xen core header */
+    note.descsz = sizeof(core_header);
+    note.type = NT_XEN_HEADER;
+    core_header.xch_magic = info.hvm ? XC_CORE_MAGIC_HVM : XC_CORE_MAGIC;
+    core_header.xch_nr_vcpus = nr_vcpus;
+    core_header.xch_nr_pages = nr_pages;
+    core_header.xch_page_size = PAGE_SIZE;
+    sts = dump_rtn(args, (char*)&note, sizeof(note));
+    if ( sts != 0 )
+        goto out;
+    sts = dump_rtn(args, (char*)&core_header, sizeof(core_header));
+    if ( sts != 0 )
+        goto out;
+
+    /* note section:xen vcpu prstatus */
+    note.descsz = sizeof(ctxt[0]) * nr_vcpus;
+    note.type = NT_XEN_PRSTATUS;
+    sts = dump_rtn(args, (char*)&note, sizeof(note));
+    if ( sts != 0 )
+        goto out;
     sts = dump_rtn(args, (char *)&ctxt, sizeof(ctxt[0]) * nr_vcpus);
     if ( sts != 0 )
-        goto error_out;
-
-    if ( (page_array = malloc(nr_pages * sizeof(xen_pfn_t))) == NULL )
-    {
-        IPRINTF("Could not allocate memory\n");
-        goto error_out;
-    }
-    if ( xc_get_pfn_list(xc_handle, domid, page_array, nr_pages) != nr_pages )
-    {
-        IPRINTF("Could not get the page frame list\n");
-        goto error_out;
-    }
-    sts = dump_rtn(args, (char *)page_array, nr_pages * sizeof(xen_pfn_t));
-    if ( sts != 0 )
-        goto error_out;
-
+        goto out;
+    
+    /* note section:create p2m table */
+    if ( need_p2m_table )
+    {
+        note.descsz = sizeof(p2m_array[0]) * nr_pages;
+        note.type = NT_XEN_P2M;
+        sts = dump_rtn(args, (char*)&note, sizeof(note));
+        if ( sts != 0 )
+            goto out;
+        sts = dump_rtn(args, (char *)p2m_array,
+                       sizeof(p2m_array[0]) * nr_pages);
+        if ( sts != 0 )
+            goto out;
+    }
+    
     /* Pad the output data to page alignment. */
     memset(dummy, 0, PAGE_SIZE);
-    sts = dump_rtn(args, dummy, header.xch_pages_offset - dummy_len);
+    sts = dump_rtn(args, dummy, dummy_len);
     if ( sts != 0 )
-        goto error_out;
-
-    for ( dump_mem = dump_mem_start, i = 0; i < nr_pages; i++ )
-    {
-        copy_from_domain_page(xc_handle, domid, page_array[i], dump_mem);
-        dump_mem += PAGE_SIZE;
-        if ( ((i + 1) % DUMP_INCREMENT == 0) || ((i + 1) == nr_pages) )
-        {
-            sts = dump_rtn(args, dump_mem_start, dump_mem - dump_mem_start);
-            if ( sts != 0 )
-                goto error_out;
-            dump_mem = dump_mem_start;
-        }
-    }
-
+        goto out;
+
+#define DUMP_PAGE(gmfn)                                                 \
+    do {                                                                \
+        copy_from_domain_page(xc_handle, domid, (gmfn), dump_mem);      \
+        dump_mem += PAGE_SIZE;                                          \
+        if ( ((i + 1) % DUMP_INCREMENT == 0) || ((i + 1) == nr_pages) ) \
+        {                                                               \
+            sts = dump_rtn(args, dump_mem_start,                        \
+                           dump_mem - dump_mem_start);                  \
+            if ( sts != 0 )                                             \
+                goto out;                                               \
+            dump_mem = dump_mem_start;                                  \
+        }                                                               \
+    } while (0)
+    
+    /* dump pages */
+    if ( need_p2m_table || may_balloon )
+    {
+        for ( dump_mem = dump_mem_start, i = 0; i < nr_pages; i++ )
+        {
+            xen_pfn_t gmfn;
+            if ( need_p2m_table )
+                gmfn = p2m_array[i].gmfn;
+            else
+                gmfn = pfn_array[i]; /* may_balloon */
+
+            DUMP_PAGE(gmfn);
+        }
+    }
+    else
+    {
+        for ( map_idx = 0; map_idx < nr_memory_map; map_idx++ )
+        {
+            if ( !memory_map_may_dump(&memory_map[map_idx]) )
+                continue;
+
+            pfn = memory_map_addr(&memory_map[map_idx]) >> PAGE_SHIFT;
+            nr_pages = memory_map_size(&memory_map[map_idx]) >> PAGE_SHIFT;
+            DPRINTF("%s:%d pfn %lx nr_pages %lx\n",
+                    __func__, __LINE__, pfn, nr_pages);
+
+            for ( dump_mem = dump_mem_start, i = 0; i < nr_pages; i++ )
+                DUMP_PAGE(pfn + i);
+        }
+    }
+
+    sts = 0;
+
+out:
+    if ( p2m )
+    {
+        if ( info.hvm )
+            free( p2m );
+        else
+            munmap(p2m, P2M_SIZE);
+    }
     free(dump_mem_start);
-    free(page_array);
-    return 0;
-
- error_out:
-    free(dump_mem_start);
-    free(page_array);
-    return -1;
+    if ( p2m_array != NULL )
+        free(p2m_array);
+    if ( pfn_array != NULL )
+        free(pfn_array);
+    free(phdr);
+    return sts;
 }
 
 /* Callback args for writing to a local dump file. */
diff -r dae81535b771 -r 7da70af62b57 tools/libxc/xc_core.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_core.h     Thu Jan 18 15:18:21 2007 +0900
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ */
+
+#ifndef XC_CORE_H
+#define XC_CORE_H
+
+#define XEN_NOTES       "XEN CORE"
+
+/* Notes used in xen core*/
+#define NT_XEN_NOTEBASE 256 /* large enough which isn't used by others */
+#define NT_XEN_HEADER   (NT_XEN_NOTEBASE + 0)
+#define NT_XEN_PRSTATUS (NT_XEN_NOTEBASE + 1)
+#define NT_XEN_P2M      (NT_XEN_NOTEBASE + 2)
+
+
+struct xen_note {
+    uint32_t    namesz;
+    uint32_t    descsz;
+    uint32_t    type;
+    char        name[12]; /* to hold XEN_NOTES and 64bit aligned.
+                           * 8 <= sizeof(XEN_NOTES) < 12
+                           */
+};
+
+
+struct xen_core_header_desc {
+    uint64_t    xch_magic;
+    uint64_t    xch_nr_vcpus;
+    uint64_t    xch_nr_pages;
+    uint64_t    xch_page_size;
+};
+
+struct p2m {
+    xen_pfn_t pfn;
+    xen_pfn_t gmfn;
+};
+
+
+struct xen_core_header {
+    struct xen_note             note;
+    struct xen_core_header_desc core_header;
+};
+
+struct xen_note_prstatus {
+    struct xen_note             note;
+    vcpu_guest_context_t        ctxt[0];
+};
+
+struct xen_note_p2m {
+    struct xen_note     note;
+    struct p2m          p2m[0];
+};
+
+#endif /* XC_CORE_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r dae81535b771 -r 7da70af62b57 tools/libxc/xc_efi.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_efi.h      Thu Jan 18 15:18:21 2007 +0900
@@ -0,0 +1,68 @@
+#ifndef XC_EFI_H
+#define XC_EFI_H
+
+/* definitions from xen/include/asm-ia64/linux-xen/linux/efi.h */
+
+/*
+ * Extensible Firmware Interface
+ * Based on 'Extensible Firmware Interface Specification' version 0.9, April 
30, 1999
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * Copyright (C) 1999, 2002-2003 Hewlett-Packard Co.
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Stephane Eranian <eranian@xxxxxxxxxx>
+ */
+
+/*
+ * Memory map descriptor:
+ */
+
+/* Memory types: */
+#define EFI_RESERVED_TYPE                0
+#define EFI_LOADER_CODE                  1
+#define EFI_LOADER_DATA                  2
+#define EFI_BOOT_SERVICES_CODE           3
+#define EFI_BOOT_SERVICES_DATA           4
+#define EFI_RUNTIME_SERVICES_CODE        5
+#define EFI_RUNTIME_SERVICES_DATA        6
+#define EFI_CONVENTIONAL_MEMORY          7
+#define EFI_UNUSABLE_MEMORY              8
+#define EFI_ACPI_RECLAIM_MEMORY          9
+#define EFI_ACPI_MEMORY_NVS             10
+#define EFI_MEMORY_MAPPED_IO            11
+#define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12
+#define EFI_PAL_CODE                    13
+#define EFI_MAX_MEMORY_TYPE             14
+
+/* Attribute values: */
+#define EFI_MEMORY_UC           ((u64)0x0000000000000001ULL)    /* uncached */
+#define EFI_MEMORY_WC           ((u64)0x0000000000000002ULL)    /* 
write-coalescing */
+#define EFI_MEMORY_WT           ((u64)0x0000000000000004ULL)    /* 
write-through */
+#define EFI_MEMORY_WB           ((u64)0x0000000000000008ULL)    /* write-back 
*/
+#define EFI_MEMORY_WP           ((u64)0x0000000000001000ULL)    /* 
write-protect */
+#define EFI_MEMORY_RP           ((u64)0x0000000000002000ULL)    /* 
read-protect */
+#define EFI_MEMORY_XP           ((u64)0x0000000000004000ULL)    /* 
execute-protect */
+#define EFI_MEMORY_RUNTIME      ((u64)0x8000000000000000ULL)    /* range 
requires runtime mapping */
+#define EFI_MEMORY_DESCRIPTOR_VERSION   1
+
+#define EFI_PAGE_SHIFT          12
+
+/*
+ * For current x86 implementations of EFI, there is
+ * additional padding in the mem descriptors.  This is not
+ * the case in ia64.  Need to have this fixed in the f/w.
+ */
+typedef struct {
+        u32 type;
+        u32 pad;
+        u64 phys_addr;
+        u64 virt_addr;
+        u64 num_pages;
+        u64 attribute;
+#if defined (__i386__)
+        u64 pad1;
+#endif
+} efi_memory_desc_t;
+
+#endif /* XC_EFI_H */
diff -r dae81535b771 -r 7da70af62b57 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Tue Jan 16 15:42:29 2007 +0900
+++ b/tools/libxc/xenctrl.h     Thu Jan 18 15:18:21 2007 +0900
@@ -529,6 +529,10 @@ unsigned long xc_translate_foreign_addre
 unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
                                            int vcpu, unsigned long long virt);
 
+/**
+ * DEPRECATED.  Avoid using this, as it does not correctly account for PFNs
+ * without a backing MFN.
+ */
 int xc_get_pfn_list(int xc_handle, uint32_t domid, xen_pfn_t *pfn_buf,
                     unsigned long max_pfns);
 
diff -r dae81535b771 -r 7da70af62b57 tools/libxc/xg_private.h
--- a/tools/libxc/xg_private.h  Tue Jan 16 15:42:29 2007 +0900
+++ b/tools/libxc/xg_private.h  Thu Jan 18 15:18:21 2007 +0900
@@ -119,6 +119,25 @@ typedef unsigned long l4_pgentry_t;
   (((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
 #endif
 
+#define ROUNDUP(_x,_w) (((unsigned long)(_x)+(1UL<<(_w))-1) & ~((1UL<<(_w))-1))
+
+/* Size in bytes of the P2M (rounded up to the nearest PAGE_SIZE bytes) */
+#define P2M_SIZE        ROUNDUP((max_pfn * sizeof(xen_pfn_t)), PAGE_SHIFT)
+
+/* Number of xen_pfn_t in a page */
+#define fpp             (PAGE_SIZE/sizeof(xen_pfn_t))
+
+/* Number of entries in the pfn_to_mfn_frame_list_list */
+#define P2M_FLL_ENTRIES (((max_pfn)+(fpp*fpp)-1)/(fpp*fpp))
+
+/* Number of entries in the pfn_to_mfn_frame_list */
+#define P2M_FL_ENTRIES  (((max_pfn)+fpp-1)/fpp)
+
+/* Size in bytes of the pfn_to_mfn_frame_list     */
+#define P2M_FL_SIZE     ((P2M_FL_ENTRIES)*sizeof(unsigned long))
+
+#define INVALID_P2M_ENTRY   (~0UL)
+
 struct domain_setup_info
 {
     uint64_t v_start;
diff -r dae81535b771 -r 7da70af62b57 tools/libxc/xg_save_restore.h
--- a/tools/libxc/xg_save_restore.h     Tue Jan 16 15:42:29 2007 +0900
+++ b/tools/libxc/xg_save_restore.h     Thu Jan 18 15:18:21 2007 +0900
@@ -82,7 +82,6 @@ static int get_platform_info(int xc_hand
 */
 
 #define PFN_TO_KB(_pfn) ((_pfn) << (PAGE_SHIFT - 10))
-#define ROUNDUP(_x,_w) (((unsigned long)(_x)+(1UL<<(_w))-1) & ~((1UL<<(_w))-1))
 
 
 /*
@@ -95,25 +94,5 @@ static int get_platform_info(int xc_hand
 #define M2P_SIZE(_m)    ROUNDUP(((_m) * sizeof(xen_pfn_t)), M2P_SHIFT)
 #define M2P_CHUNKS(_m)  (M2P_SIZE((_m)) >> M2P_SHIFT)
 
-/* Size in bytes of the P2M (rounded up to the nearest PAGE_SIZE bytes) */
-#define P2M_SIZE        ROUNDUP((max_pfn * sizeof(xen_pfn_t)), PAGE_SHIFT)
-
-/* Number of xen_pfn_t in a page */
-#define fpp             (PAGE_SIZE/sizeof(xen_pfn_t))
-
-/* Number of entries in the pfn_to_mfn_frame_list */
-#define P2M_FL_ENTRIES  (((max_pfn)+fpp-1)/fpp)
-
-/* Size in bytes of the pfn_to_mfn_frame_list     */
-#define P2M_FL_SIZE     ((P2M_FL_ENTRIES)*sizeof(unsigned long))
-
-/* Number of entries in the pfn_to_mfn_frame_list_list */
-#define P2M_FLL_ENTRIES (((max_pfn)+(fpp*fpp)-1)/(fpp*fpp))
-
 /* Returns TRUE if the PFN is currently mapped */
 #define is_mapped(pfn_type) (!((pfn_type) & 0x80000000UL))
-
-#define INVALID_P2M_ENTRY   (~0UL)
-
-
-
--
yamahata

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel