# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 74ee53209cca47ee228c7772176b196593a5260b
# Parent 1d8b3c85121de189882dee5e2dae54697cf52499
Fix whitespace in libxc. Tabs are manually fixed.
Trailing whitespace removed with:
perl -p -i -e 's/\s+$/\n/g' tools/libxc/*.[ch]
Signed-off-by: Hollis Blanchard <hollisb@xxxxxxxxxx>
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_bvtsched.c
--- a/tools/libxc/xc_bvtsched.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_bvtsched.c Sat Apr 15 09:07:31 2006
@@ -1,8 +1,8 @@
/******************************************************************************
* xc_bvtsched.c
- *
+ *
* API for manipulating parameters of the Borrowed Virtual Time scheduler.
- *
+ *
* Copyright (c) 2003, K A Fraser.
*/
@@ -26,7 +26,7 @@
{
DECLARE_DOM0_OP;
int ret;
-
+
op.cmd = DOM0_SCHEDCTL;
op.u.schedctl.sched_id = SCHED_BVT;
op.u.schedctl.direction = SCHED_INFO_GET;
@@ -71,7 +71,7 @@
long long *warpl,
long long *warpu)
{
-
+
DECLARE_DOM0_OP;
int ret;
struct bvt_adjdom *adjptr = &op.u.adjustdom.u.bvt;
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_core.c
--- a/tools/libxc/xc_core.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_core.c Sat Apr 15 09:07:31 2006
@@ -23,7 +23,7 @@
return 0;
}
-int
+int
xc_domain_dumpcore_via_callback(int xc_handle,
uint32_t domid,
void *args,
@@ -45,13 +45,13 @@
PERROR("Could not allocate dump_mem");
goto error_out;
}
-
+
if ( xc_domain_getinfo(xc_handle, domid, 1, &info) != 1 )
{
PERROR("Could not get info for domain");
goto error_out;
}
-
+
if ( domid != info.domid )
{
PERROR("Domain %d does not exist", domid);
@@ -61,10 +61,10 @@
for ( i = 0; i <= info.max_vcpu_id; i++ )
if ( xc_vcpu_getcontext(xc_handle, domid, i, &ctxt[nr_vcpus]) == 0)
nr_vcpus++;
-
+
nr_pages = info.nr_pages;
- header.xch_magic = XC_CORE_MAGIC;
+ header.xch_magic = XC_CORE_MAGIC;
header.xch_nr_vcpus = nr_vcpus;
header.xch_nr_pages = nr_pages;
header.xch_ctxt_offset = sizeof(struct xc_core_header);
@@ -74,7 +74,7 @@
(sizeof(vcpu_guest_context_t) * nr_vcpus) +
(nr_pages * sizeof(unsigned long)));
header.xch_pages_offset = round_pgup(dummy_len);
-
+
sts = dump_rtn(args, (char *)&header, sizeof(struct xc_core_header));
if ( sts != 0 )
goto error_out;
@@ -150,7 +150,7 @@
return 0;
}
-int
+int
xc_domain_dumpcore(int xc_handle,
uint32_t domid,
const char *corename)
@@ -163,7 +163,7 @@
PERROR("Could not open corefile %s: %s", corename, strerror(errno));
return -errno;
}
-
+
sts = xc_domain_dumpcore_via_callback(
xc_handle, domid, &da, &local_file_dump);
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_domain.c Sat Apr 15 09:07:31 2006
@@ -1,8 +1,8 @@
/******************************************************************************
* xc_domain.c
- *
+ *
* API for manipulating and obtaining information on domains.
- *
+ *
* Copyright (c) 2003, K A Fraser.
*/
@@ -26,17 +26,17 @@
*pdomid = (uint16_t)op.u.createdomain.domain;
return 0;
-}
-
-
-int xc_domain_pause(int xc_handle,
+}
+
+
+int xc_domain_pause(int xc_handle,
uint32_t domid)
{
DECLARE_DOM0_OP;
op.cmd = DOM0_PAUSEDOMAIN;
op.u.pausedomain.domain = (domid_t)domid;
return do_dom0_op(xc_handle, &op);
-}
+}
int xc_domain_unpause(int xc_handle,
@@ -46,7 +46,7 @@
op.cmd = DOM0_UNPAUSEDOMAIN;
op.u.unpausedomain.domain = (domid_t)domid;
return do_dom0_op(xc_handle, &op);
-}
+}
int xc_domain_destroy(int xc_handle,
@@ -88,7 +88,7 @@
int xc_vcpu_setaffinity(int xc_handle,
- uint32_t domid,
+ uint32_t domid,
int vcpu,
cpumap_t cpumap)
{
@@ -109,7 +109,7 @@
unsigned int nr_doms;
uint32_t next_domid = first_domid;
DECLARE_DOM0_OP;
- int rc = 0;
+ int rc = 0;
memset(info, 0, max_doms*sizeof(xc_dominfo_t));
@@ -127,8 +127,8 @@
info->blocked = !!(op.u.getdomaininfo.flags & DOMFLAGS_BLOCKED);
info->running = !!(op.u.getdomaininfo.flags & DOMFLAGS_RUNNING);
- info->shutdown_reason =
- (op.u.getdomaininfo.flags>>DOMFLAGS_SHUTDOWNSHIFT) &
+ info->shutdown_reason =
+ (op.u.getdomaininfo.flags>>DOMFLAGS_SHUTDOWNSHIFT) &
DOMFLAGS_SHUTDOWNMASK;
if ( info->shutdown && (info->shutdown_reason == SHUTDOWN_crash) )
@@ -152,7 +152,7 @@
info++;
}
- if( !nr_doms ) return rc;
+ if( !nr_doms ) return rc;
return nr_doms;
}
@@ -167,7 +167,7 @@
if ( mlock(info, max_domains*sizeof(xc_domaininfo_t)) != 0 )
return -1;
-
+
op.cmd = DOM0_GETDOMAININFOLIST;
op.u.getdomaininfolist.first_domain = first_domain;
op.u.getdomaininfolist.max_domains = max_domains;
@@ -177,10 +177,10 @@
ret = -1;
else
ret = op.u.getdomaininfolist.num_domains;
-
+
if ( munlock(info, max_domains*sizeof(xc_domaininfo_t)) != 0 )
ret = -1;
-
+
return ret;
}
@@ -209,7 +209,7 @@
int xc_shadow_control(int xc_handle,
- uint32_t domid,
+ uint32_t domid,
unsigned int sop,
unsigned long *dirty_bitmap,
unsigned long pages,
@@ -238,11 +238,11 @@
{
int sched_id;
int ret;
-
+
/* Figure out which scheduler is currently used: */
if ( (ret = xc_sched_id(xc_handle, &sched_id)) != 0 )
return ret;
-
+
switch ( sched_id )
{
case SCHED_BVT:
@@ -253,20 +253,20 @@
long long warpl;
long long warpu;
- /* Preserve all the scheduling parameters apart
+ /* Preserve all the scheduling parameters apart
of MCU advance. */
if ( (ret = xc_bvtsched_domain_get(
- xc_handle, domid, &mcuadv,
+ xc_handle, domid, &mcuadv,
&warpback, &warpvalue, &warpl, &warpu)) != 0 )
return ret;
-
+
/* The MCU advance is inverse of the weight.
Default value of the weight is 1, default mcuadv 10.
The scaling factor is therefore 10. */
if ( weight > 0 )
mcuadv = 10 / weight;
-
- ret = xc_bvtsched_domain_set(xc_handle, domid, mcuadv,
+
+ ret = xc_bvtsched_domain_set(xc_handle, domid, mcuadv,
warpback, warpvalue, warpl, warpu);
break;
}
@@ -276,7 +276,7 @@
}
int xc_domain_setmaxmem(int xc_handle,
- uint32_t domid,
+ uint32_t domid,
unsigned int max_memkb)
{
DECLARE_DOM0_OP;
@@ -287,7 +287,7 @@
}
int xc_domain_memory_increase_reservation(int xc_handle,
- uint32_t domid,
+ uint32_t domid,
unsigned long nr_extents,
unsigned int extent_order,
unsigned int address_bits,
@@ -297,7 +297,7 @@
struct xen_memory_reservation reservation = {
.extent_start = extent_start, /* may be NULL */
.nr_extents = nr_extents,
- .extent_order = extent_order,
+ .extent_order = extent_order,
.address_bits = address_bits,
.domid = domid
};
@@ -319,16 +319,16 @@
}
int xc_domain_memory_decrease_reservation(int xc_handle,
- uint32_t domid,
+ uint32_t domid,
unsigned long nr_extents,
unsigned int extent_order,
unsigned long *extent_start)
{
int err;
struct xen_memory_reservation reservation = {
- .extent_start = extent_start,
+ .extent_start = extent_start,
.nr_extents = nr_extents,
- .extent_order = extent_order,
+ .extent_order = extent_order,
.address_bits = 0,
.domid = domid
};
@@ -411,7 +411,7 @@
return do_dom0_op(xc_handle, &op);
}
-int xc_domain_sethandle(int xc_handle, uint32_t domid,
+int xc_domain_sethandle(int xc_handle, uint32_t domid,
xen_domain_handle_t handle)
{
DECLARE_DOM0_OP;
@@ -506,7 +506,7 @@
op.cmd = DOM0_IOMEM_PERMISSION;
op.u.iomem_permission.domain = domid;
op.u.iomem_permission.first_mfn = first_mfn;
- op.u.iomem_permission.nr_mfns = nr_mfns;
+ op.u.iomem_permission.nr_mfns = nr_mfns;
op.u.iomem_permission.allow_access = allow_access;
return do_dom0_op(xc_handle, &op);
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_elf.h
--- a/tools/libxc/xc_elf.h Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_elf.h Sat Apr 15 09:07:31 2006
@@ -46,7 +46,7 @@
typedef uint16_t Elf64_Quarter;
/*
- * e_ident[] identification indexes
+ * e_ident[] identification indexes
* See http://www.caldera.com/developers/gabi/2000-07-17/ch4.eheader.html
*/
#define EI_MAG0 0 /* file ID */
@@ -57,7 +57,7 @@
#define EI_DATA 5 /* data encoding */
#define EI_VERSION 6 /* ELF header version */
#define EI_OSABI 7 /* OS/ABI ID */
-#define EI_ABIVERSION 8 /* ABI version */
+#define EI_ABIVERSION 8 /* ABI version */
#define EI_PAD 9 /* start of pad bytes */
#define EI_NIDENT 16 /* Size of e_ident[] */
@@ -119,7 +119,7 @@
Elf32_Half e_phnum; /* number of program header entries */
Elf32_Half e_shentsize; /* section header entry size */
Elf32_Half e_shnum; /* number of section header entries */
- Elf32_Half e_shstrndx; /* section header table's "section
+ Elf32_Half e_shstrndx; /* section header table's "section
header string table" entry offset */
} Elf32_Ehdr;
@@ -160,7 +160,7 @@
#define EM_486 6 /* Intel 80486 - unused? */
#define EM_860 7 /* Intel 80860 */
#define EM_MIPS 8 /* MIPS R3000 Big-Endian only */
-/*
+/*
* Don't know if EM_MIPS_RS4_BE,
* EM_SPARC64, EM_PARISC,
* or EM_PPC are ABI compliant
@@ -441,7 +441,7 @@
#define DT_NUM 25 /* Number used. */
#define DT_LOPROC 0x70000000 /* reserved range for processor */
#define DT_HIPROC 0x7fffffff /* specific dynamic array tags */
-
+
/* Standard ELF hashing function */
unsigned int elf_hash(const unsigned char *name);
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_evtchn.c
--- a/tools/libxc/xc_evtchn.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_evtchn.c Sat Apr 15 09:07:31 2006
@@ -1,8 +1,8 @@
/******************************************************************************
* xc_evtchn.c
- *
+ *
* API for manipulating and accessing inter-domain event channels.
- *
+ *
* Copyright (c) 2004, K A Fraser.
*/
@@ -44,7 +44,7 @@
if ( (rc = do_evtchn_op(xc_handle, &op)) == 0 )
rc = op.u.alloc_unbound.port;
-
+
return rc;
}
@@ -62,6 +62,6 @@
if ( (rc = do_evtchn_op(xc_handle, &op)) == 0 )
memcpy(status, &op.u.status, sizeof(*status));
-
+
return rc;
}
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_ia64_stubs.c
--- a/tools/libxc/xc_ia64_stubs.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_ia64_stubs.c Sat Apr 15 09:07:31 2006
@@ -22,7 +22,7 @@
return FPSR_DEFAULT;
}
-int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
+int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
uint32_t max_factor, uint32_t flags /* XCFLAGS_xxx */,
int (*suspend)(int domid))
{
@@ -50,8 +50,8 @@
}
int xc_ia64_get_pfn_list(int xc_handle,
- uint32_t domid,
- unsigned long *pfn_buf,
+ uint32_t domid,
+ unsigned long *pfn_buf,
unsigned int start_page,
unsigned int nr_pages)
{
@@ -65,16 +65,16 @@
op.u.getmemlist.buffer = pfn_buf;
if ( (max_pfns != -1UL)
- && mlock(pfn_buf, nr_pages * sizeof(unsigned long)) != 0 )
+ && mlock(pfn_buf, nr_pages * sizeof(unsigned long)) != 0 )
{
PERROR("Could not lock pfn list buffer");
return -1;
- }
+ }
ret = do_dom0_op(xc_handle, &op);
if (max_pfns != -1UL)
- (void)munlock(pfn_buf, nr_pages * sizeof(unsigned long));
+ (void)munlock(pfn_buf, nr_pages * sizeof(unsigned long));
return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
}
@@ -84,7 +84,7 @@
dom0_op_t op;
op.cmd = DOM0_GETDOMAININFO;
op.u.getdomaininfo.domain = (domid_t)domid;
- return (do_dom0_op(xc_handle, &op) < 0) ?
+ return (do_dom0_op(xc_handle, &op) < 0) ?
-1 : op.u.getdomaininfo.max_pages;
}
@@ -92,7 +92,7 @@
void* src_page, unsigned long dst_pfn, int nr_pages)
{
// N.B. gva should be page aligned
-
+
unsigned long *page_array = NULL;
int i;
@@ -107,13 +107,13 @@
}
for ( i=0; i< nr_pages; i++ ){
- if (xc_copy_to_domain_page(xc_handle, domid, page_array[i],
- src_page + (i << PAGE_SHIFT)))
- goto error_out;
+ if (xc_copy_to_domain_page(xc_handle, domid, page_array[i],
+ src_page + (i << PAGE_SHIFT)))
+ goto error_out;
}
free(page_array);
return 0;
-
+
error_out:
free(page_array);
return -1;
@@ -123,8 +123,8 @@
#define HOB_SIGNATURE 0x3436474953424f48 // "HOBSIG64"
#define GFW_HOB_START ((4UL<<30)-(14UL<<20)) //4G -14M
#define GFW_HOB_SIZE (1UL<<20) //1M
-#define MEM_G (1UL << 30)
-#define MEM_M (1UL << 20)
+#define MEM_G (1UL << 30)
+#define MEM_M (1UL << 20)
typedef struct {
unsigned long signature;
@@ -136,7 +136,7 @@
* INFO HOB is the first data data in one HOB list
* it contains the control information of the HOB list
*/
-typedef struct {
+typedef struct {
HOB_GENERIC_HEADER header;
unsigned long length; // current length of hob
unsigned long cur_pos; // current poisiton of hob
@@ -216,7 +216,7 @@
// buffer too small
return -1;
}
-
+
phit = (HOB_INFO*)buffer;
phit->header.signature = HOB_SIGNATURE;
phit->header.type = HOB_TYPE_INFO;
@@ -224,7 +224,7 @@
phit->length = sizeof(HOB_INFO) + sizeof(HOB_GENERIC_HEADER);
phit->cur_pos = 0;
phit->buf_size = buf_size;
-
+
terminal = (HOB_GENERIC_HEADER*) (buffer + sizeof(HOB_INFO));
terminal->signature= HOB_SIGNATURE;
terminal->type = HOB_TYPE_TERMINAL;
@@ -235,7 +235,7 @@
/*
* Add a new HOB to the HOB List.
- *
+ *
* hob_start - start address of hob buffer
* type - type of the hob to be added
* data - data of the hob to be added
@@ -250,8 +250,8 @@
)
{
HOB_INFO *phit;
- HOB_GENERIC_HEADER *newhob,*tail;
-
+ HOB_GENERIC_HEADER *newhob,*tail;
+
phit = (HOB_INFO*)hob_start;
if (phit->length + data_size > phit->buf_size){
@@ -259,7 +259,7 @@
return -1;
}
- //append new HOB
+ //append new HOB
newhob = (HOB_GENERIC_HEADER*)
(hob_start + phit->length - sizeof(HOB_GENERIC_HEADER));
newhob->signature = HOB_SIGNATURE;
@@ -267,7 +267,7 @@
newhob->length = data_size + sizeof(HOB_GENERIC_HEADER);
memcpy((void*)newhob + sizeof(HOB_GENERIC_HEADER), data, data_size);
- // append terminal HOB
+ // append terminal HOB
tail = (HOB_GENERIC_HEADER*) ( hob_start + phit->length + data_size);
tail->signature = HOB_SIGNATURE;
tail->type = HOB_TYPE_TERMINAL;
@@ -281,9 +281,9 @@
}
int get_hob_size(void* hob_buf){
-
+
HOB_INFO *phit = (HOB_INFO*)hob_buf;
-
+
if (phit->header.signature != HOB_SIGNATURE){
PERROR("xc_get_hob_size:Incorrect signature");
return -1;
@@ -293,30 +293,30 @@
int build_hob (void* hob_buf, unsigned long hob_buf_size,
unsigned long dom_mem_size)
-{
- //Init HOB List
+{
+ //Init HOB List
if (hob_init (hob_buf, hob_buf_size)<0){
PERROR("buffer too small");
goto err_out;
}
-
+
if ( add_mem_hob( hob_buf,dom_mem_size) < 0){
PERROR("Add memory hob failed, buffer too small");
goto err_out;
}
-
+
if ( add_pal_hob( hob_buf ) < 0 ){
PERROR("Add PAL hob failed, buffer too small");
goto err_out;
}
-
+
return 0;
err_out:
- return -1;
-}
-
-static int
+ return -1;
+}
+
+static int
load_hob(int xc_handle, uint32_t dom, void *hob_buf)
{
// hob_buf should be page aligned
@@ -334,22 +334,22 @@
}
nr_pages = (hob_size + PAGE_SIZE -1) >> PAGE_SHIFT;
-
+
return xc_ia64_copy_to_domain_pages(xc_handle, dom,
hob_buf, GFW_HOB_START, nr_pages );
}
#define MIN(x, y) ((x) < (y)) ? (x) : (y)
-static int
+static int
add_mem_hob(void* hob_buf, unsigned long dom_mem_size){
hob_mem_t memhob;
// less than 3G
memhob.start = 0;
memhob.size = MIN(dom_mem_size, 0xC0000000);
-
+
if (hob_add(hob_buf, HOB_TYPE_MEM, &memhob, sizeof(memhob)) < 0){
- return -1;
+ return -1;
}
if (dom_mem_size > 0xC0000000) {
@@ -373,29 +373,29 @@
};
unsigned char config_pal_cache_info[152] = {
3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 6, 4, 6, 7, 255, 1, 0, 1, 0, 64, 0, 0, 12, 12,
+ 6, 4, 6, 7, 255, 1, 0, 1, 0, 64, 0, 0, 12, 12,
49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 6, 7, 0, 1,
- 0, 1, 0, 64, 0, 0, 12, 12, 49, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 6, 8, 7, 7, 255, 7, 0, 11, 0, 0, 16, 0,
- 12, 17, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 8, 7,
+ 0, 1, 0, 64, 0, 0, 12, 12, 49, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 6, 8, 7, 7, 255, 7, 0, 11, 0, 0, 16, 0,
+ 12, 17, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 8, 7,
7, 7, 5, 9, 11, 0, 0, 4, 0, 12, 15, 49, 0, 254, 255,
- 255, 255, 255, 255, 255, 255, 2, 8, 7, 7, 7, 5, 9,
- 11, 0, 0, 4, 0, 12, 15, 49, 0, 0, 0, 0, 0, 0, 0, 0,
+ 255, 255, 255, 255, 255, 255, 2, 8, 7, 7, 7, 5, 9,
+ 11, 0, 0, 4, 0, 12, 15, 49, 0, 0, 0, 0, 0, 0, 0, 0,
0, 3, 12, 7, 7, 7, 14, 1, 3, 0, 0, 192, 0, 12, 20, 49, 0
};
unsigned char config_pal_cache_prot_info[200] = {
- 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
45, 0, 16, 8, 0, 76, 12, 64, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 8, 0, 16, 4, 0, 76, 44, 68, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32,
- 0, 16, 8, 0, 81, 44, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 8, 0, 16, 4, 0, 76, 44, 68, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32,
+ 0, 16, 8, 0, 81, 44, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0,
- 112, 12, 0, 79, 124, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 254, 255, 255, 255, 255, 255, 255, 255,
+ 112, 12, 0, 79, 124, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 254, 255, 255, 255, 255, 255, 255, 255,
32, 0, 112, 12, 0, 79, 124, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 0, 160,
- 12, 0, 84, 124, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 0, 160,
+ 12, 0, 84, 124, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0
};
unsigned char config_pal_debug_info[16] = {
@@ -408,37 +408,37 @@
109, 219, 182, 13, 0, 0, 0, 0
};
unsigned char config_pal_freq_ratios[24] = {
- 11, 1, 0, 0, 77, 7, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 4,
+ 11, 1, 0, 0, 77, 7, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 4,
0, 0, 0, 7, 0, 0, 0
};
unsigned char config_pal_halt_info[64] = {
- 0, 0, 0, 0, 0, 0, 0, 48, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 48, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
unsigned char config_pal_perf_mon_info[136] = {
- 12, 47, 18, 8, 0, 0, 0, 0, 241, 255, 0, 0, 255, 7, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 241, 255, 0, 0, 223, 0, 255, 255,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 240, 255, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 240, 255, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 12, 47, 18, 8, 0, 0, 0, 0, 241, 255, 0, 0, 255, 7, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 241, 255, 0, 0, 223, 0, 255, 255,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 240, 255, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 240, 255, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
};
unsigned char config_pal_proc_get_features[104] = {
- 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 64, 6, 64, 49, 0, 0, 0, 0, 64, 6, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0,
- 231, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 0, 0, 0, 0,
+ 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 64, 6, 64, 49, 0, 0, 0, 0, 64, 6, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0,
+ 231, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 0, 0, 0, 0,
63, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
};
unsigned char config_pal_ptce_info[24] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
};
unsigned char config_pal_register_info[64] = {
@@ -473,7 +473,7 @@
typedef struct{
hob_type_t type;
void* data;
- unsigned long size;
+ unsigned long size;
}hob_batch_t;
hob_batch_t hob_batch[]={
@@ -552,13 +552,13 @@
{ HOB_TYPE_PAL_VM_PAGE_SIZE,
&config_pal_vm_page_size,
sizeof(config_pal_vm_page_size)
- },
+ },
};
static int add_pal_hob(void* hob_buf){
int i;
for (i=0; i<sizeof(hob_batch)/sizeof(hob_batch_t); i++){
- if (hob_add(hob_buf, hob_batch[i].type,
+ if (hob_add(hob_buf, hob_batch[i].type,
hob_batch[i].data,
hob_batch[i].size)<0)
return -1;
@@ -579,17 +579,17 @@
// FIXME: initialize pfn list for a temp hack
if (xc_ia64_get_pfn_list(xc_handle, dom, NULL, -1, -1) == -1) {
- PERROR("Could not allocate continuous memory");
- goto error_out;
- }
-
+ PERROR("Could not allocate continuous memory");
+ goto error_out;
+ }
+
if ((image_size > 12 * MEM_M) || (image_size & (PAGE_SIZE - 1))) {
PERROR("Guest firmware size is incorrect [%ld]?", image_size);
return -1;
}
/* Load guest firmware */
- if( xc_ia64_copy_to_domain_pages( xc_handle, dom,
+ if( xc_ia64_copy_to_domain_pages( xc_handle, dom,
image, 4*MEM_G-image_size, image_size>>PAGE_SHIFT)) {
PERROR("Could not load guest firmware into domain");
goto error_out;
@@ -610,9 +610,9 @@
*store_mfn = page_array[1];
if ((sp = (shared_iopage_t *) xc_map_foreign_range(
- xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
- page_array[0])) == 0)
- goto error_out;
+ xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
+ page_array[0])) == 0)
+ goto error_out;
memset(sp, 0, PAGE_SIZE);
for (i = 0; i < vcpus; i++) {
@@ -665,14 +665,14 @@
image_size = (image_size + PAGE_SIZE - 1) & PAGE_MASK;
- if ( mlock(&st_ctxt, sizeof(st_ctxt) ) ){
+ if ( mlock(&st_ctxt, sizeof(st_ctxt) ) ){
PERROR("Unable to mlock ctxt");
return 1;
}
op.cmd = DOM0_GETDOMAININFO;
op.u.getdomaininfo.domain = (domid_t)domid;
- if ( (do_dom0_op(xc_handle, &op) < 0) ||
+ if ( (do_dom0_op(xc_handle, &op) < 0) ||
((uint16_t)op.u.getdomaininfo.domain != domid) ) {
PERROR("Could not get info on domain");
goto error_out;
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_linux_build.c
--- a/tools/libxc/xc_linux_build.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_linux_build.c Sat Apr 15 09:07:31 2006
@@ -237,7 +237,7 @@
else
{
*vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
- if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
+ if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
(count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
*vl1e &= ~_PAGE_RW;
}
@@ -314,7 +314,7 @@
else
*vl2e++ = l1tab | L2_PROT;
}
-
+
if ( shadow_mode_enabled )
{
*vl1e = (count << PAGE_SHIFT) | L1_PROT;
@@ -323,12 +323,12 @@
{
*vl1e = ((uint64_t)page_array[count] << PAGE_SHIFT) | L1_PROT;
if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
- (count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
+ (count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
*vl1e &= ~_PAGE_RW;
}
vl1e++;
}
-
+
munmap(vl1tab, PAGE_SIZE);
munmap(vl2tab, PAGE_SIZE);
munmap(vl3tab, PAGE_SIZE);
@@ -376,13 +376,13 @@
ctxt->ctrlreg[3] = pl4tab;
else
ctxt->ctrlreg[3] = l4tab;
-
+
for ( count = 0; count < ((v_end-dsi_v_start)>>PAGE_SHIFT); count++)
{
if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
{
alloc_pt(l1tab, vl1tab, pl1tab);
-
+
if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
{
alloc_pt(l2tab, vl2tab, pl2tab);
@@ -410,7 +410,7 @@
*vl2e = l1tab | L2_PROT;
vl2e++;
}
-
+
if ( shadow_mode_enabled )
{
*vl1e = (count << PAGE_SHIFT) | L1_PROT;
@@ -419,14 +419,14 @@
{
*vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
- (count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
+ (count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
{
*vl1e &= ~_PAGE_RW;
}
}
vl1e++;
}
-
+
munmap(vl1tab, PAGE_SIZE);
munmap(vl2tab, PAGE_SIZE);
munmap(vl3tab, PAGE_SIZE);
@@ -509,7 +509,7 @@
" Loaded kernel: %p->%p\n"
" Init. ramdisk: %p->%p\n"
" TOTAL: %p->%p\n",
- _p(dsi.v_kernstart), _p(dsi.v_kernend),
+ _p(dsi.v_kernstart), _p(dsi.v_kernend),
_p(vinitrd_start), _p(vinitrd_end),
_p(dsi.v_start), _p(v_end));
printf(" ENTRY ADDRESS: %p\n", _p(dsi.v_kernentry));
@@ -696,10 +696,10 @@
required_features);
/*
- * Why do we need this? The number of page-table frames depends on the
- * size of the bootstrap address space. But the size of the address space
- * depends on the number of page-table frames (since each one is mapped
- * read-only). We have a pair of simultaneous equations in two unknowns,
+ * Why do we need this? The number of page-table frames depends on the
+ * size of the bootstrap address space. But the size of the address space
+ * depends on the number of page-table frames (since each one is mapped
+ * read-only). We have a pair of simultaneous equations in two unknowns,
* which we solve by exhaustive search.
*/
v_end = round_pgup(dsi.v_end);
@@ -731,13 +731,13 @@
if ( dsi.pae_kernel )
{
/* FIXME: assumes one L2 pgtable @ 0xc0000000 */
- if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT_PAE)-1)) >>
+ if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT_PAE)-1)) >>
L2_PAGETABLE_SHIFT_PAE) + 2) <= nr_pt_pages )
break;
}
else
{
- if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT)-1)) >>
+ if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT)-1)) >>
L2_PAGETABLE_SHIFT) + 1) <= nr_pt_pages )
break;
}
@@ -873,7 +873,7 @@
count) )
{
fprintf(stderr,"m2p update failure p=%lx m=%lx\n",
- count, page_array[count]);
+ count, page_array[count]);
munmap(physmap, PAGE_SIZE);
goto error_out;
}
@@ -982,7 +982,7 @@
start_info->mod_len = initrd->len;
}
if ( cmdline != NULL )
- {
+ {
strncpy((char *)start_info->cmd_line, cmdline, MAX_GUEST_CMDLINE);
start_info->cmd_line[MAX_GUEST_CMDLINE-1] = '\0';
}
@@ -1073,14 +1073,14 @@
#endif
if ( mlock(&st_ctxt, sizeof(st_ctxt) ) )
- {
+ {
PERROR("%s: ctxt mlock failed", __func__);
return 1;
}
op.cmd = DOM0_GETDOMAININFO;
op.u.getdomaininfo.domain = (domid_t)domid;
- if ( (xc_dom0_op(xc_handle, &op) < 0) ||
+ if ( (xc_dom0_op(xc_handle, &op) < 0) ||
((uint16_t)op.u.getdomaininfo.domain != domid) )
{
PERROR("Could not get info on domain");
@@ -1089,9 +1089,9 @@
memset(ctxt, 0, sizeof(*ctxt));
- if ( setup_guest(xc_handle, domid, image, image_size,
+ if ( setup_guest(xc_handle, domid, image, image_size,
initrd,
- nr_pages,
+ nr_pages,
&vstartinfo_start, &vkern_entry,
&vstack_start, ctxt, cmdline,
op.u.getdomaininfo.shared_info_frame,
@@ -1152,7 +1152,7 @@
/* No LDT. */
ctxt->ldt_ents = 0;
-
+
/* Use the default Xen-provided GDT. */
ctxt->gdt_ents = 0;
@@ -1184,7 +1184,7 @@
launch_op.cmd = DOM0_SETVCPUCONTEXT;
rc = xc_dom0_op(xc_handle, &launch_op);
-
+
return rc;
error_out:
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_linux_restore.c
--- a/tools/libxc/xc_linux_restore.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_linux_restore.c Sat Apr 15 09:07:31 2006
@@ -1,8 +1,8 @@
/******************************************************************************
* xc_linux_restore.c
- *
+ *
* Restore the state of a Linux session.
- *
+ *
* Copyright (c) 2003, K A Fraser.
*/
@@ -13,13 +13,13 @@
#include "xg_save_restore.h"
/* max mfn of the whole machine */
-static unsigned long max_mfn;
+static unsigned long max_mfn;
/* virtual starting address of the hypervisor */
-static unsigned long hvirt_start;
+static unsigned long hvirt_start;
/* #levels of page tables used by the currrent guest */
-static unsigned int pt_levels;
+static unsigned int pt_levels;
/* total number of pages used by the current guest */
static unsigned long max_pfn;
@@ -41,84 +41,84 @@
s = read(fd, &b[r], count - r);
if ((s == -1) && (errno == EINTR))
continue;
- if (s <= 0) {
+ if (s <= 0) {
break;
- }
+ }
r += s;
}
- return (r == count) ? 1 : 0;
+ return (r == count) ? 1 : 0;
}
/*
-** In the state file (or during transfer), all page-table pages are
-** converted into a 'canonical' form where references to actual mfns
-** are replaced with references to the corresponding pfns.
-** This function inverts that operation, replacing the pfn values with
-** the (now known) appropriate mfn values.
+** In the state file (or during transfer), all page-table pages are
+** converted into a 'canonical' form where references to actual mfns
+** are replaced with references to the corresponding pfns.
+** This function inverts that operation, replacing the pfn values with
+** the (now known) appropriate mfn values.
*/
-int uncanonicalize_pagetable(unsigned long type, void *page)
-{
- int i, pte_last;
- unsigned long pfn;
- uint64_t pte;
-
- pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8);
+int uncanonicalize_pagetable(unsigned long type, void *page)
+{
+ int i, pte_last;
+ unsigned long pfn;
+ uint64_t pte;
+
+ pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8);
/* Now iterate through the page table, uncanonicalizing each PTE */
- for(i = 0; i < pte_last; i++) {
-
- if(pt_levels == 2)
- pte = ((uint32_t *)page)[i];
- else
- pte = ((uint64_t *)page)[i];
-
- if(pte & _PAGE_PRESENT) {
+ for(i = 0; i < pte_last; i++) {
+
+ if(pt_levels == 2)
+ pte = ((uint32_t *)page)[i];
+ else
+ pte = ((uint64_t *)page)[i];
+
+ if(pte & _PAGE_PRESENT) {
pfn = (pte >> PAGE_SHIFT) & 0xffffffff;
-
- if(pfn >= max_pfn) {
+
+ if(pfn >= max_pfn) {
/* This "page table page" is probably not one; bail. */
ERR("Frame number in type %lu page table is out of range: "
- "i=%d pfn=0x%lx max_pfn=%lu",
+ "i=%d pfn=0x%lx max_pfn=%lu",
type >> 28, i, pfn, max_pfn);
- return 0;
- }
-
-
+ return 0;
+ }
+
+
pte &= 0xffffff0000000fffULL;
pte |= (uint64_t)p2m[pfn] << PAGE_SHIFT;
- if(pt_levels == 2)
- ((uint32_t *)page)[i] = (uint32_t)pte;
- else
- ((uint64_t *)page)[i] = (uint64_t)pte;
-
-
-
- }
- }
-
- return 1;
+ if(pt_levels == 2)
+ ((uint32_t *)page)[i] = (uint32_t)pte;
+ else
+ ((uint64_t *)page)[i] = (uint64_t)pte;
+
+
+
+ }
+ }
+
+ return 1;
}
-int xc_linux_restore(int xc_handle, int io_fd,
- uint32_t dom, unsigned long nr_pfns,
+int xc_linux_restore(int xc_handle, int io_fd,
+ uint32_t dom, unsigned long nr_pfns,
unsigned int store_evtchn, unsigned long *store_mfn,
unsigned int console_evtchn, unsigned long *console_mfn)
{
DECLARE_DOM0_OP;
int rc = 1, i, n;
- unsigned long mfn, pfn;
+ unsigned long mfn, pfn;
unsigned int prev_pc, this_pc;
int verify = 0;
- int nraces = 0;
+ int nraces = 0;
/* The new domain's shared-info frame number. */
unsigned long shared_info_frame;
unsigned char shared_info_page[PAGE_SIZE]; /* saved contents from file */
shared_info_t *shared_info = (shared_info_t *)shared_info_page;
-
+
/* A copy of the CPU context of the guest. */
vcpu_guest_context_t ctxt;
@@ -135,7 +135,7 @@
unsigned long *page = NULL;
/* A copy of the pfn-to-mfn table frame list. */
- unsigned long *p2m_frame_list = NULL;
+ unsigned long *p2m_frame_list = NULL;
/* A temporary mapping of the guest's start_info page. */
start_info_t *start_info;
@@ -148,17 +148,17 @@
unsigned long buf[PAGE_SIZE/sizeof(unsigned long)];
struct mmuext_op pin[MAX_PIN_BATCH];
- unsigned int nr_pins;
-
-
- max_pfn = nr_pfns;
+ unsigned int nr_pins;
+
+
+ max_pfn = nr_pfns;
DPRINTF("xc_linux_restore start: max_pfn = %lx\n", max_pfn);
- if(!get_platform_info(xc_handle, dom,
+ if(!get_platform_info(xc_handle, dom,
&max_mfn, &hvirt_start, &pt_levels)) {
- ERR("Unable to get platform info.");
+ ERR("Unable to get platform info.");
return 1;
}
@@ -171,20 +171,20 @@
/* Read the saved P2M frame list */
- if(!(p2m_frame_list = malloc(P2M_FL_SIZE))) {
+ if(!(p2m_frame_list = malloc(P2M_FL_SIZE))) {
ERR("Couldn't allocate p2m_frame_list array");
goto out;
}
-
- if (!read_exact(io_fd, p2m_frame_list, P2M_FL_SIZE)) {
+
+ if (!read_exact(io_fd, p2m_frame_list, P2M_FL_SIZE)) {
ERR("read p2m_frame_list failed");
goto out;
}
-
+
/* We want zeroed memory so use calloc rather than malloc. */
- p2m = calloc(sizeof(unsigned long), max_pfn);
- pfn_type = calloc(sizeof(unsigned long), max_pfn);
+ p2m = calloc(sizeof(unsigned long), max_pfn);
+ pfn_type = calloc(sizeof(unsigned long), max_pfn);
region_mfn = calloc(sizeof(unsigned long), MAX_BATCH_SIZE);
if ((p2m == NULL) || (pfn_type == NULL) || (region_mfn == NULL)) {
@@ -192,7 +192,7 @@
errno = ENOMEM;
goto out;
}
-
+
if (mlock(region_mfn, sizeof(unsigned long) * MAX_BATCH_SIZE)) {
ERR("Could not mlock region_mfn");
goto out;
@@ -207,27 +207,27 @@
}
shared_info_frame = op.u.getdomaininfo.shared_info_frame;
- if(xc_domain_setmaxmem(xc_handle, dom, PFN_TO_KB(max_pfn)) != 0) {
+ if(xc_domain_setmaxmem(xc_handle, dom, PFN_TO_KB(max_pfn)) != 0) {
errno = ENOMEM;
goto out;
}
-
+
if(xc_domain_memory_increase_reservation(
- xc_handle, dom, max_pfn, 0, 0, NULL) != 0) {
+ xc_handle, dom, max_pfn, 0, 0, NULL) != 0) {
ERR("Failed to increase reservation by %lx KB", PFN_TO_KB(max_pfn));
errno = ENOMEM;
goto out;
}
- DPRINTF("Increased domain reservation by %lx KB\n", PFN_TO_KB(max_pfn));
+ DPRINTF("Increased domain reservation by %lx KB\n", PFN_TO_KB(max_pfn));
/* Build the pfn-to-mfn table. We choose MFN ordering returned by Xen. */
if (xc_get_pfn_list(xc_handle, dom, p2m, max_pfn) != max_pfn) {
ERR("Did not read correct number of frame numbers for new dom");
goto out;
}
-
- if(!(mmu = xc_init_mmu_updates(xc_handle, dom))) {
+
+ if(!(mmu = xc_init_mmu_updates(xc_handle, dom))) {
ERR("Could not initialise for MMU updates");
goto out;
}
@@ -242,7 +242,7 @@
prev_pc = 0;
n = 0;
- while (1) {
+ while (1) {
int j;
@@ -253,13 +253,13 @@
prev_pc = this_pc;
}
- if (!read_exact(io_fd, &j, sizeof(int))) {
+ if (!read_exact(io_fd, &j, sizeof(int))) {
ERR("Error when reading batch size");
goto out;
}
PPRINTF("batch %d\n",j);
-
+
if (j == -1) {
verify = 1;
fprintf(stderr, "Entering page verify mode\n");
@@ -269,27 +269,27 @@
if (j == 0)
break; /* our work here is done */
- if (j > MAX_BATCH_SIZE) {
+ if (j > MAX_BATCH_SIZE) {
ERR("Max batch size exceeded. Giving up.");
goto out;
}
-
- if (!read_exact(io_fd, region_pfn_type, j*sizeof(unsigned long))) {
+
+ if (!read_exact(io_fd, region_pfn_type, j*sizeof(unsigned long))) {
ERR("Error when reading region pfn types");
goto out;
}
- for (i = 0; i < j; i++) {
+ for (i = 0; i < j; i++) {
if ((region_pfn_type[i] & LTAB_MASK) == XTAB)
region_mfn[i] = 0; /* we know map will fail, but don't care */
- else
- region_mfn[i] = p2m[region_pfn_type[i] & ~LTAB_MASK];
-
- }
-
+ else
+ region_mfn[i] = p2m[region_pfn_type[i] & ~LTAB_MASK];
+
+ }
+
if (!(region_base = xc_map_foreign_batch(
- xc_handle, dom, PROT_WRITE, region_mfn, j))) {
+ xc_handle, dom, PROT_WRITE, region_mfn, j))) {
ERR("map batch failed");
goto out;
}
@@ -297,12 +297,12 @@
for ( i = 0; i < j; i++ )
{
void *page;
- unsigned long pagetype;
+ unsigned long pagetype;
pfn = region_pfn_type[i] & ~LTAB_MASK;
- pagetype = region_pfn_type[i] & LTAB_MASK;
-
- if (pagetype == XTAB)
+ pagetype = region_pfn_type[i] & LTAB_MASK;
+
+ if (pagetype == XTAB)
/* a bogus/unmapped page: skip it */
continue;
@@ -311,72 +311,72 @@
goto out;
}
- pfn_type[pfn] = pagetype;
+ pfn_type[pfn] = pagetype;
mfn = p2m[pfn];
/* In verify mode, we use a copy; otherwise we work in place */
- page = verify ? (void *)buf : (region_base + i*PAGE_SIZE);
-
- if (!read_exact(io_fd, page, PAGE_SIZE)) {
+ page = verify ? (void *)buf : (region_base + i*PAGE_SIZE);
+
+ if (!read_exact(io_fd, page, PAGE_SIZE)) {
ERR("Error when reading page (type was %lx)", pagetype);
goto out;
}
- pagetype &= LTABTYPE_MASK;
-
- if(pagetype >= L1TAB && pagetype <= L4TAB) {
-
- /*
- ** A page table page - need to 'uncanonicalize' it, i.e.
- ** replace all the references to pfns with the corresponding
- ** mfns for the new domain.
- **
- ** On PAE we need to ensure that PGDs are in MFNs < 4G, and
- ** so we may need to update the p2m after the main loop.
- ** Hence we defer canonicalization of L1s until then.
+ pagetype &= LTABTYPE_MASK;
+
+ if(pagetype >= L1TAB && pagetype <= L4TAB) {
+
+ /*
+ ** A page table page - need to 'uncanonicalize' it, i.e.
+ ** replace all the references to pfns with the corresponding
+ ** mfns for the new domain.
+ **
+ ** On PAE we need to ensure that PGDs are in MFNs < 4G, and
+ ** so we may need to update the p2m after the main loop.
+ ** Hence we defer canonicalization of L1s until then.
*/
- if(pt_levels != 3 || pagetype != L1TAB) {
+ if(pt_levels != 3 || pagetype != L1TAB) {
if(!uncanonicalize_pagetable(pagetype, page)) {
- /*
+ /*
** Failing to uncanonicalize a page table can be ok
** under live migration since the pages type may have
- ** changed by now (and we'll get an update later).
+ ** changed by now (and we'll get an update later).
*/
- DPRINTF("PT L%ld race on pfn=%08lx mfn=%08lx\n",
- pagetype >> 28, pfn, mfn);
- nraces++;
- continue;
+ DPRINTF("PT L%ld race on pfn=%08lx mfn=%08lx\n",
+ pagetype >> 28, pfn, mfn);
+ nraces++;
+ continue;
}
- }
-
- } else if(pagetype != NOTAB) {
+ }
+
+ } else if(pagetype != NOTAB) {
ERR("Bogus page type %lx page table is out of range: "
"i=%d max_pfn=%lu", pagetype, i, max_pfn);
goto out;
- }
+ }
if (verify) {
int res = memcmp(buf, (region_base + i*PAGE_SIZE), PAGE_SIZE);
- if (res) {
+ if (res) {
int v;
DPRINTF("************** pfn=%lx type=%lx gotcs=%08lx "
- "actualcs=%08lx\n", pfn, pfn_type[pfn],
- csum_page(region_base + i*PAGE_SIZE),
+ "actualcs=%08lx\n", pfn, pfn_type[pfn],
+ csum_page(region_base + i*PAGE_SIZE),
csum_page(buf));
for (v = 0; v < 4; v++) {
-
- unsigned long *p = (unsigned long *)
+
+ unsigned long *p = (unsigned long *)
(region_base + i*PAGE_SIZE);
if (buf[v] != p[v])
DPRINTF(" %d: %08lx %08lx\n", v, buf[v], p[v]);
@@ -384,8 +384,8 @@
}
}
- if (xc_add_mmu_update(xc_handle, mmu,
- (((unsigned long long)mfn) << PAGE_SHIFT)
+ if (xc_add_mmu_update(xc_handle, mmu,
+ (((unsigned long long)mfn) << PAGE_SHIFT)
| MMU_MACHPHYS_UPDATE, pfn)) {
ERR("failed machpys update mfn=%lx pfn=%lx", mfn, pfn);
goto out;
@@ -398,149 +398,149 @@
DPRINTF("Received all pages (%d races)\n", nraces);
- if(pt_levels == 3) {
-
- /*
- ** XXX SMH on PAE we need to ensure PGDs are in MFNs < 4G. This
+ if(pt_levels == 3) {
+
+ /*
+ ** XXX SMH on PAE we need to ensure PGDs are in MFNs < 4G. This
** is a little awkward and involves (a) finding all such PGDs and
- ** replacing them with 'lowmem' versions; (b) upating the p2m[]
+ ** replacing them with 'lowmem' versions; (b) upating the p2m[]
** with the new info; and (c) canonicalizing all the L1s using the
- ** (potentially updated) p2m[].
- **
+ ** (potentially updated) p2m[].
+ **
** This is relatively slow (and currently involves two passes through
** the pfn_type[] array), but at least seems to be correct. May wish
- ** to consider more complex approaches to optimize this later.
+ ** to consider more complex approaches to optimize this later.
*/
- int j, k;
+ int j, k;
/* First pass: find all L3TABs current in > 4G mfns and get new mfns */
for (i = 0; i < max_pfn; i++) {
-
+
if (((pfn_type[i] & LTABTYPE_MASK)==L3TAB) && (p2m[i]>0xfffffUL)) {
- unsigned long new_mfn;
- uint64_t l3ptes[4];
- uint64_t *l3tab;
+ unsigned long new_mfn;
+ uint64_t l3ptes[4];
+ uint64_t *l3tab;
l3tab = (uint64_t *)
- xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
- PROT_READ, p2m[i]);
-
- for(j = 0; j < 4; j++)
- l3ptes[j] = l3tab[j];
-
- munmap(l3tab, PAGE_SIZE);
+ xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+ PROT_READ, p2m[i]);
+
+ for(j = 0; j < 4; j++)
+ l3ptes[j] = l3tab[j];
+
+ munmap(l3tab, PAGE_SIZE);
if (!(new_mfn=xc_make_page_below_4G(xc_handle, dom, p2m[i]))) {
ERR("Couldn't get a page below 4GB :-(");
goto out;
}
-
+
p2m[i] = new_mfn;
- if (xc_add_mmu_update(xc_handle, mmu,
- (((unsigned long long)new_mfn)
- << PAGE_SHIFT) |
+ if (xc_add_mmu_update(xc_handle, mmu,
+ (((unsigned long long)new_mfn)
+ << PAGE_SHIFT) |
MMU_MACHPHYS_UPDATE, i)) {
ERR("Couldn't m2p on PAE root pgdir");
goto out;
}
-
+
l3tab = (uint64_t *)
- xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
- PROT_READ | PROT_WRITE, p2m[i]);
-
- for(j = 0; j < 4; j++)
- l3tab[j] = l3ptes[j];
-
- munmap(l3tab, PAGE_SIZE);
-
+ xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+ PROT_READ | PROT_WRITE, p2m[i]);
+
+ for(j = 0; j < 4; j++)
+ l3tab[j] = l3ptes[j];
+
+ munmap(l3tab, PAGE_SIZE);
+
}
}
/* Second pass: find all L1TABs and uncanonicalize them */
- j = 0;
-
- for(i = 0; i < max_pfn; i++) {
-
- if (((pfn_type[i] & LTABTYPE_MASK)==L1TAB)) {
- region_mfn[j] = p2m[i];
- j++;
- }
-
- if(i == (max_pfn-1) || j == MAX_BATCH_SIZE) {
+ j = 0;
+
+ for(i = 0; i < max_pfn; i++) {
+
+ if (((pfn_type[i] & LTABTYPE_MASK)==L1TAB)) {
+ region_mfn[j] = p2m[i];
+ j++;
+ }
+
+ if(i == (max_pfn-1) || j == MAX_BATCH_SIZE) {
if (!(region_base = xc_map_foreign_batch(
- xc_handle, dom, PROT_READ | PROT_WRITE,
- region_mfn, j))) {
+ xc_handle, dom, PROT_READ | PROT_WRITE,
+ region_mfn, j))) {
ERR("map batch failed");
goto out;
}
for(k = 0; k < j; k++) {
- if(!uncanonicalize_pagetable(L1TAB,
+ if(!uncanonicalize_pagetable(L1TAB,
region_base + k*PAGE_SIZE)) {
- ERR("failed uncanonicalize pt!");
- goto out;
- }
+ ERR("failed uncanonicalize pt!");
+ goto out;
+ }
}
-
- munmap(region_base, j*PAGE_SIZE);
- j = 0;
- }
- }
-
- }
-
-
- if (xc_finish_mmu_updates(xc_handle, mmu)) {
- ERR("Error doing finish_mmu_updates()");
- goto out;
- }
+
+ munmap(region_base, j*PAGE_SIZE);
+ j = 0;
+ }
+ }
+
+ }
+
+
+ if (xc_finish_mmu_updates(xc_handle, mmu)) {
+ ERR("Error doing finish_mmu_updates()");
+ goto out;
+ }
/*
* Pin page tables. Do this after writing to them as otherwise Xen
* will barf when doing the type-checking.
*/
- nr_pins = 0;
+ nr_pins = 0;
for (i = 0; i < max_pfn; i++) {
if (i == (max_pfn-1) || nr_pins == MAX_PIN_BATCH) {
- if (xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0) {
- ERR("Failed to pin batch of %d page tables", nr_pins);
+ if (xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0) {
+ ERR("Failed to pin batch of %d page tables", nr_pins);
goto out;
- }
+ }
nr_pins = 0;
}
if ( (pfn_type[i] & LPINTAB) == 0 )
continue;
- switch(pfn_type[i]) {
-
- case (L1TAB|LPINTAB):
+ switch(pfn_type[i]) {
+
+ case (L1TAB|LPINTAB):
pin[nr_pins].cmd = MMUEXT_PIN_L1_TABLE;
- break;
-
- case (L2TAB|LPINTAB):
+ break;
+
+ case (L2TAB|LPINTAB):
pin[nr_pins].cmd = MMUEXT_PIN_L2_TABLE;
- break;
-
- case (L3TAB|LPINTAB):
+ break;
+
+ case (L3TAB|LPINTAB):
pin[nr_pins].cmd = MMUEXT_PIN_L3_TABLE;
- break;
+ break;
case (L4TAB|LPINTAB):
pin[nr_pins].cmd = MMUEXT_PIN_L4_TABLE;
- break;
-
- default:
- continue;
+ break;
+
+ default:
+ continue;
}
pin[nr_pins].arg1.mfn = p2m[i];
- nr_pins++;
+ nr_pins++;
}
@@ -553,17 +553,17 @@
unsigned long *pfntab;
int rc;
- if (!read_exact(io_fd, &count, sizeof(count))) {
+ if (!read_exact(io_fd, &count, sizeof(count))) {
ERR("Error when reading pfn count");
goto out;
}
- if(!(pfntab = malloc(sizeof(unsigned long) * count))) {
+ if(!(pfntab = malloc(sizeof(unsigned long) * count))) {
ERR("Out of memory");
goto out;
}
-
- if (!read_exact(io_fd, pfntab, sizeof(unsigned long)*count)) {
+
+ if (!read_exact(io_fd, pfntab, sizeof(unsigned long)*count)) {
ERR("Error when reading pfntab");
goto out;
}
@@ -572,14 +572,14 @@
unsigned long pfn = pfntab[i];
- if(pfn > max_pfn)
+ if(pfn > max_pfn)
/* shouldn't happen - continue optimistically */
- continue;
-
- pfntab[i] = p2m[pfn];
- p2m[pfn] = INVALID_P2M_ENTRY; // not in pseudo-physical map
- }
-
+ continue;
+
+ pfntab[i] = p2m[pfn];
+ p2m[pfn] = INVALID_P2M_ENTRY; // not in pseudo-physical map
+ }
+
if (count > 0) {
struct xen_memory_reservation reservation = {
@@ -590,16 +590,16 @@
};
if ((rc = xc_memory_op(xc_handle, XENMEM_decrease_reservation,
- &reservation)) != count) {
+ &reservation)) != count) {
ERR("Could not decrease reservation : %d", rc);
goto out;
} else
DPRINTF("Decreased reservation by %d pages\n", count);
- }
- }
-
- if (!read_exact(io_fd, &ctxt, sizeof(ctxt)) ||
- !read_exact(io_fd, shared_info_page, PAGE_SIZE)) {
+ }
+ }
+
+ if (!read_exact(io_fd, &ctxt, sizeof(ctxt)) ||
+ !read_exact(io_fd, shared_info_page, PAGE_SIZE)) {
ERR("Error when reading ctxt or shared info page");
goto out;
}
@@ -642,15 +642,15 @@
if (pfn >= max_pfn) {
ERR("PT base is bad: pfn=%lu max_pfn=%lu type=%08lx",
- pfn, max_pfn, pfn_type[pfn]);
- goto out;
- }
-
- if ( (pfn_type[pfn] & LTABTYPE_MASK) !=
+ pfn, max_pfn, pfn_type[pfn]);
+ goto out;
+ }
+
+ if ( (pfn_type[pfn] & LTABTYPE_MASK) !=
((unsigned long)pt_levels<<LTAB_SHIFT) ) {
ERR("PT base is bad. pfn=%lu nr=%lu type=%08lx %08lx",
- pfn, max_pfn, pfn_type[pfn],
- (unsigned long)pt_levels<<LTAB_SHIFT);
+ pfn, max_pfn, pfn_type[pfn],
+ (unsigned long)pt_levels<<LTAB_SHIFT);
goto out;
}
@@ -667,7 +667,7 @@
xc_handle, dom, PAGE_SIZE, PROT_WRITE, shared_info_frame);
memcpy(page, shared_info, sizeof(shared_info_t));
munmap(page, PAGE_SIZE);
-
+
/* Uncanonicalise the pfn-to-mfn table frame-number list. */
for (i = 0; i < P2M_FL_ENTRIES; i++) {
pfn = p2m_frame_list[i];
@@ -678,16 +678,16 @@
p2m_frame_list[i] = p2m[pfn];
}
-
+
/* Copy the P2M we've constructed to the 'live' P2M */
- if (!(live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_WRITE,
+ if (!(live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_WRITE,
p2m_frame_list, P2M_FL_ENTRIES))) {
ERR("Couldn't map p2m table");
goto out;
}
- memcpy(live_p2m, p2m, P2M_SIZE);
- munmap(live_p2m, P2M_SIZE);
+ memcpy(live_p2m, p2m, P2M_SIZE);
+ munmap(live_p2m, P2M_SIZE);
/*
* Safety checking of saved context:
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_linux_save.c
--- a/tools/libxc/xc_linux_save.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_linux_save.c Sat Apr 15 09:07:31 2006
@@ -1,8 +1,8 @@
/******************************************************************************
* xc_linux_save.c
- *
+ *
* Save the state of a running Linux session.
- *
+ *
* Copyright (c) 2003, K A Fraser.
*/
@@ -17,23 +17,23 @@
/*
** Default values for important tuning parameters. Can override by passing
-** non-zero replacement values to xc_linux_save().
+** non-zero replacement values to xc_linux_save().
**
-** XXX SMH: should consider if want to be able to override MAX_MBIT_RATE too.
-**
+** XXX SMH: should consider if want to be able to override MAX_MBIT_RATE too.
+**
*/
-#define DEF_MAX_ITERS 29 /* limit us to 30 times round loop */
+#define DEF_MAX_ITERS 29 /* limit us to 30 times round loop */
#define DEF_MAX_FACTOR 3 /* never send more than 3x nr_pfns */
/* max mfn of the whole machine */
-static unsigned long max_mfn;
+static unsigned long max_mfn;
/* virtual starting address of the hypervisor */
-static unsigned long hvirt_start;
+static unsigned long hvirt_start;
/* #levels of page tables used by the currrent guest */
-static unsigned int pt_levels;
+static unsigned int pt_levels;
/* total number of pages used by the current guest */
static unsigned long max_pfn;
@@ -56,8 +56,8 @@
(((_mfn) < (max_mfn)) && \
((mfn_to_pfn(_mfn) < (max_pfn)) && \
(live_p2m[mfn_to_pfn(_mfn)] == (_mfn))))
-
-
+
+
/* Returns TRUE if MFN is successfully converted to a PFN. */
#define translate_mfn_to_pfn(_pmfn) \
({ \
@@ -70,12 +70,12 @@
_res; \
})
-/*
-** During (live) save/migrate, we maintain a number of bitmaps to track
-** which pages we have to send, to fixup, and to skip.
+/*
+** During (live) save/migrate, we maintain a number of bitmaps to track
+** which pages we have to send, to fixup, and to skip.
*/
-#define BITS_PER_LONG (sizeof(unsigned long) * 8)
+#define BITS_PER_LONG (sizeof(unsigned long) * 8)
#define BITMAP_SIZE ((max_pfn + BITS_PER_LONG - 1) / 8)
#define BITMAP_ENTRY(_nr,_bmap) \
@@ -85,17 +85,17 @@
static inline int test_bit (int nr, volatile void * addr)
{
- return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
+ return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
}
static inline void clear_bit (int nr, volatile void * addr)
{
- BITMAP_ENTRY(nr, addr) &= ~(1 << BITMAP_SHIFT(nr));
+ BITMAP_ENTRY(nr, addr) &= ~(1 << BITMAP_SHIFT(nr));
}
static inline void set_bit ( int nr, volatile void * addr)
{
- BITMAP_ENTRY(nr, addr) |= (1 << BITMAP_SHIFT(nr));
+ BITMAP_ENTRY(nr, addr) |= (1 << BITMAP_SHIFT(nr));
}
/* Returns the hamming weight (i.e. the number of bits set) in a N-bit word */
@@ -122,7 +122,7 @@
{
/* Need a simple permutation function so that we scan pages in a
pseudo random order, enabling us to get a better estimate of
- the domain's page dirtying rate as we go (there are often
+ the domain's page dirtying rate as we go (there are often
contiguous ranges of pfns that have similar behaviour, and we
want to mix them up. */
@@ -130,21 +130,21 @@
/* 512MB domain, 128k pages, order 17 */
/*
- QPONMLKJIHGFEDCBA
- QPONMLKJIH
- GFEDCBA
+ QPONMLKJIHGFEDCBA
+ QPONMLKJIH
+ GFEDCBA
*/
-
+
/*
- QPONMLKJIHGFEDCBA
- EDCBA
+ QPONMLKJIHGFEDCBA
+ EDCBA
QPONM
LKJIHGF
*/
do { i = ((i>>(order_nr-10)) | ( i<<10 ) ) & ((1<<order_nr)-1); }
while ( i >= nr ); /* this won't ever loop if nr is a power of 2 */
-
+
return i;
}
@@ -165,7 +165,7 @@
static uint64_t tv_delta(struct timeval *new, struct timeval *old)
{
- return ((new->tv_sec - old->tv_sec)*1000000 ) +
+ return ((new->tv_sec - old->tv_sec)*1000000 ) +
(new->tv_usec - old->tv_usec);
}
@@ -175,7 +175,7 @@
/*
** We control the rate at which we transmit (or save) to minimize impact
-** on running domains (including the target if we're doing live migrate).
+** on running domains (including the target if we're doing live migrate).
*/
#define MAX_MBIT_RATE 500 /* maximum transmit rate for migrate */
@@ -193,10 +193,10 @@
static int mbit_rate, ombit_rate = 0;
/* Have we reached the maximum transmission rate? */
-#define RATE_IS_MAX() (mbit_rate == MAX_MBIT_RATE)
-
-
-static inline void initialize_mbit_rate()
+#define RATE_IS_MAX() (mbit_rate == MAX_MBIT_RATE)
+
+
+static inline void initialize_mbit_rate()
{
mbit_rate = START_MBIT_RATE;
}
@@ -213,7 +213,7 @@
if (START_MBIT_RATE == 0)
return write(io_fd, buf, n);
-
+
budget -= n;
if (budget < 0) {
if (mbit_rate != ombit_rate) {
@@ -253,46 +253,46 @@
#else /* ! ADAPTIVE SAVE */
-#define RATE_IS_MAX() (0)
-#define ratewrite(_io_fd, _buf, _n) write((_io_fd), (_buf), (_n))
-#define initialize_mbit_rate()
+#define RATE_IS_MAX() (0)
+#define ratewrite(_io_fd, _buf, _n) write((_io_fd), (_buf), (_n))
+#define initialize_mbit_rate()
#endif
static inline ssize_t write_exact(int fd, void *buf, size_t count)
{
- if(write(fd, buf, count) != count)
- return 0;
- return 1;
-}
-
-
-
-static int print_stats(int xc_handle, uint32_t domid, int pages_sent,
+ if(write(fd, buf, count) != count)
+ return 0;
+ return 1;
+}
+
+
+
+static int print_stats(int xc_handle, uint32_t domid, int pages_sent,
xc_shadow_control_stats_t *stats, int print)
{
static struct timeval wall_last;
static long long d0_cpu_last;
static long long d1_cpu_last;
-
+
struct timeval wall_now;
long long wall_delta;
long long d0_cpu_now, d0_cpu_delta;
long long d1_cpu_now, d1_cpu_delta;
-
+
gettimeofday(&wall_now, NULL);
-
+
d0_cpu_now = xc_domain_get_cpu_usage(xc_handle, 0, /* FIXME */ 0)/1000;
d1_cpu_now = xc_domain_get_cpu_usage(xc_handle, domid, /* FIXME */ 0)/1000;
- if ( (d0_cpu_now == -1) || (d1_cpu_now == -1) )
+ if ( (d0_cpu_now == -1) || (d1_cpu_now == -1) )
fprintf(stderr, "ARRHHH!!\n");
-
+
wall_delta = tv_delta(&wall_now,&wall_last)/1000;
-
+
if (wall_delta == 0) wall_delta = 1;
-
+
d0_cpu_delta = (d0_cpu_now - d0_cpu_last)/1000;
d1_cpu_delta = (d1_cpu_now - d1_cpu_last)/1000;
@@ -300,14 +300,14 @@
fprintf(stderr,
"delta %lldms, dom0 %d%%, target %d%%, sent %dMb/s, "
"dirtied %dMb/s %" PRId32 " pages\n",
- wall_delta,
+ wall_delta,
(int)((d0_cpu_delta*100)/wall_delta),
(int)((d1_cpu_delta*100)/wall_delta),
(int)((pages_sent*PAGE_SIZE)/(wall_delta*(1000/8))),
(int)((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8))),
stats->dirty_count);
-#ifdef ADAPTIVE_SAVE
+#ifdef ADAPTIVE_SAVE
if (((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8))) > mbit_rate) {
mbit_rate = (int)((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8)))
+ 50;
@@ -315,16 +315,16 @@
mbit_rate = MAX_MBIT_RATE;
}
#endif
-
+
d0_cpu_last = d0_cpu_now;
d1_cpu_last = d1_cpu_now;
- wall_last = wall_now;
+ wall_last = wall_now;
return 0;
}
-static int analysis_phase(int xc_handle, uint32_t domid, int max_pfn,
+static int analysis_phase(int xc_handle, uint32_t domid, int max_pfn,
unsigned long *arr, int runs)
{
long long start, now;
@@ -335,24 +335,24 @@
for (j = 0; j < runs; j++) {
int i;
-
+
xc_shadow_control(xc_handle, domid, DOM0_SHADOW_CONTROL_OP_CLEAN,
arr, max_pfn, NULL);
fprintf(stderr, "#Flush\n");
- for ( i = 0; i < 40; i++ ) {
- usleep(50000);
+ for ( i = 0; i < 40; i++ ) {
+ usleep(50000);
now = llgettimeofday();
xc_shadow_control(xc_handle, domid, DOM0_SHADOW_CONTROL_OP_PEEK,
NULL, 0, &stats);
-
+
fprintf(stderr, "now= %lld faults= %" PRId32 " dirty= %" PRId32
- " dirty_net= %" PRId32 " dirty_block= %" PRId32"\n",
- ((now-start)+500)/1000,
+ " dirty_net= %" PRId32 " dirty_block= %" PRId32"\n",
+ ((now-start)+500)/1000,
stats.fault_count, stats.dirty_count,
stats.dirty_net_count, stats.dirty_block_count);
}
}
-
+
return -1;
}
@@ -375,7 +375,7 @@
return -1;
}
- if ( xc_vcpu_getcontext(xc_handle, dom, 0 /* XXX */, ctxt))
+ if ( xc_vcpu_getcontext(xc_handle, dom, 0 /* XXX */, ctxt))
ERR("Could not get vcpu context");
@@ -383,22 +383,22 @@
return 0; // success
if (info->paused) {
- // try unpausing domain, wait, and retest
+ // try unpausing domain, wait, and retest
xc_domain_unpause( xc_handle, dom );
-
+
ERR("Domain was paused. Wait and re-test.");
usleep(10000); // 10ms
-
+
goto retry;
}
if( ++i < 100 ) {
ERR("Retry suspend domain.");
- usleep(10000); // 10ms
+ usleep(10000); // 10ms
goto retry;
}
-
+
ERR("Unable to suspend domain.");
return -1;
@@ -406,173 +406,173 @@
/*
-** During transfer (or in the state file), all page-table pages must be
-** converted into a 'canonical' form where references to actual mfns
-** are replaced with references to the corresponding pfns.
+** During transfer (or in the state file), all page-table pages must be
+** converted into a 'canonical' form where references to actual mfns
+** are replaced with references to the corresponding pfns.
**
-** This function performs the appropriate conversion, taking into account
-** which entries do not require canonicalization (in particular, those
-** entries which map the virtual address reserved for the hypervisor).
+** This function performs the appropriate conversion, taking into account
+** which entries do not require canonicalization (in particular, those
+** entries which map the virtual address reserved for the hypervisor).
*/
-void canonicalize_pagetable(unsigned long type, unsigned long pfn,
- const void *spage, void *dpage)
-{
-
+void canonicalize_pagetable(unsigned long type, unsigned long pfn,
+ const void *spage, void *dpage)
+{
+
int i, pte_last, xen_start, xen_end;
uint64_t pte;
- /*
+ /*
** We need to determine which entries in this page table hold
** reserved hypervisor mappings. This depends on the current
- ** page table type as well as the number of paging levels.
+ ** page table type as well as the number of paging levels.
*/
- xen_start = xen_end = pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8);
-
+ xen_start = xen_end = pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8);
+
if (pt_levels == 2 && type == L2TAB)
- xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT);
-
- if (pt_levels == 3 && type == L3TAB)
- xen_start = L3_PAGETABLE_ENTRIES_PAE;
-
- /*
- ** in PAE only the L2 mapping the top 1GB contains Xen mappings.
+ xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT);
+
+ if (pt_levels == 3 && type == L3TAB)
+ xen_start = L3_PAGETABLE_ENTRIES_PAE;
+
+ /*
+ ** in PAE only the L2 mapping the top 1GB contains Xen mappings.
** We can spot this by looking for the guest linear mapping which
- ** Xen always ensures is present in that L2. Guests must ensure
- ** that this check will fail for other L2s.
+ ** Xen always ensures is present in that L2. Guests must ensure
+ ** that this check will fail for other L2s.
*/
if (pt_levels == 3 && type == L2TAB) {
/* XXX index of the L2 entry in PAE mode which holds the guest LPT */
-#define PAE_GLPT_L2ENTRY (495)
- pte = ((uint64_t*)spage)[PAE_GLPT_L2ENTRY];
+#define PAE_GLPT_L2ENTRY (495)
+ pte = ((uint64_t*)spage)[PAE_GLPT_L2ENTRY];
if(((pte >> PAGE_SHIFT) & 0x0fffffff) == live_p2m[pfn])
- xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff;
- }
-
- if (pt_levels == 4 && type == L4TAB) {
+ xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff;
+ }
+
+ if (pt_levels == 4 && type == L4TAB) {
/*
- ** XXX SMH: should compute these from hvirt_start (which we have)
- ** and hvirt_end (which we don't)
+ ** XXX SMH: should compute these from hvirt_start (which we have)
+ ** and hvirt_end (which we don't)
*/
- xen_start = 256;
- xen_end = 272;
+ xen_start = 256;
+ xen_end = 272;
}
/* Now iterate through the page table, canonicalizing each PTE */
for (i = 0; i < pte_last; i++ ) {
- unsigned long pfn, mfn;
-
+ unsigned long pfn, mfn;
+
if (pt_levels == 2)
pte = ((uint32_t*)spage)[i];
else
pte = ((uint64_t*)spage)[i];
-
+
if (i >= xen_start && i < xen_end)
pte = 0;
-
+
if (pte & _PAGE_PRESENT) {
-
- mfn = (pte >> PAGE_SHIFT) & 0xfffffff;
+
+ mfn = (pte >> PAGE_SHIFT) & 0xfffffff;
if (!MFN_IS_IN_PSEUDOPHYS_MAP(mfn)) {
- /* This will happen if the type info is stale which
+ /* This will happen if the type info is stale which
is quite feasible under live migration */
DPRINTF("PT Race: [%08lx,%d] pte=%llx, mfn=%08lx\n",
- type, i, (unsigned long long)pte, mfn);
+ type, i, (unsigned long long)pte, mfn);
pfn = 0; /* zap it - we'll retransmit this page later */
- } else
+ } else
pfn = mfn_to_pfn(mfn);
-
+
pte &= 0xffffff0000000fffULL;
pte |= (uint64_t)pfn << PAGE_SHIFT;
}
-
+
if (pt_levels == 2)
((uint32_t*)dpage)[i] = pte;
else
- ((uint64_t*)dpage)[i] = pte;
-
- }
-
- return;
-}
-
-
-
-static unsigned long *xc_map_m2p(int xc_handle,
- unsigned long max_mfn,
- int prot)
-{
+ ((uint64_t*)dpage)[i] = pte;
+
+ }
+
+ return;
+}
+
+
+
+static unsigned long *xc_map_m2p(int xc_handle,
+ unsigned long max_mfn,
+ int prot)
+{
struct xen_machphys_mfn_list xmml;
- privcmd_mmap_t ioctlx;
- privcmd_mmap_entry_t *entries;
- unsigned long m2p_chunks, m2p_size;
- unsigned long *m2p;
- int i, rc;
-
- m2p_size = M2P_SIZE(max_mfn);
- m2p_chunks = M2P_CHUNKS(max_mfn);
+ privcmd_mmap_t ioctlx;
+ privcmd_mmap_entry_t *entries;
+ unsigned long m2p_chunks, m2p_size;
+ unsigned long *m2p;
+ int i, rc;
+
+ m2p_size = M2P_SIZE(max_mfn);
+ m2p_chunks = M2P_CHUNKS(max_mfn);
xmml.max_extents = m2p_chunks;
- if (!(xmml.extent_start = malloc(m2p_chunks * sizeof(unsigned long)))) {
- ERR("failed to allocate space for m2p mfns");
- return NULL;
- }
+ if (!(xmml.extent_start = malloc(m2p_chunks * sizeof(unsigned long)))) {
+ ERR("failed to allocate space for m2p mfns");
+ return NULL;
+ }
if (xc_memory_op(xc_handle, XENMEM_machphys_mfn_list, &xmml) ||
(xmml.nr_extents != m2p_chunks)) {
- ERR("xc_get_m2p_mfns");
+ ERR("xc_get_m2p_mfns");
return NULL;
}
- if ((m2p = mmap(NULL, m2p_size, prot,
+ if ((m2p = mmap(NULL, m2p_size, prot,
MAP_SHARED, xc_handle, 0)) == MAP_FAILED) {
- ERR("failed to mmap m2p");
- return NULL;
- }
-
- if (!(entries = malloc(m2p_chunks * sizeof(privcmd_mmap_entry_t)))) {
- ERR("failed to allocate space for mmap entries");
- return NULL;
- }
+ ERR("failed to mmap m2p");
+ return NULL;
+ }
+
+ if (!(entries = malloc(m2p_chunks * sizeof(privcmd_mmap_entry_t)))) {
+ ERR("failed to allocate space for mmap entries");
+ return NULL;
+ }
ioctlx.num = m2p_chunks;
- ioctlx.dom = DOMID_XEN;
- ioctlx.entry = entries;
-
- for (i=0; i < m2p_chunks; i++) {
- entries[i].va = (unsigned long)(((void *)m2p) + (i * M2P_CHUNK_SIZE));
+ ioctlx.dom = DOMID_XEN;
+ ioctlx.entry = entries;
+
+ for (i=0; i < m2p_chunks; i++) {
+ entries[i].va = (unsigned long)(((void *)m2p) + (i * M2P_CHUNK_SIZE));
entries[i].mfn = xmml.extent_start[i];
entries[i].npages = M2P_CHUNK_SIZE >> PAGE_SHIFT;
}
if ((rc = ioctl(xc_handle, IOCTL_PRIVCMD_MMAP, &ioctlx)) < 0) {
- ERR("ioctl_mmap failed (rc = %d)", rc);
- return NULL;
+ ERR("ioctl_mmap failed (rc = %d)", rc);
+ return NULL;
}
free(xmml.extent_start);
- free(entries);
-
- return m2p;
-}
-
-
-
-int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
+ free(entries);
+
+ return m2p;
+}
+
+
+
+int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
uint32_t max_factor, uint32_t flags, int (*suspend)(int))
{
xc_dominfo_t info;
int rc = 1, i, j, last_iter, iter = 0;
- int live = (flags & XCFLAGS_LIVE);
- int debug = (flags & XCFLAGS_DEBUG);
+ int live = (flags & XCFLAGS_LIVE);
+ int debug = (flags & XCFLAGS_DEBUG);
int sent_last_iter, skip_this_iter;
/* The new domain's shared-info frame number. */
unsigned long shared_info_frame;
-
+
/* A copy of the CPU context of the guest. */
vcpu_guest_context_t ctxt;
@@ -581,7 +581,7 @@
unsigned long *pfn_batch = NULL;
/* A temporary mapping, and a copy, of one frame of guest memory. */
- char page[PAGE_SIZE];
+ char page[PAGE_SIZE];
/* Double and single indirect references to the live P2M table */
unsigned long *live_p2m_frame_list_list = NULL;
@@ -597,14 +597,14 @@
unsigned char *region_base = NULL;
/* power of 2 order of max_pfn */
- int order_nr;
+ int order_nr;
/* bitmap of pages:
- - that should be sent this iteration (unless later marked as skip);
+ - that should be sent this iteration (unless later marked as skip);
- to skip this iteration because already dirty;
- to fixup by sending at the end if not already resent; */
unsigned long *to_send = NULL, *to_skip = NULL, *to_fix = NULL;
-
+
xc_shadow_control_stats_t stats;
unsigned long needed_to_fix = 0;
@@ -612,29 +612,29 @@
/* If no explicit control parameters given, use defaults */
- if(!max_iters)
- max_iters = DEF_MAX_ITERS;
- if(!max_factor)
- max_factor = DEF_MAX_FACTOR;
-
- initialize_mbit_rate();
-
- if(!get_platform_info(xc_handle, dom,
+ if(!max_iters)
+ max_iters = DEF_MAX_ITERS;
+ if(!max_factor)
+ max_factor = DEF_MAX_FACTOR;
+
+ initialize_mbit_rate();
+
+ if(!get_platform_info(xc_handle, dom,
&max_mfn, &hvirt_start, &pt_levels)) {
- ERR("Unable to get platform info.");
+ ERR("Unable to get platform info.");
return 1;
}
if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
ERR("Could not get domain info");
- return 1;
+ return 1;
}
if (mlock(&ctxt, sizeof(ctxt))) {
ERR("Unable to mlock ctxt");
return 1;
}
-
+
/* Only have to worry about vcpu 0 even for SMP */
if (xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt)) {
ERR("Could not get vcpu context");
@@ -648,16 +648,16 @@
ERR("Domain is not in a valid Linux guest OS state");
goto out;
}
-
+
/* cheesy sanity check */
if ((info.max_memkb >> (PAGE_SHIFT - 10)) > max_mfn) {
- ERR("Invalid state record -- pfn count out of range: %lu",
- (info.max_memkb >> (PAGE_SHIFT - 10)));
+ ERR("Invalid state record -- pfn count out of range: %lu",
+ (info.max_memkb >> (PAGE_SHIFT - 10)));
goto out;
}
-
+
/* Map the shared info frame */
- if(!(live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+ if(!(live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
PROT_READ, shared_info_frame))) {
ERR("Couldn't map live_shinfo");
goto out;
@@ -665,8 +665,8 @@
max_pfn = live_shinfo->arch.max_pfn;
- live_p2m_frame_list_list =
- xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ,
+ live_p2m_frame_list_list =
+ xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ,
live_shinfo->arch.pfn_to_mfn_frame_list_list);
if (!live_p2m_frame_list_list) {
@@ -674,24 +674,24 @@
goto out;
}
- live_p2m_frame_list =
+ live_p2m_frame_list =
xc_map_foreign_batch(xc_handle, dom, PROT_READ,
live_p2m_frame_list_list,
- P2M_FLL_ENTRIES);
-
+ P2M_FLL_ENTRIES);
+
if (!live_p2m_frame_list) {
ERR("Couldn't map p2m_frame_list");
goto out;
}
- /* Map all the frames of the pfn->mfn table. For migrate to succeed,
- the guest must not change which frames are used for this purpose.
+ /* Map all the frames of the pfn->mfn table. For migrate to succeed,
+ the guest must not change which frames are used for this purpose.
(its not clear why it would want to change them, and we'll be OK
from a safety POV anyhow. */
live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_READ,
live_p2m_frame_list,
- P2M_FL_ENTRIES);
+ P2M_FL_ENTRIES);
if (!live_p2m) {
ERR("Couldn't map p2m table");
@@ -699,25 +699,25 @@
}
/* Setup the mfn_to_pfn table mapping */
- if(!(live_m2p = xc_map_m2p(xc_handle, max_mfn, PROT_READ))) {
- ERR("Failed to map live M2P table");
- goto out;
- }
-
-
+ if(!(live_m2p = xc_map_m2p(xc_handle, max_mfn, PROT_READ))) {
+ ERR("Failed to map live M2P table");
+ goto out;
+ }
+
+
/* Get a local copy of the live_P2M_frame_list */
- if(!(p2m_frame_list = malloc(P2M_FL_SIZE))) {
+ if(!(p2m_frame_list = malloc(P2M_FL_SIZE))) {
ERR("Couldn't allocate p2m_frame_list array");
goto out;
}
- memcpy(p2m_frame_list, live_p2m_frame_list, P2M_FL_SIZE);
+ memcpy(p2m_frame_list, live_p2m_frame_list, P2M_FL_SIZE);
/* Canonicalise the pfn-to-mfn table frame-number list. */
for (i = 0; i < max_pfn; i += ulpp) {
- if (!translate_mfn_to_pfn(&p2m_frame_list[i/ulpp])) {
+ if (!translate_mfn_to_pfn(&p2m_frame_list[i/ulpp])) {
ERR("Frame# in pfn-to-mfn frame list is not in pseudophys");
- ERR("entry %d: p2m_frame_list[%ld] is 0x%lx", i, i/ulpp,
- p2m_frame_list[i/ulpp]);
+ ERR("entry %d: p2m_frame_list[%ld] is 0x%lx", i, i/ulpp,
+ p2m_frame_list[i/ulpp]);
goto out;
}
}
@@ -725,31 +725,31 @@
/* Domain is still running at this point */
if (live) {
- if (xc_shadow_control(xc_handle, dom,
+ if (xc_shadow_control(xc_handle, dom,
DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY,
- NULL, 0, NULL ) < 0) {
+ NULL, 0, NULL ) < 0) {
ERR("Couldn't enable shadow mode");
goto out;
}
-
+
last_iter = 0;
-
+
} else {
-
+
/* This is a non-live suspend. Issue the call back to get the
domain suspended */
-
+
last_iter = 1;
-
+
if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info, &ctxt)) {
ERR("Domain appears not to have suspended");
goto out;
}
-
+
}
/* pretend we sent all the pages last iteration */
- sent_last_iter = max_pfn;
+ sent_last_iter = max_pfn;
/* calculate the power of 2 order of max_pfn, e.g.
@@ -758,15 +758,15 @@
continue;
/* Setup to_send / to_fix and to_skip bitmaps */
- to_send = malloc(BITMAP_SIZE);
- to_fix = calloc(1, BITMAP_SIZE);
- to_skip = malloc(BITMAP_SIZE);
-
+ to_send = malloc(BITMAP_SIZE);
+ to_fix = calloc(1, BITMAP_SIZE);
+ to_skip = malloc(BITMAP_SIZE);
+
if (!to_send || !to_fix || !to_skip) {
ERR("Couldn't allocate to_send array");
goto out;
}
-
+
memset(to_send, 0xff, BITMAP_SIZE);
if (mlock(to_send, BITMAP_SIZE)) {
@@ -779,7 +779,7 @@
ERR("Unable to mlock to_skip");
return 1;
}
-
+
analysis_phase(xc_handle, dom, max_pfn, to_skip, 0);
/* We want zeroed memory so use calloc rather than malloc. */
@@ -787,7 +787,7 @@
pfn_batch = calloc(MAX_BATCH_SIZE, sizeof(unsigned long));
if ((pfn_type == NULL) || (pfn_batch == NULL)) {
- ERR("failed to alloc memory for pfn_type and/or pfn_batch arrays");
+ ERR("failed to alloc memory for pfn_type and/or pfn_batch arrays");
errno = ENOMEM;
goto out;
}
@@ -803,12 +803,12 @@
*/
{
int err=0;
- unsigned long mfn;
+ unsigned long mfn;
for (i = 0; i < max_pfn; i++) {
mfn = live_p2m[i];
- if((mfn != INVALID_P2M_ENTRY) && (mfn_to_pfn(mfn) != i)) {
- DPRINTF("i=0x%x mfn=%lx live_m2p=%lx\n", i,
+ if((mfn != INVALID_P2M_ENTRY) && (mfn_to_pfn(mfn) != i)) {
+ DPRINTF("i=0x%x mfn=%lx live_m2p=%lx\n", i,
mfn, mfn_to_pfn(mfn));
err++;
}
@@ -819,16 +819,16 @@
/* Start writing out the saved-domain record. */
- if(!write_exact(io_fd, &max_pfn, sizeof(unsigned long))) {
+ if(!write_exact(io_fd, &max_pfn, sizeof(unsigned long))) {
ERR("write: max_pfn");
goto out;
}
- if(!write_exact(io_fd, p2m_frame_list, P2M_FL_SIZE)) {
+ if(!write_exact(io_fd, p2m_frame_list, P2M_FL_SIZE)) {
ERR("write: p2m_frame_list");
goto out;
}
-
+
print_stats(xc_handle, dom, 0, &stats, 0);
/* Now write out each data page, canonicalising page tables as we go... */
@@ -853,8 +853,8 @@
DPRINTF("\b\b\b\b%3d%%", this_pc);
prev_pc = this_pc;
}
-
- /* slightly wasteful to peek the whole array evey time,
+
+ /* slightly wasteful to peek the whole array evey time,
but this is fast enough for the moment. */
if (!last_iter && xc_shadow_control(
xc_handle, dom, DOM0_SHADOW_CONTROL_OP_PEEK,
@@ -862,7 +862,7 @@
ERR("Error peeking shadow bitmap");
goto out;
}
-
+
/* load pfn_type[] with the mfn of all the pages we're doing in
this batch. */
@@ -873,11 +873,11 @@
if (debug) {
DPRINTF("%d pfn= %08lx mfn= %08lx %d [mfn]= %08lx\n",
iter, (unsigned long)n, live_p2m[n],
- test_bit(n, to_send),
+ test_bit(n, to_send),
mfn_to_pfn(live_p2m[n]&0xFFFFF));
}
-
- if (!last_iter && test_bit(n, to_send)&& test_bit(n, to_skip))
+
+ if (!last_iter && test_bit(n, to_send)&& test_bit(n, to_skip))
skip_this_iter++; /* stats keeping */
if (!((test_bit(n, to_send) && !test_bit(n, to_skip)) ||
@@ -885,13 +885,13 @@
(test_bit(n, to_fix) && last_iter)))
continue;
- /*
+ /*
** we get here if:
** 1. page is marked to_send & hasn't already been re-dirtied
** 2. (ignore to_skip in last iteration)
** 3. add in pages that still need fixup (net bufs)
*/
-
+
pfn_batch[batch] = n;
pfn_type[batch] = live_p2m[n];
@@ -914,80 +914,80 @@
iter,n,pfn_type[batch]);
}
- clear_bit(n, to_fix);
-
+ clear_bit(n, to_fix);
+
batch++;
}
-
+
if (batch == 0)
goto skip; /* vanishingly unlikely... */
-
+
if ((region_base = xc_map_foreign_batch(
- xc_handle, dom, PROT_READ, pfn_type, batch)) == 0) {
+ xc_handle, dom, PROT_READ, pfn_type, batch)) == 0) {
ERR("map batch failed");
goto out;
}
-
+
if (xc_get_pfn_type_batch(xc_handle, dom, batch, pfn_type)) {
ERR("get_pfn_type_batch failed");
goto out;
}
-
+
for (j = 0; j < batch; j++) {
if ((pfn_type[j] & LTAB_MASK) == XTAB) {
DPRINTF("type fail: page %i mfn %08lx\n", j, pfn_type[j]);
continue;
}
-
- if (debug)
+
+ if (debug)
fprintf(stderr, "%d pfn= %08lx mfn= %08lx [mfn]= %08lx"
" sum= %08lx\n",
- iter,
+ iter,
(pfn_type[j] & LTAB_MASK) | pfn_batch[j],
pfn_type[j],
mfn_to_pfn(pfn_type[j]&(~LTAB_MASK)),
csum_page(region_base + (PAGE_SIZE*j)));
-
+
/* canonicalise mfn->pfn */
pfn_type[j] = (pfn_type[j] & LTAB_MASK) | pfn_batch[j];
}
- if(!write_exact(io_fd, &batch, sizeof(unsigned int))) {
+ if(!write_exact(io_fd, &batch, sizeof(unsigned int))) {
ERR("Error when writing to state file (2)");
goto out;
}
- if(!write_exact(io_fd, pfn_type, sizeof(unsigned long)*j)) {
+ if(!write_exact(io_fd, pfn_type, sizeof(unsigned long)*j)) {
ERR("Error when writing to state file (3)");
goto out;
}
-
+
/* entering this loop, pfn_type is now in pfns (Not mfns) */
for (j = 0; j < batch; j++) {
-
- unsigned long pfn = pfn_type[j] & ~LTAB_MASK;
- unsigned long pagetype = pfn_type[j] & LTAB_MASK;
- void *spage = (void *) region_base + (PAGE_SIZE*j);
+
+ unsigned long pfn = pfn_type[j] & ~LTAB_MASK;
+ unsigned long pagetype = pfn_type[j] & LTAB_MASK;
+ void *spage = (void *) region_base + (PAGE_SIZE*j);
/* write out pages in batch */
if (pagetype == XTAB)
continue;
- pagetype &= LTABTYPE_MASK;
-
+ pagetype &= LTABTYPE_MASK;
+
if (pagetype >= L1TAB && pagetype <= L4TAB) {
-
+
/* We have a pagetable page: need to rewrite it. */
- canonicalize_pagetable(pagetype, pfn, spage, page);
-
+ canonicalize_pagetable(pagetype, pfn, spage, page);
+
if (ratewrite(io_fd, page, PAGE_SIZE) != PAGE_SIZE) {
ERR("Error when writing to state file (4)");
goto out;
}
-
- } else {
+
+ } else {
/* We have a normal page: just write it directly. */
if (ratewrite(io_fd, spage, PAGE_SIZE) != PAGE_SIZE) {
@@ -996,36 +996,36 @@
}
}
} /* end of the write out for this batch */
-
+
sent_this_iter += batch;
munmap(region_base, batch*PAGE_SIZE);
-
+
} /* end of this while loop for this iteration */
-
- skip:
-
+
+ skip:
+
total_sent += sent_this_iter;
- DPRINTF("\r %d: sent %d, skipped %d, ",
+ DPRINTF("\r %d: sent %d, skipped %d, ",
iter, sent_this_iter, skip_this_iter );
if (last_iter) {
print_stats( xc_handle, dom, sent_this_iter, &stats, 1);
- DPRINTF("Total pages sent= %ld (%.2fx)\n",
+ DPRINTF("Total pages sent= %ld (%.2fx)\n",
total_sent, ((float)total_sent)/max_pfn );
DPRINTF("(of which %ld were fixups)\n", needed_to_fix );
- }
+ }
if (last_iter && debug){
int minusone = -1;
- memset(to_send, 0xff, BITMAP_SIZE);
+ memset(to_send, 0xff, BITMAP_SIZE);
debug = 0;
fprintf(stderr, "Entering debug resend-all mode\n");
-
+
/* send "-1" to put receiver into debug mode */
- if(!write_exact(io_fd, &minusone, sizeof(int))) {
+ if(!write_exact(io_fd, &minusone, sizeof(int))) {
ERR("Error when writing to state file (6)");
goto out;
}
@@ -1033,34 +1033,34 @@
continue;
}
- if (last_iter) break;
+ if (last_iter) break;
if (live) {
- if(
+ if(
((sent_this_iter > sent_last_iter) && RATE_IS_MAX()) ||
(iter >= max_iters) ||
(sent_this_iter+skip_this_iter < 50) ||
- (total_sent > max_pfn*max_factor) ) {
+ (total_sent > max_pfn*max_factor) ) {
DPRINTF("Start last iteration\n");
last_iter = 1;
-
+
if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info,
&ctxt)) {
ERR("Domain appears not to have suspended");
goto out;
}
-
- DPRINTF("SUSPEND shinfo %08lx eip %08lx edx %08lx\n",
- info.shared_info_frame,
- (unsigned long)ctxt.user_regs.eip,
+
+ DPRINTF("SUSPEND shinfo %08lx eip %08lx edx %08lx\n",
+ info.shared_info_frame,
+ (unsigned long)ctxt.user_regs.eip,
(unsigned long)ctxt.user_regs.edx);
- }
-
+ }
+
if (xc_shadow_control(xc_handle, dom, DOM0_SHADOW_CONTROL_OP_CLEAN,
- to_send, max_pfn, &stats ) != max_pfn) {
+ to_send, max_pfn, &stats ) != max_pfn) {
ERR("Error flushing shadow PT");
goto out;
}
@@ -1068,7 +1068,7 @@
sent_last_iter = sent_this_iter;
print_stats(xc_handle, dom, sent_this_iter, &stats, 1);
-
+
}
@@ -1077,8 +1077,8 @@
DPRINTF("All memory is saved\n");
/* Zero terminate */
- i = 0;
- if (!write_exact(io_fd, &i, sizeof(int))) {
+ i = 0;
+ if (!write_exact(io_fd, &i, sizeof(int))) {
ERR("Error when writing to state file (6)");
goto out;
}
@@ -1086,18 +1086,18 @@
/* Send through a list of all the PFNs that were not in map at the close */
{
unsigned int i,j;
- unsigned long pfntab[1024];
+ unsigned long pfntab[1024];
for (i = 0, j = 0; i < max_pfn; i++) {
if (!is_mapped(live_p2m[i]))
j++;
}
-
- if(!write_exact(io_fd, &j, sizeof(unsigned int))) {
+
+ if(!write_exact(io_fd, &j, sizeof(unsigned int))) {
ERR("Error when writing to state file (6a)");
goto out;
- }
-
+ }
+
for (i = 0, j = 0; i < max_pfn; ) {
if (!is_mapped(live_p2m[i]))
@@ -1105,16 +1105,16 @@
i++;
if (j == 1024 || i == max_pfn) {
- if(!write_exact(io_fd, &pfntab, sizeof(unsigned long)*j)) {
+ if(!write_exact(io_fd, &pfntab, sizeof(unsigned long)*j)) {
ERR("Error when writing to state file (6b)");
goto out;
- }
+ }
j = 0;
}
}
}
-
+
/* Canonicalise the suspend-record frame number. */
if ( !translate_mfn_to_pfn(&ctxt.user_regs.edx) ){
ERR("Suspend record is not in range of pseudophys map");
@@ -1138,7 +1138,7 @@
PAGE_SHIFT;
if (!write_exact(io_fd, &ctxt, sizeof(ctxt)) ||
- !write_exact(io_fd, live_shinfo, PAGE_SIZE)) {
+ !write_exact(io_fd, live_shinfo, PAGE_SIZE)) {
ERR("Error when writing to state file (1)");
goto out;
}
@@ -1149,26 +1149,26 @@
out:
if (live) {
- if(xc_shadow_control(xc_handle, dom, DOM0_SHADOW_CONTROL_OP_OFF,
- NULL, 0, NULL ) < 0) {
+ if(xc_shadow_control(xc_handle, dom, DOM0_SHADOW_CONTROL_OP_OFF,
+ NULL, 0, NULL ) < 0) {
DPRINTF("Warning - couldn't disable shadow mode");
}
}
-
+
if (live_shinfo)
munmap(live_shinfo, PAGE_SIZE);
-
- if (live_p2m_frame_list_list)
- munmap(live_p2m_frame_list_list, PAGE_SIZE);
-
- if (live_p2m_frame_list)
- munmap(live_p2m_frame_list, P2M_FLL_ENTRIES * PAGE_SIZE);
-
- if(live_p2m)
- munmap(live_p2m, P2M_SIZE);
-
- if(live_m2p)
- munmap(live_m2p, M2P_SIZE(max_mfn));
+
+ if (live_p2m_frame_list_list)
+ munmap(live_p2m_frame_list_list, PAGE_SIZE);
+
+ if (live_p2m_frame_list)
+ munmap(live_p2m_frame_list, P2M_FLL_ENTRIES * PAGE_SIZE);
+
+ if(live_p2m)
+ munmap(live_p2m, P2M_SIZE);
+
+ if(live_m2p)
+ munmap(live_m2p, M2P_SIZE(max_mfn));
free(pfn_type);
free(pfn_batch);
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_load_aout9.c
--- a/tools/libxc/xc_load_aout9.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_load_aout9.c Sat Apr 15 09:07:31 2006
@@ -22,7 +22,7 @@
struct Exec *get_header(const char *, unsigned long, struct Exec *);
-int
+int
probe_aout9(
const char *image,
unsigned long image_size,
@@ -40,7 +40,7 @@
return 0;
}
-static int
+static int
parseaout9image(
const char *image,
unsigned long image_size,
@@ -74,7 +74,7 @@
return 0;
}
-static int
+static int
loadaout9image(
const char *image,
unsigned long image_size,
@@ -123,7 +123,7 @@
if(chunksz > PAGE_SIZE - pgoff)
chunksz = PAGE_SIZE - pgoff;
- pg = xc_map_foreign_range(xch, dom, PAGE_SIZE, PROT_WRITE,
+ pg = xc_map_foreign_range(xch, dom, PAGE_SIZE, PROT_WRITE,
parray[off>>PAGE_SHIFT]);
memcpy(pg + pgoff, buf, chunksz);
munmap(pg, PAGE_SIZE);
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_load_bin.c
--- a/tools/libxc/xc_load_bin.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_load_bin.c Sat Apr 15 09:07:31 2006
@@ -161,7 +161,7 @@
return NULL;
}
-static int parsebinimage(const char *image,
+static int parsebinimage(const char *image,
unsigned long image_size,
struct domain_setup_info *dsi)
{
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_load_elf.c
--- a/tools/libxc/xc_load_elf.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_load_elf.c Sat Apr 15 09:07:31 2006
@@ -51,7 +51,7 @@
((phdr->p_flags & (PF_W|PF_X)) != 0));
}
-static int parseelfimage(const char *image,
+static int parseelfimage(const char *image,
unsigned long elfsize,
struct domain_setup_info *dsi)
{
@@ -102,10 +102,10 @@
ERROR("ELF image has no section-header strings table (shstrtab).");
return -EINVAL;
}
- shdr = (Elf_Shdr *)(image + ehdr->e_shoff +
+ shdr = (Elf_Shdr *)(image + ehdr->e_shoff +
(ehdr->e_shstrndx*ehdr->e_shentsize));
shstrtab = image + shdr->sh_offset;
-
+
/* Find the special '__xen_guest' section and check its contents. */
for ( h = 0; h < ehdr->e_shnum; h++ )
{
@@ -148,7 +148,7 @@
dsi->xen_guest_string = guestinfo;
- for ( h = 0; h < ehdr->e_phnum; h++ )
+ for ( h = 0; h < ehdr->e_phnum; h++ )
{
phdr = (Elf_Phdr *)(image + ehdr->e_phoff + (h*ehdr->e_phentsize));
if ( !is_loadable_phdr(phdr) )
@@ -159,8 +159,8 @@
kernend = phdr->p_paddr + phdr->p_memsz;
}
- if ( (kernstart > kernend) ||
- (ehdr->e_entry < kernstart) ||
+ if ( (kernstart > kernend) ||
+ (ehdr->e_entry < kernstart) ||
(ehdr->e_entry > kernend) )
{
ERROR("Malformed ELF image.");
@@ -196,12 +196,12 @@
char *va;
unsigned long pa, done, chunksz;
- for ( h = 0; h < ehdr->e_phnum; h++ )
+ for ( h = 0; h < ehdr->e_phnum; h++ )
{
phdr = (Elf_Phdr *)(image + ehdr->e_phoff + (h*ehdr->e_phentsize));
if ( !is_loadable_phdr(phdr) )
continue;
-
+
for ( done = 0; done < phdr->p_filesz; done += chunksz )
{
pa = (phdr->p_paddr + done) - dsi->v_start;
@@ -265,7 +265,7 @@
shdr = (Elf_Shdr *)(p + sizeof(int) + sizeof(Elf_Ehdr));
memcpy(shdr, image + ehdr->e_shoff, ehdr->e_shnum * sizeof(Elf_Shdr));
- for ( h = 0; h < ehdr->e_shnum; h++ )
+ for ( h = 0; h < ehdr->e_shnum; h++ )
{
if ( shdr[h].sh_type == SHT_STRTAB )
{
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_misc.c Sat Apr 15 09:07:31 2006
@@ -1,6 +1,6 @@
/******************************************************************************
* xc_misc.c
- *
+ *
* Miscellaneous control interface functions.
*/
@@ -21,7 +21,7 @@
int xc_readconsolering(int xc_handle,
char **pbuffer,
- unsigned int *pnr_chars,
+ unsigned int *pnr_chars,
int clear)
{
int ret;
@@ -46,14 +46,14 @@
safe_munlock(buffer, nr_chars);
return ret;
-}
+}
int xc_physinfo(int xc_handle,
xc_physinfo_t *put_info)
{
int ret;
DECLARE_DOM0_OP;
-
+
op.cmd = DOM0_PHYSINFO;
op.interface_version = DOM0_INTERFACE_VERSION;
@@ -70,15 +70,15 @@
{
int ret;
DECLARE_DOM0_OP;
-
+
op.cmd = DOM0_SCHED_ID;
op.interface_version = DOM0_INTERFACE_VERSION;
-
+
if ( (ret = do_dom0_op(xc_handle, &op)) != 0 )
return ret;
-
+
*sched_id = op.u.sched_id.sched_id;
-
+
return 0;
}
@@ -100,9 +100,9 @@
long long xc_msr_read(int xc_handle, int cpu_mask, int msr)
{
- int rc;
+ int rc;
DECLARE_DOM0_OP;
-
+
op.cmd = DOM0_MSR;
op.u.msr.write = 0;
op.u.msr.msr = msr;
@@ -116,9 +116,9 @@
int xc_msr_write(int xc_handle, int cpu_mask, int msr, unsigned int low,
unsigned int high)
{
- int rc;
+ int rc;
DECLARE_DOM0_OP;
-
+
op.cmd = DOM0_MSR;
op.u.msr.write = 1;
op.u.msr.msr = msr;
@@ -127,7 +127,7 @@
op.u.msr.in2 = high;
rc = do_dom0_op(xc_handle, &op);
-
+
return rc;
}
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_physdev.c
--- a/tools/libxc/xc_physdev.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_physdev.c Sat Apr 15 09:07:31 2006
@@ -1,8 +1,8 @@
/******************************************************************************
* xc_physdev.c
- *
+ *
* API for manipulating physical-device access permissions.
- *
+ *
* Copyright (c) 2004, Rolf Neugebauer (Intel Research Cambridge)
* Copyright (c) 2004, K A Fraser (University of Cambridge)
*/
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_private.c Sat Apr 15 09:07:31 2006
@@ -1,6 +1,6 @@
/******************************************************************************
* xc_private.c
- *
+ *
* Helper functions for the rest of the library.
*/
@@ -10,7 +10,7 @@
void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
unsigned long *arr, int num )
{
- privcmd_mmapbatch_t ioctlx;
+ privcmd_mmapbatch_t ioctlx;
void *addr;
addr = mmap(NULL, num*PAGE_SIZE, prot, MAP_SHARED, xc_handle, 0);
if ( addr == MAP_FAILED )
@@ -38,8 +38,8 @@
int size, int prot,
unsigned long mfn )
{
- privcmd_mmap_t ioctlx;
- privcmd_mmap_entry_t entry;
+ privcmd_mmap_t ioctlx;
+ privcmd_mmap_entry_t entry;
void *addr;
addr = mmap(NULL, size, prot, MAP_SHARED, xc_handle, 0);
if ( addr == MAP_FAILED )
@@ -64,7 +64,7 @@
/*******************/
/* NB: arr must be mlock'ed */
-int xc_get_pfn_type_batch(int xc_handle,
+int xc_get_pfn_type_batch(int xc_handle,
uint32_t dom, int num, unsigned long *arr)
{
DECLARE_DOM0_OP;
@@ -76,8 +76,8 @@
}
#define GETPFN_ERR (~0U)
-unsigned int get_pfn_type(int xc_handle,
- unsigned long mfn,
+unsigned int get_pfn_type(int xc_handle,
+ unsigned long mfn,
uint32_t dom)
{
DECLARE_DOM0_OP;
@@ -119,7 +119,7 @@
out1:
return ret;
-}
+}
static int flush_mmu_updates(int xc_handle, xc_mmu_t *mmu)
{
@@ -166,7 +166,7 @@
return mmu;
}
-int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu,
+int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu,
unsigned long long ptr, unsigned long long val)
{
mmu->updates[mmu->idx].ptr = ptr;
@@ -288,7 +288,7 @@
out1:
return ret;
-}
+}
long long xc_domain_get_cpu_usage( int xc_handle, domid_t domid, int vcpu )
@@ -308,8 +308,8 @@
int xc_get_pfn_list(int xc_handle,
- uint32_t domid,
- unsigned long *pfn_buf,
+ uint32_t domid,
+ unsigned long *pfn_buf,
unsigned long max_pfns)
{
DECLARE_DOM0_OP;
@@ -327,7 +327,7 @@
{
PERROR("xc_get_pfn_list: pfn_buf mlock failed");
return -1;
- }
+ }
ret = do_dom0_op(xc_handle, &op);
@@ -356,13 +356,13 @@
DECLARE_DOM0_OP;
op.cmd = DOM0_GETDOMAININFO;
op.u.getdomaininfo.domain = (domid_t)domid;
- return (do_dom0_op(xc_handle, &op) < 0) ?
+ return (do_dom0_op(xc_handle, &op) < 0) ?
-1 : op.u.getdomaininfo.tot_pages;
}
int xc_copy_to_domain_page(int xc_handle,
uint32_t domid,
- unsigned long dst_pfn,
+ unsigned long dst_pfn,
const char *src_page)
{
void *vaddr = xc_map_foreign_range(
@@ -481,7 +481,7 @@
{
unsigned long new_mfn;
- if ( xc_domain_memory_decrease_reservation(
+ if ( xc_domain_memory_decrease_reservation(
xc_handle, domid, 1, 0, &mfn) != 0 )
{
fprintf(stderr,"xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_private.h Sat Apr 15 09:07:31 2006
@@ -57,7 +57,7 @@
}
static inline int do_privcmd(int xc_handle,
- unsigned int cmd,
+ unsigned int cmd,
unsigned long data)
{
return ioctl(xc_handle, cmd, data);
@@ -67,7 +67,7 @@
privcmd_hypercall_t *hypercall)
{
return do_privcmd(xc_handle,
- IOCTL_PRIVCMD_HYPERCALL,
+ IOCTL_PRIVCMD_HYPERCALL,
(unsigned long)hypercall);
}
@@ -78,7 +78,7 @@
hypercall.op = __HYPERVISOR_xen_version;
hypercall.arg[0] = (unsigned long) cmd;
hypercall.arg[1] = (unsigned long) dest;
-
+
return do_xen_hypercall(xc_handle, &hypercall);
}
@@ -121,13 +121,13 @@
unsigned long va;
unsigned long mfn;
unsigned long npages;
-} privcmd_mmap_entry_t;
+} privcmd_mmap_entry_t;
typedef struct privcmd_mmap {
int num;
domid_t dom;
privcmd_mmap_entry_t *entry;
-} privcmd_mmap_t;
+} privcmd_mmap_t;
*/
#endif /* __XC_PRIVATE_H__ */
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_ptrace.c
--- a/tools/libxc/xc_ptrace.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_ptrace.c Sat Apr 15 09:07:31 2006
@@ -46,7 +46,7 @@
static vcpu_guest_context_t ctxt[MAX_VIRT_CPUS];
extern int ffsll(long long int);
-#define FOREACH_CPU(cpumap, i) for ( cpumap = online_cpumap; (i = ffsll(cpumap)); cpumap &= ~(1 << (index - 1)) )
+#define FOREACH_CPU(cpumap, i) for ( cpumap = online_cpumap; (i = ffsll(cpumap)); cpumap &= ~(1 << (index - 1)) )
static int
@@ -58,22 +58,22 @@
if (online)
*online = 0;
if ( !(regs_valid & (1 << cpu)) )
- {
- retval = xc_vcpu_getcontext(xc_handle, current_domid,
- cpu, &ctxt[cpu]);
- if ( retval )
+ {
+ retval = xc_vcpu_getcontext(xc_handle, current_domid,
+ cpu, &ctxt[cpu]);
+ if ( retval )
goto done;
- regs_valid |= (1 << cpu);
-
- }
- if ( online == NULL )
- goto done;
-
- retval = xc_vcpu_getinfo(xc_handle, current_domid, cpu, &info);
- *online = info.online;
-
+ regs_valid |= (1 << cpu);
+
+ }
+ if ( online == NULL )
+ goto done;
+
+ retval = xc_vcpu_getinfo(xc_handle, current_domid, cpu, &info);
+ *online = info.online;
+
done:
- return retval;
+ return retval;
}
static struct thr_ev_handlers {
@@ -81,8 +81,8 @@
thr_ev_handler_t td_death;
} handlers;
-void
-xc_register_event_handler(thr_ev_handler_t h,
+void
+xc_register_event_handler(thr_ev_handler_t h,
td_event_e e)
{
switch (e) {
@@ -97,7 +97,7 @@
}
}
-static inline int
+static inline int
paging_enabled(vcpu_guest_context_t *v)
{
unsigned long cr0 = v->ctrlreg[0];
@@ -114,19 +114,19 @@
get_online_cpumap(int xc_handle, dom0_getdomaininfo_t *d, cpumap_t *cpumap)
{
int i, online, retval;
-
+
*cpumap = 0;
for (i = 0; i <= d->max_vcpu_id; i++) {
if ((retval = fetch_regs(xc_handle, i, &online)))
return retval;
if (online)
- *cpumap |= (1 << i);
- }
-
+ *cpumap |= (1 << i);
+ }
+
return 0;
}
-/*
+/*
* Notify GDB of any vcpus that have come online or gone offline
* update online_cpumap
*
@@ -137,7 +137,7 @@
{
cpumap_t changed_cpumap = cpumap ^ online_cpumap;
int index;
-
+
while ( (index = ffsll(changed_cpumap)) ) {
if ( cpumap & (1 << (index - 1)) )
{
@@ -149,7 +149,7 @@
changed_cpumap &= ~(1 << (index - 1));
}
online_cpumap = cpumap;
-
+
}
/* --------------------- */
@@ -172,7 +172,7 @@
static unsigned long pde_phys[MAX_VIRT_CPUS];
static uint32_t *pde_virt[MAX_VIRT_CPUS];
static unsigned long page_phys[MAX_VIRT_CPUS];
- static uint32_t *page_virt[MAX_VIRT_CPUS];
+ static uint32_t *page_virt[MAX_VIRT_CPUS];
static int prev_perm[MAX_VIRT_CPUS];
if (ctxt[cpu].ctrlreg[3] == 0)
@@ -221,7 +221,7 @@
return NULL;
}
prev_perm[cpu] = perm;
- }
+ }
return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
}
@@ -284,7 +284,7 @@
if ((ctxt[cpu].ctrlreg[4] & 0x20) == 0 ) /* legacy ia32 mode */
return map_domain_va_32(xc_handle, cpu, guest_va, perm);
- l4 = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
+ l4 = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
if ( l4 == NULL )
return NULL;
@@ -349,7 +349,7 @@
mode = MODE_64;
else if ( strstr(caps, "-x86_32p") )
mode = MODE_PAE;
- else if ( strstr(caps, "-x86_32") )
+ else if ( strstr(caps, "-x86_32") )
mode = MODE_32;
}
@@ -374,7 +374,7 @@
if (fetch_regs(xc_handle, cpu, NULL))
return NULL;
- if (!paging_enabled(&ctxt[cpu])) {
+ if (!paging_enabled(&ctxt[cpu])) {
static void * v;
unsigned long page;
@@ -383,9 +383,9 @@
page = page_array[va >> PAGE_SHIFT] << PAGE_SHIFT;
- v = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
+ v = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
perm, page >> PAGE_SHIFT);
-
+
if ( v == NULL )
return NULL;
@@ -403,7 +403,7 @@
int control_c_pressed_flag = 0;
-static int
+static int
__xc_waitdomain(
int xc_handle,
int domain,
@@ -420,7 +420,7 @@
op.cmd = DOM0_GETDOMAININFO;
op.u.getdomaininfo.domain = domain;
-
+
retry:
retval = do_dom0_op(xc_handle, &op);
if ( retval || (op.u.getdomaininfo.domain != domain) )
@@ -429,7 +429,7 @@
goto done;
}
*status = op.u.getdomaininfo.flags;
-
+
if ( options & WNOHANG )
goto done;
@@ -472,16 +472,16 @@
void *data = (char *)edata;
cpu = (request != PTRACE_ATTACH) ? domid_tid : 0;
-
+
switch ( request )
- {
+ {
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
if (current_isfile)
- guest_va = (unsigned long *)map_domain_va_core(current_domid,
+ guest_va = (unsigned long *)map_domain_va_core(current_domid,
cpu, addr, ctxt);
else
- guest_va = (unsigned long *)map_domain_va(xc_handle,
+ guest_va = (unsigned long *)map_domain_va(xc_handle,
cpu, addr, PROT_READ);
if ( guest_va == NULL )
goto out_error;
@@ -492,26 +492,26 @@
case PTRACE_POKEDATA:
/* XXX assume that all CPUs have the same address space */
if (current_isfile)
- guest_va = (unsigned long *)map_domain_va_core(current_domid,
+ guest_va = (unsigned long *)map_domain_va_core(current_domid,
cpu, addr, ctxt);
else
- guest_va = (unsigned long *)map_domain_va(xc_handle,
+ guest_va = (unsigned long *)map_domain_va(xc_handle,
cpu, addr, PROT_READ|PROT_WRITE);
- if ( guest_va == NULL )
+ if ( guest_va == NULL )
goto out_error;
*guest_va = (unsigned long)data;
break;
case PTRACE_GETREGS:
- if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
+ if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
goto out_error;
- SET_PT_REGS(pt, ctxt[cpu].user_regs);
+ SET_PT_REGS(pt, ctxt[cpu].user_regs);
memcpy(data, &pt, sizeof(struct gdb_regs));
break;
case PTRACE_GETFPREGS:
case PTRACE_GETFPXREGS:
- if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
+ if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
goto out_error;
memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
break;
@@ -520,7 +520,7 @@
if (current_isfile)
goto out_unspported; /* XXX not yet supported */
SET_XC_REGS(((struct gdb_regs *)data), ctxt[cpu].user_regs);
- if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
+ if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
&ctxt[cpu])))
goto out_error_dom0;
break;
@@ -531,8 +531,8 @@
/* XXX we can still have problems if the user switches threads
* during single-stepping - but that just seems retarded
*/
- ctxt[cpu].user_regs.eflags |= PSL_T;
- if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
+ ctxt[cpu].user_regs.eflags |= PSL_T;
+ if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
&ctxt[cpu])))
goto out_error_dom0;
/* FALLTHROUGH */
@@ -545,13 +545,13 @@
{
FOREACH_CPU(cpumap, index) {
cpu = index - 1;
- if (fetch_regs(xc_handle, cpu, NULL))
+ if (fetch_regs(xc_handle, cpu, NULL))
goto out_error;
/* Clear trace flag */
- if ( ctxt[cpu].user_regs.eflags & PSL_T )
+ if ( ctxt[cpu].user_regs.eflags & PSL_T )
{
ctxt[cpu].user_regs.eflags &= ~PSL_T;
- if ((retval = xc_vcpu_setcontext(xc_handle, current_domid,
+ if ((retval = xc_vcpu_setcontext(xc_handle, current_domid,
cpu, &ctxt[cpu])))
goto out_error_dom0;
}
@@ -566,7 +566,7 @@
goto out_error_dom0;
}
regs_valid = 0;
- if ((retval = xc_domain_unpause(xc_handle, current_domid > 0 ?
+ if ((retval = xc_domain_unpause(xc_handle, current_domid > 0 ?
current_domid : -current_domid)))
goto out_error_dom0;
break;
@@ -627,7 +627,7 @@
}
-int
+int
xc_waitdomain(
int xc_handle,
int domain,
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_ptrace.h
--- a/tools/libxc/xc_ptrace.h Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_ptrace.h Sat Apr 15 09:07:31 2006
@@ -107,7 +107,7 @@
long esi; /* 12 */
long edi; /* 16 */
long ebp; /* 20 */
- long eax; /* 24 */
+ long eax; /* 24 */
int xds; /* 28 */
int xes; /* 32 */
int xfs; /* 36 */
@@ -116,7 +116,7 @@
long eip; /* 48 */
int xcs; /* 52 */
long eflags; /* 56 */
- long esp; /* 60 */
+ long esp; /* 60 */
int xss; /* 64 */
};
@@ -169,20 +169,20 @@
typedef void (*thr_ev_handler_t)(long);
void xc_register_event_handler(
- thr_ev_handler_t h,
+ thr_ev_handler_t h,
td_event_e e);
long xc_ptrace(
int xc_handle,
- enum __ptrace_request request,
+ enum __ptrace_request request,
uint32_t domid,
- long addr,
+ long addr,
long data);
int xc_waitdomain(
int xc_handle,
- int domain,
- int *status,
+ int domain,
+ int *status,
int options);
#endif /* XC_PTRACE */
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_ptrace_core.c
--- a/tools/libxc/xc_ptrace_core.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_ptrace_core.c Sat Apr 15 09:07:31 2006
@@ -39,7 +39,7 @@
static unsigned long page_phys[MAX_VIRT_CPUS];
static unsigned long *page_virt[MAX_VIRT_CPUS];
- if (cr3[cpu] != cr3_phys[cpu])
+ if (cr3[cpu] != cr3_phys[cpu])
{
cr3_phys[cpu] = cr3[cpu];
if (cr3_virt[cpu])
@@ -53,12 +53,12 @@
return NULL;
}
cr3_virt[cpu] = v;
- }
+ }
if ((pde = cr3_virt[cpu][vtopdi(va)]) == 0) /* logical address */
return NULL;
if (ctxt[cpu].flags & VGCF_HVM_GUEST)
pde = p2m_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
- if (pde != pde_phys[cpu])
+ if (pde != pde_phys[cpu])
{
pde_phys[cpu] = pde;
if (pde_virt[cpu])
@@ -74,7 +74,7 @@
return NULL;
if (ctxt[cpu].flags & VGCF_HVM_GUEST)
page = p2m_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
- if (page != page_phys[cpu])
+ if (page != page_phys[cpu])
{
page_phys[cpu] = page;
if (page_virt[cpu])
@@ -89,11 +89,11 @@
return NULL;
}
page_virt[cpu] = v;
- }
+ }
return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
}
-int
+int
xc_waitdomain_core(
int xc_handle,
int domfd,
@@ -122,7 +122,7 @@
nr_vcpus = header.xch_nr_vcpus;
pages_offset = header.xch_pages_offset;
- if (read(domfd, ctxt, sizeof(vcpu_guest_context_t)*nr_vcpus) !=
+ if (read(domfd, ctxt, sizeof(vcpu_guest_context_t)*nr_vcpus) !=
sizeof(vcpu_guest_context_t)*nr_vcpus)
return -1;
@@ -134,7 +134,7 @@
printf("Could not allocate p2m_array\n");
return -1;
}
- if (read(domfd, p2m_array, sizeof(unsigned long)*nr_pages) !=
+ if (read(domfd, p2m_array, sizeof(unsigned long)*nr_pages) !=
sizeof(unsigned long)*nr_pages)
return -1;
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_sedf.c
--- a/tools/libxc/xc_sedf.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_sedf.c Sat Apr 15 09:07:31 2006
@@ -1,8 +1,8 @@
/******************************************************************************
* xc_sedf.c
- *
+ *
* API for manipulating parameters of the Simple EDF scheduler.
- *
+ *
* changes by Stephan Diestelhorst
* based on code
* by Mark Williamson, Copyright (c) 2004 Intel Research Cambridge.
@@ -35,7 +35,7 @@
int ret;
struct sedf_adjdom *p = &op.u.adjustdom.u.sedf;
- op.cmd = DOM0_ADJUSTDOM;
+ op.cmd = DOM0_ADJUSTDOM;
op.u.adjustdom.domain = (domid_t)domid;
op.u.adjustdom.sched_id = SCHED_SEDF;
op.u.adjustdom.direction = SCHED_INFO_GET;
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xc_tbuf.c
--- a/tools/libxc/xc_tbuf.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xc_tbuf.c Sat Apr 15 09:07:31 2006
@@ -1,8 +1,8 @@
/******************************************************************************
* xc_tbuf.c
- *
+ *
* API for manipulating and accessing trace buffer parameters
- *
+ *
* Copyright (c) 2005, Rob Gardner
*/
@@ -18,7 +18,7 @@
op.u.tbufcontrol.op = DOM0_TBUF_ENABLE;
else
op.u.tbufcontrol.op = DOM0_TBUF_DISABLE;
-
+
return xc_dom0_op(xc_handle, &op);
}
@@ -30,10 +30,10 @@
op.interface_version = DOM0_INTERFACE_VERSION;
op.u.tbufcontrol.op = DOM0_TBUF_SET_SIZE;
op.u.tbufcontrol.size = size;
-
+
return xc_dom0_op(xc_handle, &op);
}
-
+
int xc_tbuf_get_size(int xc_handle, uint32_t *size)
{
int rc;
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xenctrl.h Sat Apr 15 09:07:31 2006
@@ -1,8 +1,8 @@
/******************************************************************************
* xenctrl.h
- *
+ *
* A library for low-level access to the Xen control interfaces.
- *
+ *
* Copyright (c) 2003-2004, K A Fraser.
*/
@@ -30,7 +30,7 @@
/*
* DEFINITIONS FOR CPU BARRIERS
- */
+ */
#if defined(__i386__)
#define mb() __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
@@ -51,7 +51,7 @@
/*
* INITIALIZATION FUNCTIONS
- */
+ */
/**
* This function opens a handle to the hypervisor interface. This function can
@@ -96,20 +96,20 @@
long xc_ptrace_core(
int xc_handle,
- enum __ptrace_request request,
- uint32_t domid,
- long addr,
+ enum __ptrace_request request,
+ uint32_t domid,
+ long addr,
long data,
vcpu_guest_context_t *ctxt);
void * map_domain_va_core(
- unsigned long domfd,
- int cpu,
+ unsigned long domfd,
+ int cpu,
void *guest_va,
vcpu_guest_context_t *ctxt);
int xc_waitdomain_core(
int xc_handle,
- int domain,
- int *status,
+ int domain,
+ int *status,
int options,
vcpu_guest_context_t *ctxt);
@@ -120,7 +120,7 @@
typedef struct {
uint32_t domid;
uint32_t ssidref;
- unsigned int dying:1, crashed:1, shutdown:1,
+ unsigned int dying:1, crashed:1, shutdown:1,
paused:1, blocked:1, running:1;
unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
unsigned long nr_pages;
@@ -133,7 +133,7 @@
} xc_dominfo_t;
typedef dom0_getdomaininfo_t xc_domaininfo_t;
-int xc_domain_create(int xc_handle,
+int xc_domain_create(int xc_handle,
uint32_t ssidref,
xen_domain_handle_t handle,
uint32_t *pdomid);
@@ -144,7 +144,7 @@
* xc_domain_dumpcore_via_callback - produces a dump, using a specified
* callback function
*/
-int xc_domain_dumpcore(int xc_handle,
+int xc_domain_dumpcore(int xc_handle,
uint32_t domid,
const char *corename);
@@ -156,7 +156,7 @@
*/
typedef int (dumpcore_rtn_t)(void *arg, char *buffer, unsigned int length);
-int xc_domain_dumpcore_via_callback(int xc_handle,
+int xc_domain_dumpcore_via_callback(int xc_handle,
uint32_t domid,
void *arg,
dumpcore_rtn_t dump_rtn);
@@ -170,7 +170,7 @@
* @return 0 on success, -1 on failure.
*/
int xc_domain_max_vcpus(int xc_handle,
- uint32_t domid,
+ uint32_t domid,
unsigned int max);
/**
@@ -181,7 +181,7 @@
* @parm domid the domain id to pause
* @return 0 on success, -1 on failure.
*/
-int xc_domain_pause(int xc_handle,
+int xc_domain_pause(int xc_handle,
uint32_t domid);
/**
* This function unpauses a domain. The domain should have been previously
@@ -191,7 +191,7 @@
* @parm domid the domain id to unpause
* return 0 on success, -1 on failure
*/
-int xc_domain_unpause(int xc_handle,
+int xc_domain_unpause(int xc_handle,
uint32_t domid);
/**
@@ -203,7 +203,7 @@
* @parm domid the domain id to destroy
* @return 0 on success, -1 on failure
*/
-int xc_domain_destroy(int xc_handle,
+int xc_domain_destroy(int xc_handle,
uint32_t domid);
/**
@@ -217,7 +217,7 @@
* @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
* @return 0 on success, -1 on failure
*/
-int xc_domain_shutdown(int xc_handle,
+int xc_domain_shutdown(int xc_handle,
uint32_t domid,
int reason);
@@ -242,7 +242,7 @@
* @return the number of domains enumerated or -1 on error
*/
int xc_domain_getinfo(int xc_handle,
- uint32_t first_domid,
+ uint32_t first_domid,
unsigned int max_doms,
xc_dominfo_t *info);
@@ -307,12 +307,12 @@
domid_t domid,
int vcpu);
-int xc_domain_sethandle(int xc_handle, uint32_t domid,
+int xc_domain_sethandle(int xc_handle, uint32_t domid,
xen_domain_handle_t handle);
typedef dom0_shadow_control_stats_t xc_shadow_control_stats_t;
int xc_shadow_control(int xc_handle,
- uint32_t domid,
+ uint32_t domid,
unsigned int sop,
unsigned long *dirty_bitmap,
unsigned long pages,
@@ -386,7 +386,7 @@
int xc_readconsolering(int xc_handle,
char **pbuffer,
- unsigned int *pnr_chars,
+ unsigned int *pnr_chars,
int clear);
typedef dom0_physinfo_t xc_physinfo_t;
@@ -397,18 +397,18 @@
int *sched_id);
int xc_domain_setmaxmem(int xc_handle,
- uint32_t domid,
+ uint32_t domid,
unsigned int max_memkb);
int xc_domain_memory_increase_reservation(int xc_handle,
- uint32_t domid,
+ uint32_t domid,
unsigned long nr_extents,
unsigned int extent_order,
unsigned int address_bits,
unsigned long *extent_start);
int xc_domain_memory_decrease_reservation(int xc_handle,
- uint32_t domid,
+ uint32_t domid,
unsigned long nr_extents,
unsigned int extent_order,
unsigned long *extent_start);
@@ -443,7 +443,7 @@
unsigned long nr_mfns,
uint8_t allow_access);
-unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
+unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
unsigned long mfn);
typedef dom0_perfc_desc_t xc_perfc_desc_t;
@@ -492,11 +492,11 @@
unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
int vcpu, unsigned long long virt);
-int xc_get_pfn_list(int xc_handle, uint32_t domid, unsigned long *pfn_buf,
+int xc_get_pfn_list(int xc_handle, uint32_t domid, unsigned long *pfn_buf,
unsigned long max_pfns);
int xc_ia64_get_pfn_list(int xc_handle, uint32_t domid,
- unsigned long *pfn_buf,
+ unsigned long *pfn_buf,
unsigned int start_page, unsigned int nr_pages);
int xc_copy_to_domain_page(int xc_handle, uint32_t domid,
@@ -551,7 +551,7 @@
int xc_tbuf_set_size(int xc_handle, uint32_t size);
/**
- * This function retrieves the current size of the trace buffers.
+ * This function retrieves the current size of the trace buffers.
* Note that the size returned is in terms of bytes, not pages.
* @parm xc_handle a handle to an open hypervisor interface
@@ -577,7 +577,7 @@
};
typedef struct xc_mmu xc_mmu_t;
xc_mmu_t *xc_init_mmu_updates(int xc_handle, domid_t dom);
-int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu,
+int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu,
unsigned long long ptr, unsigned long long val);
int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu);
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xenguest.h
--- a/tools/libxc/xenguest.h Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xenguest.h Sat Apr 15 09:07:31 2006
@@ -1,8 +1,8 @@
/******************************************************************************
* xenguest.h
- *
+ *
* A library for guest domain management in Xen.
- *
+ *
* Copyright (c) 2003-2004, K A Fraser.
*/
@@ -21,7 +21,7 @@
* @parm dom the id of the domain
* @return 0 on success, -1 on failure
*/
-int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
+int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
uint32_t max_factor, uint32_t flags /* XCFLAGS_xxx */,
int (*suspend)(int domid));
@@ -37,8 +37,8 @@
* @parm store_mfn returned with the mfn of the store page
* @return 0 on success, -1 on failure
*/
-int xc_linux_restore(int xc_handle, int io_fd, uint32_t dom,
- unsigned long nr_pfns, unsigned int store_evtchn,
+int xc_linux_restore(int xc_handle, int io_fd, uint32_t dom,
+ unsigned long nr_pfns, unsigned int store_evtchn,
unsigned long *store_mfn, unsigned int console_evtchn,
unsigned long *console_mfn);
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xg_private.c
--- a/tools/libxc/xg_private.c Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xg_private.c Sat Apr 15 09:07:31 2006
@@ -1,6 +1,6 @@
/******************************************************************************
* xg_private.c
- *
+ *
* Helper functions for the rest of the library.
*/
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xg_private.h
--- a/tools/libxc/xg_private.h Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xg_private.h Sat Apr 15 09:07:31 2006
@@ -11,7 +11,7 @@
#include <sys/stat.h>
#include "xenctrl.h"
-#include "xenguest.h"
+#include "xenguest.h"
#include <xen/linux/privcmd.h>
#include <xen/memory.h>
@@ -62,7 +62,7 @@
#define L2_PAGETABLE_ENTRIES_PAE 512
#define L3_PAGETABLE_ENTRIES_PAE 4
-#if defined(__i386__)
+#if defined(__i386__)
#define L1_PAGETABLE_ENTRIES 1024
#define L2_PAGETABLE_ENTRIES 1024
#elif defined(__x86_64__)
@@ -71,7 +71,7 @@
#define L3_PAGETABLE_ENTRIES 512
#define L4_PAGETABLE_ENTRIES 512
#endif
-
+
#define PAGE_SHIFT XC_PAGE_SHIFT
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
@@ -167,8 +167,8 @@
int error;
int max_queue_size;
void * addr;
- privcmd_mmap_t ioctl;
-
+ privcmd_mmap_t ioctl;
+
} mfn_mapper_t;
int xc_copy_to_domain_page(int xc_handle, uint32_t domid,
diff -r 1d8b3c85121d -r 74ee53209cca tools/libxc/xg_save_restore.h
--- a/tools/libxc/xg_save_restore.h Sat Apr 15 08:52:32 2006
+++ b/tools/libxc/xg_save_restore.h Sat Apr 15 09:07:31 2006
@@ -1,7 +1,7 @@
/*
** xg_save_restore.h
-**
-** Defintions and utilities for save / restore.
+**
+** Defintions and utilities for save / restore.
*/
#include "xc_private.h"
@@ -29,8 +29,8 @@
/*
-** We process save/restore/migrate in batches of pages; the below
-** determines how many pages we (at maximum) deal with in each batch.
+** We process save/restore/migrate in batches of pages; the below
+** determines how many pages we (at maximum) deal with in each batch.
*/
#define MAX_BATCH_SIZE 1024 /* up to 1024 pages (4MB) at a time */
@@ -40,56 +40,56 @@
/*
-** Determine various platform information required for save/restore, in
-** particular:
+** Determine various platform information required for save/restore, in
+** particular:
**
-** - the maximum MFN on this machine, used to compute the size of
-** the M2P table;
-**
-** - the starting virtual address of the the hypervisor; we use this
-** to determine which parts of guest address space(s) do and don't
-** require canonicalization during save/restore; and
-**
-** - the number of page-table levels for save/ restore. This should
-** be a property of the domain, but for the moment we just read it
+** - the maximum MFN on this machine, used to compute the size of
+** the M2P table;
+**
+** - the starting virtual address of the the hypervisor; we use this
+** to determine which parts of guest address space(s) do and don't
+** require canonicalization during save/restore; and
+**
+** - the number of page-table levels for save/ restore. This should
+** be a property of the domain, but for the moment we just read it
** from the hypervisor.
**
-** Returns 1 on success, 0 on failure.
+** Returns 1 on success, 0 on failure.
*/
-static int get_platform_info(int xc_handle, uint32_t dom,
- /* OUT */ unsigned long *max_mfn,
- /* OUT */ unsigned long *hvirt_start,
+static int get_platform_info(int xc_handle, uint32_t dom,
+ /* OUT */ unsigned long *max_mfn,
+ /* OUT */ unsigned long *hvirt_start,
/* OUT */ unsigned int *pt_levels)
-
-{
+
+{
xen_capabilities_info_t xen_caps = "";
xen_platform_parameters_t xen_params;
if (xc_version(xc_handle, XENVER_platform_parameters, &xen_params) != 0)
return 0;
-
+
if (xc_version(xc_handle, XENVER_capabilities, &xen_caps) != 0)
return 0;
*max_mfn = xc_memory_op(xc_handle, XENMEM_maximum_ram_page, NULL);
-
+
*hvirt_start = xen_params.virt_start;
if (strstr(xen_caps, "xen-3.0-x86_64"))
*pt_levels = 4;
else if (strstr(xen_caps, "xen-3.0-x86_32p"))
- *pt_levels = 3;
+ *pt_levels = 3;
else if (strstr(xen_caps, "xen-3.0-x86_32"))
- *pt_levels = 2;
- else
- return 0;
-
+ *pt_levels = 2;
+ else
+ return 0;
+
return 1;
-}
+}
-/*
-** Save/restore deal with the mfn_to_pfn (M2P) and pfn_to_mfn (P2M) tables.
+/*
+** Save/restore deal with the mfn_to_pfn (M2P) and pfn_to_mfn (P2M) tables.
** The M2P simply holds the corresponding PFN, while the top bit of a P2M
** entry tell us whether or not the the PFN is currently mapped.
*/
@@ -98,18 +98,18 @@
#define ROUNDUP(_x,_w) (((unsigned long)(_x)+(1UL<<(_w))-1) & ~((1UL<<(_w))-1))
-/*
-** The M2P is made up of some number of 'chunks' of at least 2MB in size.
-** The below definitions and utility function(s) deal with mapping the M2P
-** regarldess of the underlying machine memory size or architecture.
+/*
+** The M2P is made up of some number of 'chunks' of at least 2MB in size.
+** The below definitions and utility function(s) deal with mapping the M2P
+** regarldess of the underlying machine memory size or architecture.
*/
-#define M2P_SHIFT L2_PAGETABLE_SHIFT_PAE
-#define M2P_CHUNK_SIZE (1 << M2P_SHIFT)
-#define M2P_SIZE(_m) ROUNDUP(((_m) * sizeof(unsigned long)), M2P_SHIFT)
+#define M2P_SHIFT L2_PAGETABLE_SHIFT_PAE
+#define M2P_CHUNK_SIZE (1 << M2P_SHIFT)
+#define M2P_SIZE(_m) ROUNDUP(((_m) * sizeof(unsigned long)), M2P_SHIFT)
#define M2P_CHUNKS(_m) (M2P_SIZE((_m)) >> M2P_SHIFT)
/* Size in bytes of the P2M (rounded up to the nearest PAGE_SIZE bytes) */
-#define P2M_SIZE ROUNDUP((max_pfn * sizeof(unsigned long)), PAGE_SHIFT)
+#define P2M_SIZE ROUNDUP((max_pfn * sizeof(unsigned long)), PAGE_SHIFT)
/* Number of unsigned longs in a page */
#define ulpp (PAGE_SIZE/sizeof(unsigned long))
@@ -127,12 +127,12 @@
#define NR_SLACK_ENTRIES ((8 * 1024 * 1024) / PAGE_SIZE)
/* Is the given PFN within the 'slack' region at the top of the P2M? */
-#define IS_REAL_PFN(_pfn) ((max_pfn - (_pfn)) > NR_SLACK_ENTRIES)
+#define IS_REAL_PFN(_pfn) ((max_pfn - (_pfn)) > NR_SLACK_ENTRIES)
/* Returns TRUE if the PFN is currently mapped */
#define is_mapped(pfn_type) (!((pfn_type) & 0x80000000UL))
-#define INVALID_P2M_ENTRY (~0UL)
+#define INVALID_P2M_ENTRY (~0UL)
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog