# HG changeset patch
# User Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
# Date 1278088484 -3600
# Node ID 380a9e36376136072184f6bcec63086cafca5c7d
# Parent eb72d1acf1152527b94b248aeb249fa462b2bed6
# Parent aecf092da7489bd41ffd3e58964535a0d61eb3b4
Merge
---
tools/xenmon/xenbaked.c | 10 +-
tools/xenpaging/xenpaging.c | 16 ++-
tools/xentrace/xentrace.c | 10 +-
xen/arch/x86/domctl.c | 2
xen/arch/x86/hvm/hvm.c | 2
xen/arch/x86/irq.c | 2
xen/arch/x86/mm.c | 9 +-
xen/arch/x86/mm/Makefile | 6 -
xen/arch/x86/mm/hap/p2m-ept.c | 10 --
xen/arch/x86/mm/p2m.c | 15 ++-
xen/arch/x86/oprofile/op_model_athlon.c | 74 ++++++++--------
xen/arch/x86/oprofile/op_model_p4.c | 140 ++++++++++++++++----------------
xen/arch/x86/oprofile/op_model_ppro.c | 66 +++++++--------
xen/arch/x86/x86_64/compat/traps.c | 5 -
xen/common/trace.c | 2
xen/include/asm-x86/mem_sharing.h | 8 +
xen/include/asm-x86/p2m.h | 26 +++--
xen/include/public/mem_event.h | 10 +-
18 files changed, 220 insertions(+), 193 deletions(-)
diff -r eb72d1acf115 -r 380a9e363761 tools/xenmon/xenbaked.c
--- a/tools/xenmon/xenbaked.c Tue Jun 29 18:02:35 2010 +0100
+++ b/tools/xenmon/xenbaked.c Fri Jul 02 17:34:44 2010 +0100
@@ -84,7 +84,7 @@ typedef struct settings_st {
} settings_t;
struct t_struct {
- struct t_info *t_info; /* Structure with information about individual buffers */
+ const struct t_info *t_info; /* Structure with information about individual buffers */
struct t_buf **meta; /* Pointers to trace buffer metadata */
unsigned char **data; /* Pointers to trace buffer data areas */
};
@@ -376,9 +376,8 @@ static struct t_struct *map_tbufs(unsign
}
/* Map t_info metadata structure */
- tbufs.t_info = xc_map_foreign_range(xc_handle, DOMID_XEN,
- tinfo_size, PROT_READ | PROT_WRITE,
- tbufs_mfn);
+ tbufs.t_info = xc_map_foreign_range(xc_handle, DOMID_XEN, tinfo_size,
+ PROT_READ, tbufs_mfn);
if ( tbufs.t_info == 0 )
{
@@ -404,7 +403,8 @@ static struct t_struct *map_tbufs(unsign
for(i=0; i<num; i++)
{
- uint32_t *mfn_list = ((uint32_t *)tbufs.t_info) + tbufs.t_info->mfn_offset[i];
+ const uint32_t *mfn_list = (const uint32_t *)tbufs.t_info
+ + tbufs.t_info->mfn_offset[i];
int j;
xen_pfn_t pfn_list[tbufs.t_info->tbuf_size];
diff -r eb72d1acf115 -r 380a9e363761 tools/xenpaging/xenpaging.c
--- a/tools/xenpaging/xenpaging.c Tue Jun 29 18:02:35 2010 +0100
+++ b/tools/xenpaging/xenpaging.c Fri Jul 02 17:34:44 2010 +0100
@@ -375,9 +375,11 @@ int xenpaging_resume_page(xenpaging_t *p
return ret;
}
-int xenpaging_populate_page(xc_interface *xch, xenpaging_t *paging,
- unsigned long *gfn, int fd, int i)
-{
+static int xenpaging_populate_page(
+ xc_interface *xch, xenpaging_t *paging,
+ uint64_t *gfn, int fd, int i)
+{
+ unsigned long _gfn;
void *page;
int ret;
@@ -392,8 +394,10 @@ int xenpaging_populate_page(xc_interface
/* Map page */
ret = -EFAULT;
+ _gfn = *gfn;
page = xc_map_foreign_pages(paging->xc_handle, paging->mem_event.domain_id,
- PROT_READ | PROT_WRITE, gfn, 1);
+ PROT_READ | PROT_WRITE, &_gfn, 1);
+ *gfn = _gfn;
if ( page == NULL )
{
ERROR("Error mapping page: page is null");
@@ -548,7 +552,7 @@ int main(int argc, char *argv[])
if ( i >= num_pages )
{
- DPRINTF("Couldn't find page %lx\n", req.gfn);
+ DPRINTF("Couldn't find page %"PRIx64"\n", req.gfn);
goto out;
}
@@ -579,7 +583,7 @@ int main(int argc, char *argv[])
else
{
DPRINTF("page already populated (domain = %d; vcpu = %d;"
- " gfn = %lx; paused = %"PRId64")\n",
+ " gfn = %"PRIx64"; paused = %"PRId64")\n",
paging->mem_event.domain_id, req.vcpu_id,
req.gfn, req.flags & MEM_EVENT_FLAG_VCPU_PAUSED);
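In passing, a minimal standalone C program (not Xen code) showing why the DPRINTF format specifiers change together with the widened gfn field: on a 32-bit toolstack build, "%lx" consumes only 32 bits of a 64-bit argument, while PRIx64 expands to the conversion matching uint64_t on every ABI.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    uint64_t gfn = 0x123456789abcULL;

    /* "%lx" would read only the low 32 bits of this argument on a
     * 32-bit build; PRIx64 always matches uint64_t. */
    printf("Couldn't find page %" PRIx64 "\n", gfn);
    return 0;
}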
diff -r eb72d1acf115 -r 380a9e363761 tools/xentrace/xentrace.c
--- a/tools/xentrace/xentrace.c Tue Jun 29 18:02:35 2010 +0100
+++ b/tools/xentrace/xentrace.c Fri Jul 02 17:34:44 2010 +0100
@@ -63,7 +63,7 @@ typedef struct settings_st {
} settings_t;
struct t_struct {
- struct t_info *t_info; /* Structure with information about individual buffers */
+ const struct t_info *t_info; /* Structure with information about individual buffers */
struct t_buf **meta; /* Pointers to trace buffer metadata */
unsigned char **data; /* Pointers to trace buffer data areas */
};
@@ -475,9 +475,8 @@ static struct t_struct *map_tbufs(unsign
int i;
/* Map t_info metadata structure */
- tbufs.t_info = xc_map_foreign_range(xc_handle, DOMID_XEN,
- tinfo_size, PROT_READ | PROT_WRITE,
- tbufs_mfn);
+ tbufs.t_info = xc_map_foreign_range(xc_handle, DOMID_XEN, tinfo_size,
+ PROT_READ, tbufs_mfn);
if ( tbufs.t_info == 0 )
{
@@ -503,7 +502,8 @@ static struct t_struct *map_tbufs(unsign
for(i=0; i<num; i++)
{
- uint32_t *mfn_list = ((uint32_t *)tbufs.t_info) + tbufs.t_info->mfn_offset[i];
+ const uint32_t *mfn_list = (const uint32_t *)tbufs.t_info
+ + tbufs.t_info->mfn_offset[i];
int j;
xen_pfn_t pfn_list[tbufs.t_info->tbuf_size];
diff -r eb72d1acf115 -r 380a9e363761 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c Tue Jun 29 18:02:35 2010 +0100
+++ b/xen/arch/x86/domctl.c Fri Jul 02 17:34:44 2010 +0100
@@ -1420,6 +1420,7 @@ long arch_do_domctl(
break;
#endif /* XEN_GDBSX_CONFIG */
+#ifdef __x86_64__
case XEN_DOMCTL_mem_event_op:
{
struct domain *d;
@@ -1450,6 +1451,7 @@ long arch_do_domctl(
}
}
break;
+#endif /* __x86_64__ */
default:
ret = -ENOSYS;
diff -r eb72d1acf115 -r 380a9e363761 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Tue Jun 29 18:02:35 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c Fri Jul 02 17:34:44 2010 +0100
@@ -982,6 +982,7 @@ bool_t hvm_hap_nested_page_fault(unsigne
return 1;
}
+#ifdef __x86_64__
/* Check if the page has been paged out */
if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
p2m_mem_paging_populate(current->domain, gfn);
@@ -992,6 +993,7 @@ bool_t hvm_hap_nested_page_fault(unsigne
mem_sharing_unshare_page(current->domain, gfn, 0);
return 1;
}
+#endif
/* Spurious fault? PoD and log-dirty also take this path. */
if ( p2m_is_ram(p2mt) )
diff -r eb72d1acf115 -r 380a9e363761 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c Tue Jun 29 18:02:35 2010 +0100
+++ b/xen/arch/x86/irq.c Fri Jul 02 17:34:44 2010 +0100
@@ -1027,7 +1027,7 @@ static void __pirq_guest_eoi(struct doma
int pirq_guest_eoi(struct domain *d, int irq)
{
- if ( (irq < 0) || (irq > d->nr_pirqs) )
+ if ( (irq < 0) || (irq >= d->nr_pirqs) )
return -EINVAL;
__pirq_guest_eoi(d, irq);
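A minimal standalone illustration (hypothetical names, not the Xen pirq code) of the off-by-one fix above: an array of nr_pirqs entries has valid indices 0 through nr_pirqs - 1, so the reject condition must use >=, not >.

#include <stdio.h>

/* Stand-in for the d->nr_pirqs bounds check. */
static int pirq_in_range(int irq, int nr_pirqs)
{
    if ( (irq < 0) || (irq >= nr_pirqs) )
        return 0;   /* out of range: irq == nr_pirqs is one past the end */
    return 1;
}

int main(void)
{
    printf("%d\n", pirq_in_range(8, 8)); /* 0: rejected by the fixed test */
    printf("%d\n", pirq_in_range(7, 8)); /* 1: last valid index */
    return 0;
}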
diff -r eb72d1acf115 -r 380a9e363761 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Jun 29 18:02:35 2010 +0100
+++ b/xen/arch/x86/mm.c Fri Jul 02 17:34:44 2010 +0100
@@ -3448,20 +3448,23 @@ int do_mmu_update(
rc = -ENOENT;
break;
}
+#ifdef __x86_64__
/* XXX: Ugly: pull all the checks into a separate function.
* Don't want to do it now, not to interfere with mem_paging
* patches */
else if ( p2m_ram_shared == l1e_p2mt )
{
/* Unshare the page for RW foreign mappings */
- if(l1e_get_flags(l1e) & _PAGE_RW)
+ if ( l1e_get_flags(l1e) & _PAGE_RW )
{
rc = mem_sharing_unshare_page(pg_owner,
l1e_get_pfn(l1e),
0);
- if(rc) break;
+ if ( rc )
+ break;
}
}
+#endif
okay = mod_l1_entry(va, l1e, mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, v,
@@ -4806,8 +4809,10 @@ long arch_memory_op(int op, XEN_GUEST_HA
return rc;
}
+#ifdef __x86_64__
case XENMEM_get_sharing_freed_pages:
return mem_sharing_get_nr_saved_mfns();
+#endif
default:
return subarch_memory_op(op, arg);
diff -r eb72d1acf115 -r 380a9e363761 xen/arch/x86/mm/Makefile
--- a/xen/arch/x86/mm/Makefile Tue Jun 29 18:02:35 2010 +0100
+++ b/xen/arch/x86/mm/Makefile Fri Jul 02 17:34:44 2010 +0100
@@ -6,9 +6,9 @@ obj-y += guest_walk_2.o
obj-y += guest_walk_2.o
obj-y += guest_walk_3.o
obj-$(x86_64) += guest_walk_4.o
-obj-y += mem_event.o
-obj-y += mem_paging.o
-obj-y += mem_sharing.o
+obj-$(x86_64) += mem_event.o
+obj-$(x86_64) += mem_paging.o
+obj-$(x86_64) += mem_sharing.o
guest_walk_%.o: guest_walk.c Makefile
$(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
diff -r eb72d1acf115 -r 380a9e363761 xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c Tue Jun 29 18:02:35 2010 +0100
+++ b/xen/arch/x86/mm/hap/p2m-ept.c Fri Jul 02 17:34:44 2010 +0100
@@ -307,18 +307,10 @@ ept_set_entry(struct domain *d, unsigned
}
else
{
- int num = order / EPT_TABLE_ORDER;
int level;
ept_entry_t *split_ept_entry;
- if ( (num >= 2) && hvm_hap_has_1gb(d) )
- num = 2;
- else if ( (num >= 1) && hvm_hap_has_2mb(d) )
- num = 1;
- else
- num = 0;
-
- for ( level = split_level; level > num ; level-- )
+ for ( level = split_level; level > walk_level ; level-- )
{
rv = ept_split_large_page(d, &table, &index, gfn, level);
if ( !rv )
diff -r eb72d1acf115 -r 380a9e363761 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Tue Jun 29 18:02:35 2010 +0100
+++ b/xen/arch/x86/mm/p2m.c Fri Jul 02 17:34:44 2010 +0100
@@ -1868,17 +1868,23 @@ void p2m_teardown(struct domain *d)
{
struct page_info *pg;
struct p2m_domain *p2m = p2m_get_hostp2m(d);
+#ifdef __x86_64__
unsigned long gfn;
p2m_type_t t;
mfn_t mfn;
+#endif
p2m_lock(p2m);
- for(gfn=0; gfn < p2m->max_mapped_pfn; gfn++)
+
+#ifdef __x86_64__
+ for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
{
mfn = p2m->get_entry(d, gfn, &t, p2m_query);
- if(mfn_valid(mfn) && (t == p2m_ram_shared))
+ if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN));
}
+#endif
+
p2m->phys_table = pagetable_null();
while ( (pg = page_list_remove_head(&p2m->pages)) )
@@ -2551,7 +2557,7 @@ p2m_type_t p2m_change_type(struct domain
p2m_lock(d->arch.p2m);
- mfn = gfn_to_mfn(d, gfn, &pt);
+ mfn = gfn_to_mfn_query(d, gfn, &pt);
if ( pt == ot )
set_p2m_entry(d, gfn, mfn, 0, nt);
@@ -2616,6 +2622,7 @@ clear_mmio_p2m_entry(struct domain *d, u
return rc;
}
+#ifdef __x86_64__
int
set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
{
@@ -2798,7 +2805,7 @@ void p2m_mem_paging_resume(struct domain
/* Unpause any domains that were paused because the ring was full */
mem_event_unpause_vcpus(d);
}
-
+#endif /* __x86_64__ */
/*
* Local variables:
diff -r eb72d1acf115 -r 380a9e363761 xen/arch/x86/oprofile/op_model_athlon.c
--- a/xen/arch/x86/oprofile/op_model_athlon.c Tue Jun 29 18:02:35 2010 +0100
+++ b/xen/arch/x86/oprofile/op_model_athlon.c Fri Jul 02 17:34:44 2010 +0100
@@ -26,23 +26,22 @@
#define NUM_COUNTERS 4
#define NUM_CONTROLS 4
-#define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0)
+#define CTR_READ(msr_content,msrs,c) do {rdmsrl(msrs->counters[(c)].addr, (msr_content));} while (0)
#define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1);} while (0)
-#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
+#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<31)))
-#define CTRL_READ(l,h,msrs,c) do {rdmsr(msrs->controls[(c)].addr, (l), (h));} while (0)
-#define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (l), (h));} while (0)
-#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
-#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
-#define CTRL_CLEAR(lo, hi) (lo &= (1<<21), hi = 0)
-#define CTRL_SET_ENABLE(val) (val |= 1<<20)
+#define CTRL_READ(msr_content,msrs,c) do {rdmsrl(msrs->controls[(c)].addr, (msr_content));} while (0)
+#define CTRL_WRITE(msr_content,msrs,c) do {wrmsrl(msrs->controls[(c)].addr, (msr_content));} while (0)
+#define CTRL_SET_ACTIVE(n) (n |= (1ULL<<22))
+#define CTRL_SET_INACTIVE(n) (n &= ~(1ULL<<22))
+#define CTRL_CLEAR(val) (val &= (1ULL<<21))
+#define CTRL_SET_ENABLE(val) (val |= 1ULL<<20)
#define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16))
#define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17))
#define CTRL_SET_UM(val, m) (val |= ((m & 0xff) << 8))
-#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
-#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
-#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
-#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
+#define CTRL_SET_EVENT(val, e) (val |= (((e >> 8) & 0xf) | (e & 0xff)))
+#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 0x1ULL) << 41))
+#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 0x1ULL) << 40))
static unsigned long reset_value[NUM_COUNTERS];
@@ -64,14 +63,14 @@ static void athlon_fill_in_addresses(str
static void athlon_setup_ctrs(struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ uint64_t msr_content;
int i;
/* clear all counters */
for (i = 0 ; i < NUM_CONTROLS; ++i) {
- CTRL_READ(low, high, msrs, i);
- CTRL_CLEAR(low, high);
- CTRL_WRITE(low, high, msrs, i);
+ CTRL_READ(msr_content, msrs, i);
+ CTRL_CLEAR(msr_content);
+ CTRL_WRITE(msr_content, msrs, i);
}
/* avoid a false detection of ctr overflows in NMI handler */
@@ -86,17 +85,16 @@ static void athlon_setup_ctrs(struct op_
CTR_WRITE(counter_config[i].count, msrs, i);
- CTRL_READ(low, high, msrs, i);
- CTRL_CLEAR(low, high);
- CTRL_SET_ENABLE(low);
- CTRL_SET_USR(low, counter_config[i].user);
- CTRL_SET_KERN(low, counter_config[i].kernel);
- CTRL_SET_UM(low, counter_config[i].unit_mask);
- CTRL_SET_EVENT_LOW(low, counter_config[i].event);
- CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
- CTRL_SET_HOST_ONLY(high, 0);
- CTRL_SET_GUEST_ONLY(high, 0);
- CTRL_WRITE(low, high, msrs, i);
+ CTRL_READ(msr_content, msrs, i);
+ CTRL_CLEAR(msr_content);
+ CTRL_SET_ENABLE(msr_content);
+ CTRL_SET_USR(msr_content, counter_config[i].user);
+ CTRL_SET_KERN(msr_content, counter_config[i].kernel);
+ CTRL_SET_UM(msr_content, counter_config[i].unit_mask);
+ CTRL_SET_EVENT(msr_content, counter_config[i].event);
+ CTRL_SET_HOST_ONLY(msr_content, 0);
+ CTRL_SET_GUEST_ONLY(msr_content, 0);
+ CTRL_WRITE(msr_content, msrs, i);
} else {
reset_value[i] = 0;
}
@@ -108,7 +106,7 @@ static int athlon_check_ctrs(unsigned in
struct cpu_user_regs * const regs)
{
- unsigned int low, high;
+ uint64_t msr_content;
int i;
int ovf = 0;
unsigned long eip = regs->eip;
@@ -128,8 +126,8 @@ static int athlon_check_ctrs(unsigned in
}
for (i = 0 ; i < NUM_COUNTERS; ++i) {
- CTR_READ(low, high, msrs, i);
- if (CTR_OVERFLOWED(low)) {
+ CTR_READ(msr_content, msrs, i);
+ if (CTR_OVERFLOWED(msr_content)) {
xenoprof_log_event(current, regs, eip, mode, i);
CTR_WRITE(reset_value[i], msrs, i);
ovf = 1;
@@ -143,13 +141,13 @@ static int athlon_check_ctrs(unsigned in
static void athlon_start(struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ uint64_t msr_content;
int i;
for (i = 0 ; i < NUM_COUNTERS ; ++i) {
if (reset_value[i]) {
- CTRL_READ(low, high, msrs, i);
- CTRL_SET_ACTIVE(low);
- CTRL_WRITE(low, high, msrs, i);
+ CTRL_READ(msr_content, msrs, i);
+ CTRL_SET_ACTIVE(msr_content);
+ CTRL_WRITE(msr_content, msrs, i);
}
}
}
@@ -157,15 +155,15 @@ static void athlon_start(struct op_msrs
static void athlon_stop(struct op_msrs const * const msrs)
{
- unsigned int low,high;
+ uint64_t msr_content;
int i;
/* Subtle: stop on all counters to avoid race with
* setting our pm callback */
for (i = 0 ; i < NUM_COUNTERS ; ++i) {
- CTRL_READ(low, high, msrs, i);
- CTRL_SET_INACTIVE(low);
- CTRL_WRITE(low, high, msrs, i);
+ CTRL_READ(msr_content, msrs, i);
+ CTRL_SET_INACTIVE(msr_content);
+ CTRL_WRITE(msr_content, msrs, i);
}
}
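A standalone sketch of the conversion pattern applied throughout this file: rdmsr()/wrmsr() handle an MSR as two 32-bit halves, while rdmsrl()/wrmsrl() use a single uint64_t, which is why masks above bit 31 (for example the host/guest-only bits at positions 41 and 40) must be built from ULL constants. The read/write helpers below are simulations, not the Xen implementations.

#include <stdio.h>
#include <stdint.h>

/* Simulated 64-bit MSR; the real rdmsrl/wrmsrl issue rdmsr/wrmsr
 * instructions against an MSR address. */
static uint64_t fake_msr;

static void sim_wrmsrl(uint64_t val) { fake_msr = val; }
static uint64_t sim_rdmsrl(void) { return fake_msr; }

/* Same shape as the patched macro: the shift only works in 64 bits. */
#define CTRL_SET_HOST_ONLY(val, h) ((val) |= (((h) & 0x1ULL) << 41))

int main(void)
{
    uint64_t msr_content = 0;

    CTRL_SET_HOST_ONLY(msr_content, 1);  /* (1 << 41) in int would overflow */
    sim_wrmsrl(msr_content);
    msr_content = sim_rdmsrl();
    printf("msr = %#llx\n", (unsigned long long)msr_content);
    return 0;
}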
diff -r eb72d1acf115 -r 380a9e363761 xen/arch/x86/oprofile/op_model_p4.c
--- a/xen/arch/x86/oprofile/op_model_p4.c Tue Jun 29 18:02:35 2010 +0100
+++ b/xen/arch/x86/oprofile/op_model_p4.c Fri Jul 02 17:34:44 2010 +0100
@@ -347,35 +347,35 @@ static const struct p4_event_binding p4_
};
-#define MISC_PMC_ENABLED_P(x) ((x) & 1 << 7)
-
-#define ESCR_RESERVED_BITS 0x80000003
+#define MISC_PMC_ENABLED_P(x) ((x) & 1ULL << 7)
+
+#define ESCR_RESERVED_BITS 0x80000003ULL
#define ESCR_CLEAR(escr) ((escr) &= ESCR_RESERVED_BITS)
-#define ESCR_SET_USR_0(escr, usr) ((escr) |= (((usr) & 1) << 2))
-#define ESCR_SET_OS_0(escr, os) ((escr) |= (((os) & 1) << 3))
-#define ESCR_SET_USR_1(escr, usr) ((escr) |= (((usr) & 1)))
-#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1))
-#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25))
-#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9))
-#define ESCR_READ(escr,high,ev,i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0)
-#define ESCR_WRITE(escr,high,ev,i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0)
-
-#define CCCR_RESERVED_BITS 0x38030FFF
+#define ESCR_SET_USR_0(escr, usr) ((escr) |= (((usr) & 1ULL) << 2))
+#define ESCR_SET_OS_0(escr, os) ((escr) |= (((os) & 1ULL) << 3))
+#define ESCR_SET_USR_1(escr, usr) ((escr) |= (((usr) & 1ULL)))
+#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1ULL) << 1))
+#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3fULL) << 25))
+#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffffULL) << 9))
+#define ESCR_READ(escr,ev,i) do {rdmsrl(ev->bindings[(i)].escr_address, (escr));} while (0)
+#define ESCR_WRITE(escr,ev,i) do {wrmsrl(ev->bindings[(i)].escr_address, (escr));} while (0)
+
+#define CCCR_RESERVED_BITS 0x38030FFFULL
#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS)
-#define CCCR_SET_REQUIRED_BITS(cccr) ((cccr) |= 0x00030000)
-#define CCCR_SET_ESCR_SELECT(cccr, sel) ((cccr) |= (((sel) & 0x07) << 13))
-#define CCCR_SET_PMI_OVF_0(cccr) ((cccr) |= (1<<26))
-#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27))
-#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12))
-#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12))
-#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0)
-#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0)
-#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31))
-#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31)))
-
-#define CTR_READ(l,h,i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h));} while (0)
-#define CTR_WRITE(l,i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1);} while (0)
-#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000))
+#define CCCR_SET_REQUIRED_BITS(cccr) ((cccr) |= 0x00030000ULL)
+#define CCCR_SET_ESCR_SELECT(cccr, sel) ((cccr) |= (((sel) & 0x07ULL) << 13))
+#define CCCR_SET_PMI_OVF_0(cccr) ((cccr) |= (1ULL<<26))
+#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1ULL<<27))
+#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1ULL<<12))
+#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1ULL<<12))
+#define CCCR_READ(msr_content, i) do {rdmsrl(p4_counters[(i)].cccr_address, (msr_content));} while (0)
+#define CCCR_WRITE(msr_content, i) do {wrmsrl(p4_counters[(i)].cccr_address, (msr_content));} while (0)
+#define CCCR_OVF_P(cccr) ((cccr) & (1ULL<<31))
+#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1ULL<<31)))
+
+#define CTR_READ(msr_content,i) do {rdmsrl(p4_counters[(i)].counter_address, (msr_content));} while (0)
+#define CTR_WRITE(msr_content,i) do {wrmsrl(p4_counters[(i)].counter_address, -(msr_content));} while (0)
+#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000ULL))
/* this assigns a "stagger" to the current CPU, which is used throughout
@@ -481,9 +481,8 @@ static void pmc_setup_one_p4_counter(uns
{
int i;
int const maxbind = 2;
- unsigned int cccr = 0;
- unsigned int escr = 0;
- unsigned int high = 0;
+ uint64_t cccr = 0;
+ uint64_t escr = 0;
unsigned int counter_bit;
const struct p4_event_binding *ev = NULL;
unsigned int stag;
@@ -507,7 +506,7 @@ static void pmc_setup_one_p4_counter(uns
if (ev->bindings[i].virt_counter & counter_bit) {
/* modify ESCR */
- ESCR_READ(escr, high, ev, i);
+ ESCR_READ(escr, ev, i);
ESCR_CLEAR(escr);
if (stag == 0) {
ESCR_SET_USR_0(escr, counter_config[ctr].user);
@@ -518,10 +517,10 @@ static void pmc_setup_one_p4_counter(uns
}
ESCR_SET_EVENT_SELECT(escr, ev->event_select);
ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask);
- ESCR_WRITE(escr, high, ev, i);
+ ESCR_WRITE(escr, ev, i);
/* modify CCCR */
- CCCR_READ(cccr, high, VIRT_CTR(stag, ctr));
+ CCCR_READ(cccr, VIRT_CTR(stag, ctr));
CCCR_CLEAR(cccr);
CCCR_SET_REQUIRED_BITS(cccr);
CCCR_SET_ESCR_SELECT(cccr, ev->escr_select);
@@ -530,7 +529,7 @@ static void pmc_setup_one_p4_counter(uns
} else {
CCCR_SET_PMI_OVF_1(cccr);
}
- CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr));
+ CCCR_WRITE(cccr, VIRT_CTR(stag, ctr));
return;
}
}
@@ -544,68 +543,68 @@ static void p4_setup_ctrs(struct op_msrs
static void p4_setup_ctrs(struct op_msrs const * const msrs)
{
unsigned int i;
- unsigned int low, high;
+ uint64_t msr_content;
unsigned int addr;
unsigned int stag;
stag = get_stagger();
- rdmsr(MSR_IA32_MISC_ENABLE, low, high);
- if (! MISC_PMC_ENABLED_P(low)) {
+ rdmsrl(MSR_IA32_MISC_ENABLE, msr_content);
+ if (! MISC_PMC_ENABLED_P(msr_content)) {
printk(KERN_ERR "oprofile: P4 PMC not available\n");
return;
}
/* clear the cccrs we will use */
for (i = 0 ; i < num_counters ; i++) {
- rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
- CCCR_CLEAR(low);
- CCCR_SET_REQUIRED_BITS(low);
- wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
+ rdmsrl(p4_counters[VIRT_CTR(stag, i)].cccr_address, msr_content);
+ CCCR_CLEAR(msr_content);
+ CCCR_SET_REQUIRED_BITS(msr_content);
+ wrmsrl(p4_counters[VIRT_CTR(stag, i)].cccr_address, msr_content);
}
/* clear cccrs outside our concern */
for (i = stag ; i < NUM_UNUSED_CCCRS ; i += addr_increment()) {
- rdmsr(p4_unused_cccr[i], low, high);
- CCCR_CLEAR(low);
- CCCR_SET_REQUIRED_BITS(low);
- wrmsr(p4_unused_cccr[i], low, high);
+ rdmsrl(p4_unused_cccr[i], msr_content);
+ CCCR_CLEAR(msr_content);
+ CCCR_SET_REQUIRED_BITS(msr_content);
+ wrmsrl(p4_unused_cccr[i], msr_content);
}
/* clear all escrs (including those outside our concern) */
for (addr = MSR_P4_BSU_ESCR0 + stag;
addr < MSR_P4_IQ_ESCR0; addr += addr_increment()) {
- wrmsr(addr, 0, 0);
+ wrmsrl(addr, 0x0ULL);
}
/* On older models clear also MSR_P4_IQ_ESCR0/1 */
if (boot_cpu_data.x86_model < 0x3) {
- wrmsr(MSR_P4_IQ_ESCR0, 0, 0);
- wrmsr(MSR_P4_IQ_ESCR1, 0, 0);
+ wrmsrl(MSR_P4_IQ_ESCR0, 0x0ULL);
+ wrmsrl(MSR_P4_IQ_ESCR1, 0x0ULL);
}
for (addr = MSR_P4_RAT_ESCR0 + stag;
addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
- wrmsr(addr, 0, 0);
+ wrmsrl(addr, 0x0ULL);
}
for (addr = MSR_P4_MS_ESCR0 + stag;
addr <= MSR_P4_TC_ESCR1; addr += addr_increment()){
- wrmsr(addr, 0, 0);
+ wrmsrl(addr, 0x0ULL);
}
for (addr = MSR_P4_IX_ESCR0 + stag;
addr <= MSR_P4_CRU_ESCR3; addr += addr_increment()){
- wrmsr(addr, 0, 0);
+ wrmsrl(addr, 0x0ULL);
}
if (num_counters == NUM_COUNTERS_NON_HT) {
- wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
- wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
+ wrmsrl(MSR_P4_CRU_ESCR4, 0x0ULL);
+ wrmsrl(MSR_P4_CRU_ESCR5, 0x0ULL);
} else if (stag == 0) {
- wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
+ wrmsrl(MSR_P4_CRU_ESCR4, 0x0ULL);
} else {
- wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
+ wrmsrl(MSR_P4_CRU_ESCR5, 0x0ULL);
}
/* setup all counters */
@@ -624,7 +623,8 @@ static int p4_check_ctrs(unsigned int co
struct op_msrs const * const msrs,
struct cpu_user_regs * const regs)
{
- unsigned long ctr, low, high, stag, real;
+ unsigned long ctr, stag, real;
+ uint64_t msr_content;
int i;
int ovf = 0;
unsigned long eip = regs->eip;
@@ -656,13 +656,13 @@ static int p4_check_ctrs(unsigned int co
real = VIRT_CTR(stag, i);
- CCCR_READ(low, high, real);
- CTR_READ(ctr, high, real);
- if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
+ CCCR_READ(msr_content, real);
+ CTR_READ(ctr, real);
+ if (CCCR_OVF_P(msr_content) || CTR_OVERFLOW_P(ctr)) {
xenoprof_log_event(current, regs, eip, mode, i);
CTR_WRITE(reset_value[i], real);
- CCCR_CLEAR_OVF(low);
- CCCR_WRITE(low, high, real);
+ CCCR_CLEAR_OVF(msr_content);
+ CCCR_WRITE(msr_content, real);
CTR_WRITE(reset_value[i], real);
ovf = 1;
}
@@ -677,7 +677,8 @@ static int p4_check_ctrs(unsigned int co
static void p4_start(struct op_msrs const * const msrs)
{
- unsigned int low, high, stag;
+ unsigned int stag;
+ uint64_t msr_content;
int i;
stag = get_stagger();
@@ -685,24 +686,25 @@ static void p4_start(struct op_msrs cons
for (i = 0; i < num_counters; ++i) {
if (!reset_value[i])
continue;
- CCCR_READ(low, high, VIRT_CTR(stag, i));
- CCCR_SET_ENABLE(low);
- CCCR_WRITE(low, high, VIRT_CTR(stag, i));
+ CCCR_READ(msr_content, VIRT_CTR(stag, i));
+ CCCR_SET_ENABLE(msr_content);
+ CCCR_WRITE(msr_content, VIRT_CTR(stag, i));
}
}
static void p4_stop(struct op_msrs const * const msrs)
{
- unsigned int low, high, stag;
+ unsigned int stag;
+ uint64_t msr_content;
int i;
stag = get_stagger();
for (i = 0; i < num_counters; ++i) {
- CCCR_READ(low, high, VIRT_CTR(stag, i));
- CCCR_SET_DISABLE(low);
- CCCR_WRITE(low, high, VIRT_CTR(stag, i));
+ CCCR_READ(msr_content, VIRT_CTR(stag, i));
+ CCCR_SET_DISABLE(msr_content);
+ CCCR_WRITE(msr_content, VIRT_CTR(stag, i));
}
}
diff -r eb72d1acf115 -r 380a9e363761 xen/arch/x86/oprofile/op_model_ppro.c
--- a/xen/arch/x86/oprofile/op_model_ppro.c Tue Jun 29 18:02:35 2010 +0100
+++ b/xen/arch/x86/oprofile/op_model_ppro.c Fri Jul 02 17:34:44 2010 +0100
@@ -43,18 +43,18 @@ static int counter_width = 32;
#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1))))
-#define CTRL_READ(l,h,msrs,c) do {rdmsr((msrs->controls[(c)].addr), (l), (h));} while (0)
-#define CTRL_WRITE(l,h,msrs,c) do {wrmsr((msrs->controls[(c)].addr), (l), (h));} while (0)
-#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
-#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
-#define CTRL_CLEAR(x) (x &= (1<<21))
-#define CTRL_SET_ENABLE(val) (val |= 1<<20)
-#define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16))
-#define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17))
+#define CTRL_READ(msr_content,msrs,c) do {rdmsrl((msrs->controls[(c)].addr), (msr_content));} while (0)
+#define CTRL_WRITE(msr_content,msrs,c) do {wrmsrl((msrs->controls[(c)].addr), (msr_content));} while (0)
+#define CTRL_SET_ACTIVE(n) (n |= (1ULL<<22))
+#define CTRL_SET_INACTIVE(n) (n &= ~(1ULL<<22))
+#define CTRL_CLEAR(x) (x &= (1ULL<<21))
+#define CTRL_SET_ENABLE(val) (val |= 1ULL<<20)
+#define CTRL_SET_USR(val,u) (val |= ((u & 1ULL) << 16))
+#define CTRL_SET_KERN(val,k) (val |= ((k & 1ULL) << 17))
#define CTRL_SET_UM(val, m) (val |= (m << 8))
#define CTRL_SET_EVENT(val, e) (val |= e)
-#define IS_ACTIVE(val) (val & (1 << 22) )
-#define IS_ENABLE(val) (val & (1 << 20) )
+#define IS_ACTIVE(val) (val & (1ULL << 22) )
+#define IS_ENABLE(val) (val & (1ULL << 20) )
static unsigned long reset_value[OP_MAX_COUNTER];
int ppro_has_global_ctrl = 0;
@@ -71,7 +71,7 @@ static void ppro_fill_in_addresses(struc
static void ppro_setup_ctrs(struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ uint64_t msr_content;
int i;
if (cpu_has_arch_perfmon) {
@@ -93,14 +93,14 @@ static void ppro_setup_ctrs(struct op_ms
/* clear all counters */
for (i = 0 ; i < num_counters; ++i) {
- CTRL_READ(low, high, msrs, i);
- CTRL_CLEAR(low);
- CTRL_WRITE(low, high, msrs, i);
+ CTRL_READ(msr_content, msrs, i);
+ CTRL_CLEAR(msr_content);
+ CTRL_WRITE(msr_content, msrs, i);
}
/* avoid a false detection of ctr overflows in NMI handler */
for (i = 0; i < num_counters; ++i)
- wrmsrl(msrs->counters[i].addr, -1LL);
+ wrmsrl(msrs->counters[i].addr, ~0x0ULL);
/* enable active counters */
for (i = 0; i < num_counters; ++i) {
@@ -109,14 +109,14 @@ static void ppro_setup_ctrs(struct op_ms
wrmsrl(msrs->counters[i].addr, -reset_value[i]);
- CTRL_READ(low, high, msrs, i);
- CTRL_CLEAR(low);
- CTRL_SET_ENABLE(low);
- CTRL_SET_USR(low, counter_config[i].user);
- CTRL_SET_KERN(low, counter_config[i].kernel);
- CTRL_SET_UM(low, counter_config[i].unit_mask);
- CTRL_SET_EVENT(low, counter_config[i].event);
- CTRL_WRITE(low, high, msrs, i);
+ CTRL_READ(msr_content, msrs, i);
+ CTRL_CLEAR(msr_content);
+ CTRL_SET_ENABLE(msr_content);
+ CTRL_SET_USR(msr_content, counter_config[i].user);
+ CTRL_SET_KERN(msr_content, counter_config[i].kernel);
+ CTRL_SET_UM(msr_content, counter_config[i].unit_mask);
+ CTRL_SET_EVENT(msr_content, counter_config[i].event);
+ CTRL_WRITE(msr_content, msrs, i);
} else {
reset_value[i] = 0;
}
@@ -166,38 +166,38 @@ static int ppro_check_ctrs(unsigned int
static void ppro_start(struct op_msrs const * const msrs)
{
- unsigned int low,high;
+ uint64_t msr_content;
int i;
for (i = 0; i < num_counters; ++i) {
if (reset_value[i]) {
- CTRL_READ(low, high, msrs, i);
- CTRL_SET_ACTIVE(low);
- CTRL_WRITE(low, high, msrs, i);
+ CTRL_READ(msr_content, msrs, i);
+ CTRL_SET_ACTIVE(msr_content);
+ CTRL_WRITE(msr_content, msrs, i);
}
}
/* Global Control MSR is enabled by default when system power on.
* However, this may not hold true when xenoprof starts to run.
*/
if ( ppro_has_global_ctrl )
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, (1<<num_counters) - 1);
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, (1ULL<<num_counters) - 1);
}
static void ppro_stop(struct op_msrs const * const msrs)
{
- unsigned int low,high;
+ uint64_t msr_content;
int i;
for (i = 0; i < num_counters; ++i) {
if (!reset_value[i])
continue;
- CTRL_READ(low, high, msrs, i);
- CTRL_SET_INACTIVE(low);
- CTRL_WRITE(low, high, msrs, i);
+ CTRL_READ(msr_content, msrs, i);
+ CTRL_SET_INACTIVE(msr_content);
+ CTRL_WRITE(msr_content, msrs, i);
}
if ( ppro_has_global_ctrl )
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0ULL);
}
static int ppro_is_arch_pmu_msr(u64 msr_index, int *type, int *index)
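A short standalone note on the (1ULL<<num_counters) - 1 change above: MSR_CORE_PERF_GLOBAL_CTRL is a 64-bit register, and building its enable mask with a plain int shift becomes undefined behaviour once the shift count reaches 32, so the 64-bit constant is the safe form. A sketch (the counter count is exaggerated to show the hazard):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int num_counters = 32;

    /* (1 << 32) - 1 would be undefined on a 32-bit int;
     * the ULL form yields the intended 0xffffffff enable mask. */
    uint64_t mask = (1ULL << num_counters) - 1;
    printf("enable mask = %#llx\n", (unsigned long long)mask);
    return 0;
}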
diff -r eb72d1acf115 -r 380a9e363761 xen/arch/x86/x86_64/compat/traps.c
--- a/xen/arch/x86/x86_64/compat/traps.c Tue Jun 29 18:02:35 2010 +0100
+++ b/xen/arch/x86/x86_64/compat/traps.c Fri Jul 02 17:34:44 2010 +0100
@@ -127,9 +127,8 @@ unsigned int compat_iret(void)
ti = &v->arch.guest_context.trap_ctxt[13];
if ( TI_GET_IF(ti) )
eflags &= ~X86_EFLAGS_IF;
- regs->_eflags = eflags & ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
- X86_EFLAGS_NT|X86_EFLAGS_TF);
-
+ regs->_eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
+ X86_EFLAGS_NT|X86_EFLAGS_TF);
if ( unlikely(__put_user(0, (u32 *)regs->rsp)) )
goto exit_and_crash;
regs->_eip = ti->address;
diff -r eb72d1acf115 -r 380a9e363761 xen/common/trace.c
--- a/xen/common/trace.c Tue Jun 29 18:02:35 2010 +0100
+++ b/xen/common/trace.c Fri Jul 02 17:34:44 2010 +0100
@@ -309,7 +309,7 @@ void __init init_trace_bufs(void)
for(i=0; i<T_INFO_PAGES; i++)
share_xen_page_with_privileged_guests(
- virt_to_page(t_info) + i, XENSHARE_writable);
+ virt_to_page(t_info) + i, XENSHARE_readonly);
if ( opt_tbuf_size == 0 )
{
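This read-only sharing pairs with the xenbaked/xentrace hunks above, which now map the t_info pages with PROT_READ and hold them behind const pointers. A minimal standalone illustration using plain mmap rather than xc_map_foreign_range:

#define _DEFAULT_SOURCE   /* for MAP_ANONYMOUS */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    /* An anonymous page stands in for a page the hypervisor shares
     * read-only; writes through this mapping would fault. */
    const void *p = mmap(NULL, 4096, PROT_READ,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if ( p == MAP_FAILED )
        return 1;
    printf("read-only mapping at %p\n", p);
    munmap((void *)p, 4096);
    return 0;
}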
diff -r eb72d1acf115 -r 380a9e363761 xen/include/asm-x86/mem_sharing.h
--- a/xen/include/asm-x86/mem_sharing.h Tue Jun 29 18:02:35 2010 +0100
+++ b/xen/include/asm-x86/mem_sharing.h Fri Jul 02 17:34:44 2010 +0100
@@ -22,6 +22,8 @@
#ifndef __MEM_SHARING_H__
#define __MEM_SHARING_H__
+#ifdef __x86_64__
+
#define sharing_supported(_d) \
(is_hvm_domain(_d) && paging_mode_hap(_d))
@@ -43,4 +45,10 @@ int mem_sharing_domctl(struct domain *d,
xen_domctl_mem_sharing_op_t *mec);
void mem_sharing_init(void);
+#else
+
+#define mem_sharing_init() do { } while (0)
+
+#endif /* __x86_64__ */
+
#endif /* __MEM_SHARING_H__ */
diff -r eb72d1acf115 -r 380a9e363761 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Tue Jun 29 18:02:35 2010 +0100
+++ b/xen/include/asm-x86/p2m.h Fri Jul 02 17:34:44 2010 +0100
@@ -78,11 +78,12 @@ typedef enum {
p2m_grant_map_rw = 7, /* Read/write grant mapping */
p2m_grant_map_ro = 8, /* Read-only grant mapping */
+ /* Likewise, although these are defined in all builds, they can only
+ * be used in 64-bit builds */
p2m_ram_paging_out = 9, /* Memory that is being paged out */
p2m_ram_paged = 10, /* Memory that has been paged out */
p2m_ram_paging_in = 11, /* Memory that is being paged in */
p2m_ram_paging_in_start = 12, /* Memory that is being paged in */
-
p2m_ram_shared = 13, /* Shared or sharable memory */
} p2m_type_t;
@@ -154,6 +155,7 @@ typedef enum {
#define p2m_is_paged(_t) (p2m_to_mask(_t) & P2M_PAGED_TYPES)
#define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES)
#define p2m_is_shared(_t) (p2m_to_mask(_t) & P2M_SHARED_TYPES)
+
/* Populate-on-demand */
#define POPULATE_ON_DEMAND_MFN (1<<9)
@@ -323,20 +325,21 @@ static inline mfn_t gfn_to_mfn_unshare(s
int must_succeed)
{
mfn_t mfn;
- int ret;
mfn = gfn_to_mfn(d, gfn, p2mt);
- if(p2m_is_shared(*p2mt))
+#ifdef __x86_64__
+ if ( p2m_is_shared(*p2mt) )
{
- ret = mem_sharing_unshare_page(d, gfn,
- must_succeed ? MEM_SHARING_MUST_SUCCEED : 0);
- if(ret < 0)
+ if ( mem_sharing_unshare_page(d, gfn,
+ must_succeed
+ ? MEM_SHARING_MUST_SUCCEED : 0) )
{
BUG_ON(must_succeed);
return mfn;
}
mfn = gfn_to_mfn(d, gfn, p2mt);
}
+#endif
return mfn;
}
@@ -438,10 +441,11 @@ p2m_type_t p2m_change_type(struct domain
/* Set mmio addresses in the p2m table (for pass-through) */
int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
+
+
+#ifdef __x86_64__
/* Modify p2m table for shared gfn */
-int
-set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
-
+int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
/* Check if a nominated gfn is valid to be paged out */
int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn);
/* Evict a frame */
@@ -452,6 +456,10 @@ int p2m_mem_paging_prep(struct domain *d
int p2m_mem_paging_prep(struct domain *d, unsigned long gfn);
/* Resume normal operation (in case a domain was paused) */
void p2m_mem_paging_resume(struct domain *d);
+#else
+static inline void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
+{ }
+#endif
struct page_info *p2m_alloc_ptp(struct domain *d, unsigned long type);
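A standalone sketch of the stub pattern introduced at the end of this header: when a facility exists only in 64-bit builds, the #else branch supplies an empty static inline so callers (the hvm.c hunk above, for instance) compile unchanged on both builds. The config macro and function name here are illustrative, not Xen's.

#include <stdio.h>

#ifdef CONFIG_DEMO_PAGING            /* hypothetical build switch */
static inline void demo_paging_populate(unsigned long gfn)
{
    printf("would populate gfn %#lx\n", gfn);
}
#else
/* No-op stub: the call site needs no #ifdef of its own. */
static inline void demo_paging_populate(unsigned long gfn) { (void)gfn; }
#endif

int main(void)
{
    demo_paging_populate(0x1000);    /* compiles either way */
    return 0;
}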
diff -r eb72d1acf115 -r 380a9e363761 xen/include/public/mem_event.h
--- a/xen/include/public/mem_event.h Tue Jun 29 18:02:35 2010 +0100
+++ b/xen/include/public/mem_event.h Fri Jul 02 17:34:44 2010 +0100
@@ -40,14 +40,14 @@
typedef struct mem_event_shared_page {
- int port;
+ uint32_t port;
} mem_event_shared_page_t;
typedef struct mem_event_st {
- unsigned long gfn;
- unsigned long offset;
- unsigned long p2mt;
- int vcpu_id;
+ uint64_t gfn;
+ uint64_t offset;
+ uint32_t p2mt;
+ int32_t vcpu_id;
uint64_t flags;
} mem_event_request_t, mem_event_response_t;
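A standalone sketch of why the shared mem_event structures move to fixed-width types: unsigned long is 4 bytes in a 32-bit toolstack but 8 bytes in a 64-bit hypervisor, so the two sides would disagree on the ring entry layout, whereas uint64_t/uint32_t give an identical layout everywhere.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the patched layout: every field has the same size and
 * alignment in 32-bit and 64-bit builds. */
typedef struct mem_event_st {
    uint64_t gfn;
    uint64_t offset;
    uint32_t p2mt;
    int32_t  vcpu_id;
    uint64_t flags;
} mem_event_request_t;

int main(void)
{
    /* 8 + 8 + 4 + 4 + 8 = 32 bytes, with no padding, on every ABI. */
    printf("sizeof = %zu\n", sizeof(mem_event_request_t));
    return 0;
}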