# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1193929636 21600
# Node ID 4255ca79f9d9944be4e3e0d4fdaf22aff77e7129
# Parent 41c1731c9125b74a10c8ddf2b7cf0549afb59959
# Parent adefbadab27ce0c83cd58fe4216c3d3521235366
merge with xen-unstable.hg (staging)
---
tools/misc/xenperf.c | 2
tools/python/xen/util/xsm/acm/acm.py | 31 ++------
tools/python/xen/xend/XendOptions.py | 4 +
unmodified_drivers/linux-2.6/compat-include/xen/platform-compat.h | 9 ++
xen/arch/ia64/xen/mm.c | 3
xen/arch/x86/hvm/hvm.c | 21 +++--
xen/arch/x86/hvm/intercept.c | 26 ++++--
xen/arch/x86/hvm/stdvga.c | 6 +
xen/arch/x86/hvm/svm/svm.c | 7 -
xen/arch/x86/hvm/vpt.c | 14 +--
xen/arch/x86/mm/p2m.c | 38 +++++++++---
xen/common/memory.c | 6 +
xen/include/asm-ia64/grant_table.h | 2
xen/include/asm-ia64/shadow.h | 2
xen/include/asm-x86/p2m.h | 15 ++-
xen/include/public/hvm/ioreq.h | 6 -
xen/include/xen/paging.h | 2
17 files changed, 115 insertions(+), 79 deletions(-)
diff -r 41c1731c9125 -r 4255ca79f9d9 tools/misc/xenperf.c
--- a/tools/misc/xenperf.c Thu Nov 01 09:00:27 2007 -0600
+++ b/tools/misc/xenperf.c Thu Nov 01 09:07:16 2007 -0600
@@ -161,7 +161,7 @@ int main(int argc, char *argv[])
if ( pcd == NULL
|| lock_pages(pcd, sizeof(*pcd) * num_desc) != 0
|| pcv == NULL
- || lock_pages(pcd, sizeof(*pcv) * num_val) != 0)
+ || lock_pages(pcv, sizeof(*pcv) * num_val) != 0)
{
fprintf(stderr, "Could not alloc or lock buffers: %d (%s)\n",
errno, strerror(errno));
diff -r 41c1731c9125 -r 4255ca79f9d9 tools/python/xen/util/xsm/acm/acm.py
--- a/tools/python/xen/util/xsm/acm/acm.py Thu Nov 01 09:00:27 2007 -0600
+++ b/tools/python/xen/util/xsm/acm/acm.py Thu Nov 01 09:07:16 2007 -0600
@@ -711,33 +711,24 @@ def unify_resname(resource, mustexist=Tr
except:
err("Resource spec '%s' contains no tap subtype" % resource)
- import os
- if typ in ["phy", "tap"]:
+ if typ in ["phy"]:
if not resfile.startswith("/"):
resfile = "/dev/" + resfile
if mustexist:
- stats = os.lstat(resfile)
- if stat.S_ISLNK(stats[stat.ST_MODE]):
- resolved = os.readlink(resfile)
- if resolved[0] != "/":
- resfile = os.path.join(os.path.dirname(resfile), resolved)
- resfile = os.path.abspath(resfile)
- else:
- resfile = resolved
+ resfile = os.path.realpath(resfile)
+ try:
stats = os.lstat(resfile)
- if not (stat.S_ISBLK(stats[stat.ST_MODE])):
+ if not (stat.S_ISBLK(stats[stat.ST_MODE])):
+ err("Invalid resource")
+ except:
err("Invalid resource")
if typ in [ "file", "tap" ]:
- if mustexist:
- stats = os.lstat(resfile)
- if stat.S_ISLNK(stats[stat.ST_MODE]):
- resfile = os.readlink(resfile)
- stats = os.lstat(resfile)
- if not stat.S_ISREG(stats[stat.ST_MODE]):
- err("Invalid resource")
-
- #file: resources must specified with absolute path
+ resfile = os.path.realpath(resfile)
+ if mustexist and not os.path.isfile(resfile):
+ err("Invalid resource")
+
+ #file: resources must be specified with absolute path
#vlan resources don't start with '/'
if typ != "vlan":
if (not resfile.startswith("/")) or \
diff -r 41c1731c9125 -r 4255ca79f9d9 tools/python/xen/xend/XendOptions.py
--- a/tools/python/xen/xend/XendOptions.py Thu Nov 01 09:00:27 2007 -0600
+++ b/tools/python/xen/xend/XendOptions.py Thu Nov 01 09:07:16 2007 -0600
@@ -386,6 +386,10 @@ if os.uname()[0] == 'SunOS':
return scf.get_bool(name)
except scf.error, e:
if e[0] == scf.SCF_ERROR_NOT_FOUND:
+ if val in ['yes', 'y', '1', 'on', 'true', 't']:
+ return True
+ if val in ['no', 'n', '0', 'off', 'false', 'f']:
+ return False
return val
else:
raise XendError("option %s: %s:%s" % (name, e[1], e[2]))
diff -r 41c1731c9125 -r 4255ca79f9d9 unmodified_drivers/linux-2.6/compat-include/xen/platform-compat.h
--- a/unmodified_drivers/linux-2.6/compat-include/xen/platform-compat.h Thu Nov 01 09:00:27 2007 -0600
+++ b/unmodified_drivers/linux-2.6/compat-include/xen/platform-compat.h Thu Nov 01 09:07:16 2007 -0600
@@ -44,11 +44,18 @@
#define gfp_t unsigned
#endif
-#if defined (_LINUX_NOTIFIER_H) && !defined ATOMIC_NOTIFIER_HEAD
+#if defined(_LINUX_NOTIFIER_H) && !defined(ATOMIC_NOTIFIER_HEAD)
#define ATOMIC_NOTIFIER_HEAD(name) struct notifier_block *name
#define atomic_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb)
#define atomic_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb)
#define atomic_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v)
+#endif
+
+#if defined(_LINUX_NOTIFIER_H) && !defined(BLOCKING_NOTIFIER_HEAD)
+#define BLOCKING_NOTIFIER_HEAD(name) struct notifier_block *name
+#define blocking_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb)
+#define blocking_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb)
+#define blocking_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v)
#endif
#if defined(_LINUX_MM_H) && defined set_page_count
diff -r 41c1731c9125 -r 4255ca79f9d9 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c Thu Nov 01 09:00:27 2007 -0600
+++ b/xen/arch/ia64/xen/mm.c Thu Nov 01 09:07:16 2007 -0600
@@ -2412,7 +2412,7 @@ steal_page(struct domain *d, struct page
return 0;
}
-void
+int
guest_physmap_add_page(struct domain *d, unsigned long gpfn,
unsigned long mfn)
{
@@ -2426,6 +2426,7 @@ guest_physmap_add_page(struct domain *d,
//BUG_ON(mfn != ((lookup_domain_mpa(d, gpfn << PAGE_SHIFT) & _PFN_MASK) >> PAGE_SHIFT));
perfc_incr(guest_physmap_add_page);
+ return 0;
}
void
diff -r 41c1731c9125 -r 4255ca79f9d9 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Thu Nov 01 09:00:27 2007 -0600
+++ b/xen/arch/x86/hvm/hvm.c Thu Nov 01 09:07:16 2007 -0600
@@ -50,9 +50,15 @@
#include <public/version.h>
#include <public/memory.h>
-/* Xen command-line option to disable hardware-assisted paging */
-static int opt_hap_disabled;
-invbool_param("hap", opt_hap_disabled);
+/*
+ * Xen command-line option to allow/disallow hardware-assisted paging.
+ * Since the phys-to-machine table of AMD NPT is in host format, 32-bit Xen
+ * can only support guests using NPT with up to a 4GB memory map. Therefore
+ * we disallow HAP by default on PAE Xen (by default we want to support an
+ * 8GB pseudophysical memory map for HVM guests on a PAE host).
+ */
+static int opt_hap_permitted = (CONFIG_PAGING_LEVELS != 3);
+boolean_param("hap", opt_hap_permitted);
int hvm_enabled __read_mostly;
@@ -82,10 +88,10 @@ void hvm_enable(struct hvm_function_tabl
if ( hvm_funcs.hap_supported )
{
- if ( opt_hap_disabled )
+ if ( !opt_hap_permitted )
hvm_funcs.hap_supported = 0;
- printk("HVM: Hardware Assisted Paging %sabled\n",
- hvm_funcs.hap_supported ? "en" : "dis");
+ printk("HVM: Hardware Assisted Paging detected %s.\n",
+ hvm_funcs.hap_supported ? "and enabled" : "but disabled");
}
}
@@ -1849,7 +1855,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
case HVM_PARAM_TIMER_MODE:
rc = -EINVAL;
if ( (a.value != HVMPTM_delay_for_missed_ticks) &&
- (a.value != HVMPTM_no_delay_for_missed_ticks) )
+ (a.value != HVMPTM_no_delay_for_missed_ticks) &&
+ (a.value != HVMPTM_no_missed_tick_accounting) )
goto param_fail;
break;
}
diff -r 41c1731c9125 -r 4255ca79f9d9 xen/arch/x86/hvm/intercept.c
--- a/xen/arch/x86/hvm/intercept.c Thu Nov 01 09:00:27 2007 -0600
+++ b/xen/arch/x86/hvm/intercept.c Thu Nov 01 09:07:16 2007 -0600
@@ -157,19 +157,26 @@ int hvm_buffered_io_send(ioreq_t *p)
struct hvm_ioreq_page *iorp = &v->domain->arch.hvm_domain.buf_ioreq;
buffered_iopage_t *pg = iorp->va;
buf_ioreq_t bp;
- /* Timeoffset sends 64b data, but no address. Use two consecutive slots. */
+ /* Timeoffset sends 64b data, but no address. Use two consecutive slots. */
int qw = 0;
/* Ensure buffered_iopage fits in a page */
BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
/* Return 0 for the cases we can't deal with. */
- if (p->addr > 0xffffful || p->data_is_ptr || p->df || p->count != 1)
+ if ( (p->addr > 0xffffful) || p->data_is_ptr || p->df || (p->count != 1) )
+ {
+ gdprintk(XENLOG_DEBUG, "slow ioreq. type:%d size:%"PRIu64" addr:0x%"
+ PRIx64" dir:%d ptr:%d df:%d count:%"PRIu64"\n",
+ p->type, p->size, p->addr, !!p->dir,
+ !!p->data_is_ptr, !!p->df, p->count);
return 0;
+ }
bp.type = p->type;
bp.dir = p->dir;
- switch (p->size) {
+ switch ( p->size )
+ {
case 1:
bp.size = 0;
break;
@@ -182,8 +189,6 @@ int hvm_buffered_io_send(ioreq_t *p)
case 8:
bp.size = 3;
qw = 1;
- gdprintk(XENLOG_INFO, "quadword ioreq type:%d data:%"PRIx64"\n",
- p->type, p->data);
break;
default:
gdprintk(XENLOG_WARNING, "unexpected ioreq size:%"PRId64"\n", p->size);
@@ -191,11 +196,12 @@ int hvm_buffered_io_send(ioreq_t *p)
}
bp.data = p->data;
- bp.addr = qw ? ((p->data >> 16) & 0xfffful) : (p->addr & 0xffffful);
+ bp.addr = p->addr;
spin_lock(&iorp->lock);
- if ( (pg->write_pointer - pg->read_pointer) >= IOREQ_BUFFER_SLOT_NUM - (qw ? 1 : 0))
+ if ( (pg->write_pointer - pg->read_pointer) >=
+ (IOREQ_BUFFER_SLOT_NUM - qw) )
{
/* The queue is full: send the iopacket through the normal path. */
spin_unlock(&iorp->lock);
@@ -205,9 +211,9 @@ int hvm_buffered_io_send(ioreq_t *p)
memcpy(&pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
&bp, sizeof(bp));
- if (qw) {
+ if ( qw )
+ {
bp.data = p->data >> 32;
- bp.addr = (p->data >> 48) & 0xfffful;
memcpy(&pg->buf_ioreq[(pg->write_pointer+1) % IOREQ_BUFFER_SLOT_NUM],
&bp, sizeof(bp));
}
@@ -215,7 +221,7 @@ int hvm_buffered_io_send(ioreq_t *p)
/* Make the ioreq_t visible /before/ write_pointer. */
wmb();
pg->write_pointer += qw ? 2 : 1;
-
+
spin_unlock(&iorp->lock);
return 1;
diff -r 41c1731c9125 -r 4255ca79f9d9 xen/arch/x86/hvm/stdvga.c
--- a/xen/arch/x86/hvm/stdvga.c Thu Nov 01 09:00:27 2007 -0600
+++ b/xen/arch/x86/hvm/stdvga.c Thu Nov 01 09:07:16 2007 -0600
@@ -296,6 +296,8 @@ int stdvga_intercept_pio(ioreq_t *p)
{
if ( p->size != 1 )
gdprintk(XENLOG_WARNING, "unexpected io size:%d\n", (int)p->size);
+ if ( p->data_is_ptr )
+ gdprintk(XENLOG_WARNING, "unexpected data_is_ptr\n");
if ( !((p->addr == 0x3c5) && (s->sr_index >= sizeof(sr_mask))) &&
!((p->addr == 0x3cf) && (s->gr_index >= sizeof(gr_mask))) )
{
@@ -643,6 +645,10 @@ int stdvga_intercept_mmio(ioreq_t *p)
s->cache = 0;
}
}
+ else
+ {
+ buf = (p->dir == IOREQ_WRITE);
+ }
rc = (buf && hvm_buffered_io_send(p));
diff -r 41c1731c9125 -r 4255ca79f9d9 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Thu Nov 01 09:00:27 2007 -0600
+++ b/xen/arch/x86/hvm/svm/svm.c Thu Nov 01 09:07:16 2007 -0600
@@ -940,14 +940,7 @@ int start_svm(struct cpuinfo_x86 *c)
svm_feature_flags = ((cpuid_eax(0x80000000) >= 0x8000000A) ?
cpuid_edx(0x8000000A) : 0);
-#ifdef __x86_64__
- /*
- * Check CPUID for nested paging support. We support NPT only on 64-bit
- * hosts since the phys-to-machine table is in host format. Hence 32-bit
- * Xen could only support guests using NPT with up to a 4GB memory map.
- */
svm_function_table.hap_supported = cpu_has_svm_npt;
-#endif
hvm_enable(&svm_function_table);
diff -r 41c1731c9125 -r 4255ca79f9d9 xen/arch/x86/hvm/vpt.c
--- a/xen/arch/x86/hvm/vpt.c Thu Nov 01 09:00:27 2007 -0600
+++ b/xen/arch/x86/hvm/vpt.c Thu Nov 01 09:07:16 2007 -0600
@@ -118,7 +118,14 @@ void pt_restore_timer(struct vcpu *v)
list_for_each_entry ( pt, head, list )
{
if ( !mode_is(v->domain, no_missed_tick_accounting) )
+ {
pt_process_missed_ticks(pt);
+ }
+ else if ( (NOW() - pt->scheduled) >= 0 )
+ {
+ pt->pending_intr_nr++;
+ pt->scheduled = NOW() + pt->period;
+ }
set_timer(&pt->timer, pt->scheduled);
}
@@ -139,14 +146,7 @@ static void pt_timer_fn(void *data)
{
pt->scheduled += pt->period;
if ( !mode_is(pt->vcpu->domain, no_missed_tick_accounting) )
- {
pt_process_missed_ticks(pt);
- }
- else if ( (NOW() - pt->scheduled) >= 0 )
- {
- pt->pending_intr_nr++;
- pt->scheduled = NOW() + pt->period;
- }
set_timer(&pt->timer, pt->scheduled);
}
diff -r 41c1731c9125 -r 4255ca79f9d9 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Thu Nov 01 09:00:27 2007 -0600
+++ b/xen/arch/x86/mm/p2m.c Thu Nov 01 09:07:16 2007 -0600
@@ -219,15 +219,17 @@ set_p2m_entry(struct domain *d, unsigned
goto out;
#endif
#if CONFIG_PAGING_LEVELS >= 3
- // When using PAE Xen, we only allow 33 bits of pseudo-physical
- // address in translated guests (i.e. 8 GBytes). This restriction
- // comes from wanting to map the P2M table into the 16MB RO_MPT hole
- // in Xen's address space for translated PV guests.
- //
+ /*
+ * When using PAE Xen, we only allow 33 bits of pseudo-physical
+ * address in translated guests (i.e. 8 GBytes). This restriction
+ * comes from wanting to map the P2M table into the 16MB RO_MPT hole
+ * in Xen's address space for translated PV guests.
+ * When using AMD's NPT on PAE Xen, we are restricted to 4GB.
+ */
if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
L3_PAGETABLE_SHIFT - PAGE_SHIFT,
- (CONFIG_PAGING_LEVELS == 3
- ? 8
+ ((CONFIG_PAGING_LEVELS == 3)
+ ? (hvm_funcs.hap_supported ? 4 : 8)
: L3_PAGETABLE_ENTRIES),
PGT_l2_page_table) )
goto out;
@@ -686,16 +688,26 @@ guest_physmap_remove_page(struct domain
p2m_unlock(d);
}
-void
+int
guest_physmap_add_entry(struct domain *d, unsigned long gfn,
unsigned long mfn, p2m_type_t t)
{
unsigned long ogfn;
p2m_type_t ot;
mfn_t omfn;
+ int rc = 0;
if ( !paging_mode_translate(d) )
- return;
+ return -EINVAL;
+
+#if CONFIG_PAGING_LEVELS == 3
+ /* 32bit PAE nested paging does not support over 4GB guest due to
+ * hardware translation limit. This limitation is checked by comparing
+ * gfn with 0xfffffUL.
+ */
+ if ( paging_mode_hap(d) && (gfn > 0xfffffUL) )
+ return -EINVAL;
+#endif
p2m_lock(d);
audit_p2m(d);
@@ -735,18 +747,22 @@ guest_physmap_add_entry(struct domain *d
if ( mfn_valid(_mfn(mfn)) )
{
- set_p2m_entry(d, gfn, _mfn(mfn), t);
+ if ( !set_p2m_entry(d, gfn, _mfn(mfn), t) )
+ rc = -EINVAL;
set_gpfn_from_mfn(mfn, gfn);
}
else
{
gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
gfn, mfn);
- set_p2m_entry(d, gfn, _mfn(INVALID_MFN), p2m_invalid);
+ if ( !set_p2m_entry(d, gfn, _mfn(INVALID_MFN), p2m_invalid) )
+ rc = -EINVAL;
}
audit_p2m(d);
p2m_unlock(d);
+
+ return rc;
}
/* Walk the whole p2m table, changing any entries of the old type
diff -r 41c1731c9125 -r 4255ca79f9d9 xen/common/memory.c
--- a/xen/common/memory.c Thu Nov 01 09:00:27 2007 -0600
+++ b/xen/common/memory.c Thu Nov 01 09:07:16 2007 -0600
@@ -131,7 +131,8 @@ static void populate_physmap(struct memo
if ( unlikely(paging_mode_translate(d)) )
{
for ( j = 0; j < (1 << a->extent_order); j++ )
- guest_physmap_add_page(d, gpfn + j, mfn + j);
+ if ( guest_physmap_add_page(d, gpfn + j, mfn + j) )
+ goto out;
}
else
{
@@ -445,8 +446,9 @@ static long memory_exchange(XEN_GUEST_HA
mfn = page_to_mfn(page);
if ( unlikely(paging_mode_translate(d)) )
{
+ /* Ignore failure here. There's nothing we can do. */
for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
- guest_physmap_add_page(d, gpfn + k, mfn + k);
+ (void)guest_physmap_add_page(d, gpfn + k, mfn + k);
}
else
{
diff -r 41c1731c9125 -r 4255ca79f9d9 xen/include/asm-ia64/grant_table.h
--- a/xen/include/asm-ia64/grant_table.h Thu Nov 01 09:00:27 2007 -0600
+++ b/xen/include/asm-ia64/grant_table.h Thu Nov 01 09:07:16 2007 -0600
@@ -12,7 +12,7 @@ int replace_grant_host_mapping(unsigned
int replace_grant_host_mapping(unsigned long gpaddr, unsigned long mfn,
unsigned long new_gpaddr, unsigned int flags);
// for grant transfer
-void guest_physmap_add_page(struct domain *d, unsigned long gpfn, unsigned long mfn);
+int guest_physmap_add_page(struct domain *d, unsigned long gpfn, unsigned long mfn);
/* XXX
* somewhere appropriate
diff -r 41c1731c9125 -r 4255ca79f9d9 xen/include/asm-ia64/shadow.h
--- a/xen/include/asm-ia64/shadow.h Thu Nov 01 09:00:27 2007 -0600
+++ b/xen/include/asm-ia64/shadow.h Thu Nov 01 09:07:16 2007 -0600
@@ -40,7 +40,7 @@
* Utilities to change relationship of gpfn->mfn for designated domain,
* which is required by gnttab transfer, balloon, device model and etc.
*/
-void guest_physmap_add_page(struct domain *d, unsigned long gpfn, unsigned long mfn);
+int guest_physmap_add_page(struct domain *d, unsigned long gpfn, unsigned long mfn);
void guest_physmap_remove_page(struct domain *d, unsigned long gpfn, unsigned long mfn);
static inline int
diff -r 41c1731c9125 -r 4255ca79f9d9 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Nov 01 09:00:27 2007 -0600
+++ b/xen/include/asm-x86/p2m.h Thu Nov 01 09:07:16 2007 -0600
@@ -201,14 +201,17 @@ void p2m_teardown(struct domain *d);
void p2m_teardown(struct domain *d);
/* Add a page to a domain's p2m table */
-void guest_physmap_add_entry(struct domain *d, unsigned long gfn,
+int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
unsigned long mfn, p2m_type_t t);
-/* Untyped version for RAM only, for compatibility */
-static inline void guest_physmap_add_page(struct domain *d, unsigned long gfn,
- unsigned long mfn)
-{
- guest_physmap_add_entry(d, gfn, mfn, p2m_ram_rw);
+/* Untyped version for RAM only, for compatibility
+ *
+ * Return 0 for success
+ */
+static inline int guest_physmap_add_page(struct domain *d, unsigned long gfn,
+ unsigned long mfn)
+{
+ return guest_physmap_add_entry(d, gfn, mfn, p2m_ram_rw);
}
/* Remove a page from a domain's p2m table */
diff -r 41c1731c9125 -r 4255ca79f9d9 xen/include/public/hvm/ioreq.h
--- a/xen/include/public/hvm/ioreq.h Thu Nov 01 09:00:27 2007 -0600
+++ b/xen/include/public/hvm/ioreq.h Thu Nov 01 09:07:16 2007 -0600
@@ -80,9 +80,9 @@ struct buf_ioreq {
struct buf_ioreq {
uint8_t type; /* I/O type */
uint8_t dir:1; /* 1=read, 0=write */
- uint8_t size:2; /* 0=>1, 1=>2, 3=>8. If 8 then use two contig buf_ioreqs */
- uint16_t data; /* (low order) data */
- uint32_t addr; /* physical address or high-order data */
+ uint8_t size:2; /* 0=>1, 1=>2, 2=>4, 3=>8. If 8, use two buf_ioreqs */
+ uint32_t addr:20;/* physical address */
+ uint32_t data; /* data */
};
typedef struct buf_ioreq buf_ioreq_t;
diff -r 41c1731c9125 -r 4255ca79f9d9 xen/include/xen/paging.h
--- a/xen/include/xen/paging.h Thu Nov 01 09:00:27 2007 -0600
+++ b/xen/include/xen/paging.h Thu Nov 01 09:07:16 2007 -0600
@@ -18,7 +18,7 @@
#else
#define paging_mode_translate(d) (0)
-#define guest_physmap_add_page(d, p, m) ((void)0)
+#define guest_physmap_add_page(d, p, m) (0)
#define guest_physmap_remove_page(d, p, m) ((void)0)
#endif
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|