# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Node ID 2db4388fecb9d08fa2a435d2bfcc3026fb14f218
# Parent d93280670c3f315e2d48200564ed2220c1542209
[HVM] Use correct types for guest physical addresses
Guest physical addresses are not guaranteed to fit in either a pointer
or an unsigned long int; use paddr_t for them.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
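Reviewer note, not part of the patch: the short standalone C sketch below illustrates the truncation the commit message describes. The paddr_t typedef is a local stand-in, assumed (as on x86 Xen builds of this era) to be a 64-bit unsigned integer; nothing here is taken from the Xen tree.

/* Minimal, standalone illustration (not Xen code). */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t paddr_t;                    /* stand-in for Xen's per-arch typedef */

int main(void)
{
    paddr_t gpa = 0x123456780ULL;            /* a guest physical address above 4GB */

    unsigned long as_ulong = (unsigned long)gpa;    /* truncated if long is 32 bits */
    void *as_ptr = (void *)(uintptr_t)gpa;          /* truncated if pointers are 32 bits */

    printf("gpa        = 0x%llx\n", (unsigned long long)gpa);
    printf("as ulong   = 0x%lx%s\n", as_ulong,
           sizeof(unsigned long) < sizeof(paddr_t) ? "  (high bits lost)" : "");
    printf("as pointer = %p%s\n", as_ptr,
           sizeof(void *) < sizeof(paddr_t) ? "  (high bits lost)" : "");
    return 0;
}

On a 64-bit hypervisor build all three widths coincide, so the truncation only bites 32-bit builds handling PAE guests, or any other configuration where guest physical addresses are wider than the native word.
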
tools/ioemu/target-i386-dm/helper2.c | 62 ++++++++++++------------
tools/libxc/xc_hvm_build.c | 1
xen/arch/ia64/vmx/mmio.c | 16 +++---
xen/arch/x86/hvm/hvm.c | 33 +++++-------
xen/arch/x86/hvm/i8254.c | 12 ++--
xen/arch/x86/hvm/i8259.c | 28 ++++------
xen/arch/x86/hvm/intercept.c | 48 +++++++++---------
xen/arch/x86/hvm/io.c | 90 +++++++++++++++++------------------
xen/arch/x86/hvm/platform.c | 32 ++++++------
xen/arch/x86/hvm/pmtimer.c | 4 -
xen/arch/x86/hvm/rtc.c | 6 +-
xen/arch/x86/hvm/svm/svm.c | 4 -
xen/arch/x86/mm/shadow/multi.c | 10 +--
xen/arch/x86/mm/shadow/types.h | 15 +++++
xen/include/asm-x86/hvm/io.h | 2
xen/include/asm-x86/hvm/support.h | 4 -
xen/include/asm-x86/shadow.h | 23 ++------
xen/include/public/hvm/ioreq.h | 8 +--
18 files changed, 191 insertions(+), 207 deletions(-)
diff -r d93280670c3f -r 2db4388fecb9 tools/ioemu/target-i386-dm/helper2.c
--- a/tools/ioemu/target-i386-dm/helper2.c Tue Oct 31 16:22:39 2006 +0000
+++ b/tools/ioemu/target-i386-dm/helper2.c Tue Oct 31 16:42:46 2006 +0000
@@ -193,10 +193,10 @@ void sp_info()
for (i = 0; i < vcpus; i++) {
req = &(shared_page->vcpu_iodata[i].vp_ioreq);
term_printf("vcpu %d: event port %d\n", i, ioreq_local_port[i]);
- term_printf(" req state: %x, pvalid: %x, addr: %"PRIx64", "
+ term_printf(" req state: %x, ptr: %x, addr: %"PRIx64", "
"data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
- req->state, req->pdata_valid, req->addr,
- req->u.data, req->count, req->size);
+ req->state, req->data_is_ptr, req->addr,
+ req->data, req->count, req->size);
term_printf(" IO totally occurred on this vcpu: %"PRIx64"\n",
req->io_count);
}
@@ -216,10 +216,10 @@ static ioreq_t *__cpu_get_ioreq(int vcpu
}
fprintf(logfile, "False I/O request ... in-service already: "
- "%x, pvalid: %x, port: %"PRIx64", "
+ "%x, ptr: %x, port: %"PRIx64", "
"data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
- req->state, req->pdata_valid, req->addr,
- req->u.data, req->count, req->size);
+ req->state, req->data_is_ptr, req->addr,
+ req->data, req->count, req->size);
return NULL;
}
@@ -305,26 +305,26 @@ void cpu_ioreq_pio(CPUState *env, ioreq_
sign = req->df ? -1 : 1;
if (req->dir == IOREQ_READ) {
- if (!req->pdata_valid) {
- req->u.data = do_inp(env, req->addr, req->size);
+ if (!req->data_is_ptr) {
+ req->data = do_inp(env, req->addr, req->size);
} else {
unsigned long tmp;
for (i = 0; i < req->count; i++) {
tmp = do_inp(env, req->addr, req->size);
- write_physical((target_phys_addr_t) req->u.pdata
+ write_physical((target_phys_addr_t) req->data
+ (sign * i * req->size),
req->size, &tmp);
}
}
} else if (req->dir == IOREQ_WRITE) {
- if (!req->pdata_valid) {
- do_outp(env, req->addr, req->size, req->u.data);
+ if (!req->data_is_ptr) {
+ do_outp(env, req->addr, req->size, req->data);
} else {
for (i = 0; i < req->count; i++) {
unsigned long tmp;
- read_physical((target_phys_addr_t) req->u.pdata
+ read_physical((target_phys_addr_t) req->data
+ (sign * i * req->size),
req->size, &tmp);
do_outp(env, req->addr, req->size, tmp);
@@ -339,18 +339,18 @@ void cpu_ioreq_move(CPUState *env, ioreq
sign = req->df ? -1 : 1;
- if (!req->pdata_valid) {
+ if (!req->data_is_ptr) {
if (req->dir == IOREQ_READ) {
for (i = 0; i < req->count; i++) {
read_physical(req->addr
+ (sign * i * req->size),
- req->size, &req->u.data);
+ req->size, &req->data);
}
} else if (req->dir == IOREQ_WRITE) {
for (i = 0; i < req->count; i++) {
write_physical(req->addr
+ (sign * i * req->size),
- req->size, &req->u.data);
+ req->size, &req->data);
}
}
} else {
@@ -361,13 +361,13 @@ void cpu_ioreq_move(CPUState *env, ioreq
read_physical(req->addr
+ (sign * i * req->size),
req->size, &tmp);
- write_physical((target_phys_addr_t )req->u.pdata
+ write_physical((target_phys_addr_t )req->data
+ (sign * i * req->size),
req->size, &tmp);
}
} else if (req->dir == IOREQ_WRITE) {
for (i = 0; i < req->count; i++) {
- read_physical((target_phys_addr_t) req->u.pdata
+ read_physical((target_phys_addr_t) req->data
+ (sign * i * req->size),
req->size, &tmp);
write_physical(req->addr
@@ -382,66 +382,66 @@ void cpu_ioreq_and(CPUState *env, ioreq_
{
unsigned long tmp1, tmp2;
- if (req->pdata_valid != 0)
+ if (req->data_is_ptr != 0)
hw_error("expected scalar value");
read_physical(req->addr, req->size, &tmp1);
if (req->dir == IOREQ_WRITE) {
- tmp2 = tmp1 & (unsigned long) req->u.data;
+ tmp2 = tmp1 & (unsigned long) req->data;
write_physical(req->addr, req->size, &tmp2);
}
- req->u.data = tmp1;
+ req->data = tmp1;
}
void cpu_ioreq_add(CPUState *env, ioreq_t *req)
{
unsigned long tmp1, tmp2;
- if (req->pdata_valid != 0)
+ if (req->data_is_ptr != 0)
hw_error("expected scalar value");
read_physical(req->addr, req->size, &tmp1);
if (req->dir == IOREQ_WRITE) {
- tmp2 = tmp1 + (unsigned long) req->u.data;
+ tmp2 = tmp1 + (unsigned long) req->data;
write_physical(req->addr, req->size, &tmp2);
}
- req->u.data = tmp1;
+ req->data = tmp1;
}
void cpu_ioreq_or(CPUState *env, ioreq_t *req)
{
unsigned long tmp1, tmp2;
- if (req->pdata_valid != 0)
+ if (req->data_is_ptr != 0)
hw_error("expected scalar value");
read_physical(req->addr, req->size, &tmp1);
if (req->dir == IOREQ_WRITE) {
- tmp2 = tmp1 | (unsigned long) req->u.data;
+ tmp2 = tmp1 | (unsigned long) req->data;
write_physical(req->addr, req->size, &tmp2);
}
- req->u.data = tmp1;
+ req->data = tmp1;
}
void cpu_ioreq_xor(CPUState *env, ioreq_t *req)
{
unsigned long tmp1, tmp2;
- if (req->pdata_valid != 0)
+ if (req->data_is_ptr != 0)
hw_error("expected scalar value");
read_physical(req->addr, req->size, &tmp1);
if (req->dir == IOREQ_WRITE) {
- tmp2 = tmp1 ^ (unsigned long) req->u.data;
+ tmp2 = tmp1 ^ (unsigned long) req->data;
write_physical(req->addr, req->size, &tmp2);
}
- req->u.data = tmp1;
+ req->data = tmp1;
}
void __handle_ioreq(CPUState *env, ioreq_t *req)
{
- if (!req->pdata_valid && req->dir == IOREQ_WRITE && req->size != 4)
- req->u.data &= (1UL << (8 * req->size)) - 1;
+ if (!req->data_is_ptr && req->dir == IOREQ_WRITE && req->size != 4)
+ req->data &= (1UL << (8 * req->size)) - 1;
switch (req->type) {
case IOREQ_TYPE_PIO:
diff -r d93280670c3f -r 2db4388fecb9 tools/libxc/xc_hvm_build.c
--- a/tools/libxc/xc_hvm_build.c Tue Oct 31 16:22:39 2006 +0000
+++ b/tools/libxc/xc_hvm_build.c Tue Oct 31 16:42:46 2006 +0000
@@ -12,7 +12,6 @@
#include <unistd.h>
#include <zlib.h>
#include <xen/hvm/hvm_info_table.h>
-#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>
#include <xen/hvm/e820.h>
diff -r d93280670c3f -r 2db4388fecb9 xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/ia64/vmx/mmio.c Tue Oct 31 16:42:46 2006 +0000
@@ -214,8 +214,8 @@ static void low_mmio_access(VCPU *vcpu,
p->count = 1;
p->dir = dir;
if(dir==IOREQ_WRITE) //write;
- p->u.data = *val;
- p->pdata_valid = 0;
+ p->data = *val;
+ p->data_is_ptr = 0;
p->type = 1;
p->df = 0;
@@ -227,7 +227,7 @@ static void low_mmio_access(VCPU *vcpu,
}else
vmx_send_assist_req(v);
if(dir==IOREQ_READ){ //read
- *val=p->u.data;
+ *val=p->data;
}
return;
}
@@ -249,8 +249,8 @@ static void legacy_io_access(VCPU *vcpu,
p->count = 1;
p->dir = dir;
if(dir==IOREQ_WRITE) //write;
- p->u.data = *val;
- p->pdata_valid = 0;
+ p->data = *val;
+ p->data_is_ptr = 0;
p->type = 0;
p->df = 0;
@@ -258,15 +258,15 @@ static void legacy_io_access(VCPU *vcpu,
vmx_send_assist_req(v);
if(dir==IOREQ_READ){ //read
- *val=p->u.data;
+ *val=p->data;
}
#ifdef DEBUG_PCI
if(dir==IOREQ_WRITE)
if(p->addr == 0xcf8UL)
- printk("Write 0xcf8, with val [0x%lx]\n", p->u.data);
+ printk("Write 0xcf8, with val [0x%lx]\n", p->data);
else
if(p->addr == 0xcfcUL)
- printk("Read 0xcfc, with val [0x%lx]\n", p->u.data);
+ printk("Read 0xcfc, with val [0x%lx]\n", p->data);
#endif //DEBUG_PCI
return;
}
diff -r d93280670c3f -r 2db4388fecb9 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/hvm.c Tue Oct 31 16:42:46 2006 +0000
@@ -406,16 +406,13 @@ void hvm_hlt(unsigned long rflags)
/*
* __hvm_copy():
* @buf = hypervisor buffer
- * @addr = guest virtual or physical address to copy to/from
+ * @addr = guest physical address to copy to/from
* @size = number of bytes to copy
* @dir = copy *to* guest (TRUE) or *from* guest (FALSE)?
- * @phy = interpret addr as physical (TRUE) or virtual (FALSE) address?
* Returns number of bytes failed to copy (0 == complete success).
*/
-static int __hvm_copy(
- void *buf, unsigned long addr, int size, int dir, int phy)
-{
- struct vcpu *v = current;
+static int __hvm_copy(void *buf, paddr_t addr, int size, int dir)
+{
unsigned long mfn;
char *p;
int count, todo;
@@ -425,9 +422,7 @@ static int __hvm_copy(
{
count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
- mfn = phy ?
- get_mfn_from_gpfn(addr >> PAGE_SHIFT) :
- mfn_x(sh_vcpu_gfn_to_mfn(v, shadow_gva_to_gfn(v, addr)));
+ mfn = get_mfn_from_gpfn(addr >> PAGE_SHIFT);
if ( mfn == INVALID_MFN )
return todo;
@@ -448,24 +443,24 @@ static int __hvm_copy(
return 0;
}
-int hvm_copy_to_guest_phys(unsigned long paddr, void *buf, int size)
-{
- return __hvm_copy(buf, paddr, size, 1, 1);
-}
-
-int hvm_copy_from_guest_phys(void *buf, unsigned long paddr, int size)
-{
- return __hvm_copy(buf, paddr, size, 0, 1);
+int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size)
+{
+ return __hvm_copy(buf, paddr, size, 1);
+}
+
+int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size)
+{
+ return __hvm_copy(buf, paddr, size, 0);
}
int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size)
{
- return __hvm_copy(buf, vaddr, size, 1, 0);
+ return __hvm_copy(buf, shadow_gva_to_gpa(current, vaddr), size, 1);
}
int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size)
{
- return __hvm_copy(buf, vaddr, size, 0, 0);
+ return __hvm_copy(buf, shadow_gva_to_gpa(current, vaddr), size, 0);
}
/*
diff -r d93280670c3f -r 2db4388fecb9 xen/arch/x86/hvm/i8254.c
--- a/xen/arch/x86/hvm/i8254.c Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/i8254.c Tue Oct 31 16:42:46 2006 +0000
@@ -392,17 +392,17 @@ static int handle_pit_io(ioreq_t *p)
struct PITState *vpit = &(v->domain->arch.hvm_domain.pl_time.vpit);
if (p->size != 1 ||
- p->pdata_valid ||
+ p->data_is_ptr ||
p->type != IOREQ_TYPE_PIO){
printk("HVM_PIT:wrong PIT IO!\n");
return 1;
}
if (p->dir == 0) {/* write */
- pit_ioport_write(vpit, p->addr, p->u.data);
+ pit_ioport_write(vpit, p->addr, p->data);
} else if (p->dir == 1) { /* read */
if ( (p->addr & 3) != 3 ) {
- p->u.data = pit_ioport_read(vpit, p->addr);
+ p->data = pit_ioport_read(vpit, p->addr);
} else {
printk("HVM_PIT: read A1:A0=3!\n");
}
@@ -434,16 +434,16 @@ static int handle_speaker_io(ioreq_t *p)
struct PITState *vpit = &(v->domain->arch.hvm_domain.pl_time.vpit);
if (p->size != 1 ||
- p->pdata_valid ||
+ p->data_is_ptr ||
p->type != IOREQ_TYPE_PIO){
printk("HVM_SPEAKER:wrong SPEAKER IO!\n");
return 1;
}
if (p->dir == 0) {/* write */
- speaker_ioport_write(vpit, p->addr, p->u.data);
+ speaker_ioport_write(vpit, p->addr, p->data);
} else if (p->dir == 1) {/* read */
- p->u.data = speaker_ioport_read(vpit, p->addr);
+ p->data = speaker_ioport_read(vpit, p->addr);
}
return 1;
diff -r d93280670c3f -r 2db4388fecb9 xen/arch/x86/hvm/i8259.c
--- a/xen/arch/x86/hvm/i8259.c Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/i8259.c Tue Oct 31 16:42:46 2006 +0000
@@ -491,11 +491,10 @@ static int intercept_pic_io(ioreq_t *p)
pic = &current->domain->arch.hvm_domain.vpic;
if ( p->dir == IOREQ_WRITE ) {
- if ( p->pdata_valid )
- (void)hvm_copy_from_guest_phys(
- &data, (unsigned long)p->u.pdata, p->size);
+ if ( p->data_is_ptr )
+ (void)hvm_copy_from_guest_phys(&data, p->data, p->size);
else
- data = p->u.data;
+ data = p->data;
spin_lock_irqsave(&pic->lock, flags);
pic_ioport_write((void*)&pic->pics[p->addr>>7],
(uint32_t) p->addr, (uint32_t) (data & 0xff));
@@ -506,11 +505,10 @@ static int intercept_pic_io(ioreq_t *p)
data = pic_ioport_read(
(void*)&pic->pics[p->addr>>7], (uint32_t) p->addr);
spin_unlock_irqrestore(&pic->lock, flags);
- if ( p->pdata_valid )
- (void)hvm_copy_to_guest_phys(
- (unsigned long)p->u.pdata, &data, p->size);
+ if ( p->data_is_ptr )
+ (void)hvm_copy_to_guest_phys(p->data, &data, p->size);
else
- p->u.data = (u64)data;
+ p->data = (u64)data;
}
return 1;
}
@@ -528,11 +526,10 @@ static int intercept_elcr_io(ioreq_t *p)
s = &current->domain->arch.hvm_domain.vpic;
if ( p->dir == IOREQ_WRITE ) {
- if ( p->pdata_valid )
- (void)hvm_copy_from_guest_phys(
- &data, (unsigned long)p->u.pdata, p->size);
+ if ( p->data_is_ptr )
+ (void)hvm_copy_from_guest_phys(&data, p->data, p->size);
else
- data = p->u.data;
+ data = p->data;
spin_lock_irqsave(&s->lock, flags);
elcr_ioport_write((void*)&s->pics[p->addr&1],
(uint32_t) p->addr, (uint32_t)( data & 0xff));
@@ -543,11 +540,10 @@ static int intercept_elcr_io(ioreq_t *p)
else {
data = (u64) elcr_ioport_read(
(void*)&s->pics[p->addr&1], (uint32_t) p->addr);
- if ( p->pdata_valid )
- (void)hvm_copy_to_guest_phys(
- (unsigned long)p->u.pdata, &data, p->size);
+ if ( p->data_is_ptr )
+ (void)hvm_copy_to_guest_phys(p->data, &data, p->size);
else
- p->u.data = (u64)data;
+ p->data = (u64)data;
}
return 1;
}
diff -r d93280670c3f -r 2db4388fecb9 xen/arch/x86/hvm/intercept.c
--- a/xen/arch/x86/hvm/intercept.c Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/intercept.c Tue Oct 31 16:42:46 2006 +0000
@@ -67,12 +67,12 @@ static inline void hvm_mmio_access(struc
switch ( p->type ) {
case IOREQ_TYPE_COPY:
{
- if ( !p->pdata_valid ) {
+ if ( !p->data_is_ptr ) {
if ( p->dir == IOREQ_READ )
- p->u.data = read_handler(v, p->addr, p->size);
+ p->data = read_handler(v, p->addr, p->size);
else /* p->dir == IOREQ_WRITE */
- write_handler(v, p->addr, p->size, p->u.data);
- } else { /* !p->pdata_valid */
+ write_handler(v, p->addr, p->size, p->data);
+ } else { /* p->data_is_ptr */
int i, sign = (p->df) ? -1 : 1;
if ( p->dir == IOREQ_READ ) {
@@ -81,7 +81,7 @@ static inline void hvm_mmio_access(struc
p->addr + (sign * i * p->size),
p->size);
(void)hvm_copy_to_guest_phys(
- (unsigned long)p->u.pdata + (sign * i * p->size),
+ p->data + (sign * i * p->size),
&data,
p->size);
}
@@ -89,7 +89,7 @@ static inline void hvm_mmio_access(struc
for ( i = 0; i < p->count; i++ ) {
(void)hvm_copy_from_guest_phys(
&data,
- (unsigned long)p->u.pdata + (sign * i * p->size),
+ p->data + (sign * i * p->size),
p->size);
write_handler(v,
p->addr + (sign * i * p->size),
@@ -103,37 +103,37 @@ static inline void hvm_mmio_access(struc
case IOREQ_TYPE_AND:
tmp1 = read_handler(v, p->addr, p->size);
if ( p->dir == IOREQ_WRITE ) {
- tmp2 = tmp1 & (unsigned long) p->u.data;
- write_handler(v, p->addr, p->size, tmp2);
- }
- p->u.data = tmp1;
+ tmp2 = tmp1 & (unsigned long) p->data;
+ write_handler(v, p->addr, p->size, tmp2);
+ }
+ p->data = tmp1;
break;
case IOREQ_TYPE_ADD:
tmp1 = read_handler(v, p->addr, p->size);
if (p->dir == IOREQ_WRITE) {
- tmp2 = tmp1 + (unsigned long) p->u.data;
- write_handler(v, p->addr, p->size, tmp2);
- }
- p->u.data = tmp1;
+ tmp2 = tmp1 + (unsigned long) p->data;
+ write_handler(v, p->addr, p->size, tmp2);
+ }
+ p->data = tmp1;
break;
case IOREQ_TYPE_OR:
tmp1 = read_handler(v, p->addr, p->size);
if ( p->dir == IOREQ_WRITE ) {
- tmp2 = tmp1 | (unsigned long) p->u.data;
- write_handler(v, p->addr, p->size, tmp2);
- }
- p->u.data = tmp1;
+ tmp2 = tmp1 | (unsigned long) p->data;
+ write_handler(v, p->addr, p->size, tmp2);
+ }
+ p->data = tmp1;
break;
case IOREQ_TYPE_XOR:
tmp1 = read_handler(v, p->addr, p->size);
if ( p->dir == IOREQ_WRITE ) {
- tmp2 = tmp1 ^ (unsigned long) p->u.data;
- write_handler(v, p->addr, p->size, tmp2);
- }
- p->u.data = tmp1;
+ tmp2 = tmp1 ^ (unsigned long) p->data;
+ write_handler(v, p->addr, p->size, tmp2);
+ }
+ p->data = tmp1;
break;
case IOREQ_TYPE_XCHG:
@@ -142,8 +142,8 @@ static inline void hvm_mmio_access(struc
* its own local APIC.
*/
tmp1 = read_handler(v, p->addr, p->size);
- write_handler(v, p->addr, p->size, (unsigned long) p->u.data);
- p->u.data = tmp1;
+ write_handler(v, p->addr, p->size, (unsigned long) p->data);
+ p->data = tmp1;
break;
default:
diff -r d93280670c3f -r 2db4388fecb9 xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/io.c Tue Oct 31 16:42:46 2006 +0000
@@ -365,7 +365,7 @@ static void hvm_pio_assist(struct cpu_us
unsigned long old_eax;
int sign = p->df ? -1 : 1;
- if ( p->pdata_valid || (pio_opp->flags & OVERLAP) )
+ if ( p->data_is_ptr || (pio_opp->flags & OVERLAP) )
{
if ( pio_opp->flags & REPZ )
regs->ecx -= p->count;
@@ -376,9 +376,9 @@ static void hvm_pio_assist(struct cpu_us
{
unsigned long addr = pio_opp->addr;
if ( hvm_paging_enabled(current) )
- (void)hvm_copy_to_guest_virt(addr, &p->u.data, p->size);
+ (void)hvm_copy_to_guest_virt(addr, &p->data, p->size);
else
- (void)hvm_copy_to_guest_phys(addr, &p->u.data, p->size);
+ (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
}
regs->edi += sign * p->count * p->size;
}
@@ -394,13 +394,13 @@ static void hvm_pio_assist(struct cpu_us
switch ( p->size )
{
case 1:
- regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
+ regs->eax = (old_eax & 0xffffff00) | (p->data & 0xff);
break;
case 2:
- regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
+ regs->eax = (old_eax & 0xffff0000) | (p->data & 0xffff);
break;
case 4:
- regs->eax = (p->u.data & 0xffffffff);
+ regs->eax = (p->data & 0xffffffff);
break;
default:
printk("Error: %s unknown port size\n", __FUNCTION__);
@@ -425,7 +425,7 @@ static void hvm_mmio_assist(struct cpu_u
case INSTR_MOV:
if (dst & REGISTER) {
index = operand_index(dst);
- set_reg_value(size, index, 0, regs, p->u.data);
+ set_reg_value(size, index, 0, regs, p->data);
}
break;
@@ -433,15 +433,15 @@ static void hvm_mmio_assist(struct cpu_u
if (dst & REGISTER) {
switch (size) {
case BYTE:
- p->u.data &= 0xFFULL;
+ p->data &= 0xFFULL;
break;
case WORD:
- p->u.data &= 0xFFFFULL;
+ p->data &= 0xFFFFULL;
break;
case LONG:
- p->u.data &= 0xFFFFFFFFULL;
+ p->data &= 0xFFFFFFFFULL;
break;
default:
@@ -449,7 +449,7 @@ static void hvm_mmio_assist(struct cpu_u
domain_crash_synchronous();
}
index = operand_index(dst);
- set_reg_value(operand_size(dst), index, 0, regs, p->u.data);
+ set_reg_value(operand_size(dst), index, 0, regs, p->data);
}
break;
@@ -457,21 +457,21 @@ static void hvm_mmio_assist(struct cpu_u
if (dst & REGISTER) {
switch (size) {
case BYTE:
- p->u.data &= 0xFFULL;
- if ( p->u.data & 0x80ULL )
- p->u.data |= 0xFFFFFFFFFFFFFF00ULL;
+ p->data &= 0xFFULL;
+ if ( p->data & 0x80ULL )
+ p->data |= 0xFFFFFFFFFFFFFF00ULL;
break;
case WORD:
- p->u.data &= 0xFFFFULL;
- if ( p->u.data & 0x8000ULL )
- p->u.data |= 0xFFFFFFFFFFFF0000ULL;
+ p->data &= 0xFFFFULL;
+ if ( p->data & 0x8000ULL )
+ p->data |= 0xFFFFFFFFFFFF0000ULL;
break;
case LONG:
- p->u.data &= 0xFFFFFFFFULL;
- if ( p->u.data & 0x80000000ULL )
- p->u.data |= 0xFFFFFFFF00000000ULL;
+ p->data &= 0xFFFFFFFFULL;
+ if ( p->data & 0x80000000ULL )
+ p->data |= 0xFFFFFFFF00000000ULL;
break;
default:
@@ -479,7 +479,7 @@ static void hvm_mmio_assist(struct cpu_u
domain_crash_synchronous();
}
index = operand_index(dst);
- set_reg_value(operand_size(dst), index, 0, regs, p->u.data);
+ set_reg_value(operand_size(dst), index, 0, regs, p->data);
}
break;
@@ -493,9 +493,9 @@ static void hvm_mmio_assist(struct cpu_u
unsigned long addr = mmio_opp->addr;
if (hvm_paging_enabled(current))
- (void)hvm_copy_to_guest_virt(addr, &p->u.data, p->size);
+ (void)hvm_copy_to_guest_virt(addr, &p->data, p->size);
else
- (void)hvm_copy_to_guest_phys(addr, &p->u.data, p->size);
+ (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
}
regs->esi += sign * p->count * p->size;
@@ -521,14 +521,14 @@ static void hvm_mmio_assist(struct cpu_u
if (src & REGISTER) {
index = operand_index(src);
value = get_reg_value(size, index, 0, regs);
- diff = (unsigned long) p->u.data & value;
+ diff = (unsigned long) p->data & value;
} else if (src & IMMEDIATE) {
value = mmio_opp->immediate;
- diff = (unsigned long) p->u.data & value;
+ diff = (unsigned long) p->data & value;
} else if (src & MEMORY) {
index = operand_index(dst);
value = get_reg_value(size, index, 0, regs);
- diff = (unsigned long) p->u.data & value;
+ diff = (unsigned long) p->data & value;
set_reg_value(size, index, 0, regs, diff);
}
@@ -536,14 +536,14 @@ static void hvm_mmio_assist(struct cpu_u
if (src & REGISTER) {
index = operand_index(src);
value = get_reg_value(size, index, 0, regs);
- diff = (unsigned long) p->u.data + value;
+ diff = (unsigned long) p->data + value;
} else if (src & IMMEDIATE) {
value = mmio_opp->immediate;
- diff = (unsigned long) p->u.data + value;
+ diff = (unsigned long) p->data + value;
} else if (src & MEMORY) {
index = operand_index(dst);
value = get_reg_value(size, index, 0, regs);
- diff = (unsigned long) p->u.data + value;
+ diff = (unsigned long) p->data + value;
set_reg_value(size, index, 0, regs, diff);
}
@@ -563,14 +563,14 @@ static void hvm_mmio_assist(struct cpu_u
if (src & REGISTER) {
index = operand_index(src);
value = get_reg_value(size, index, 0, regs);
- diff = (unsigned long) p->u.data | value;
+ diff = (unsigned long) p->data | value;
} else if (src & IMMEDIATE) {
value = mmio_opp->immediate;
- diff = (unsigned long) p->u.data | value;
+ diff = (unsigned long) p->data | value;
} else if (src & MEMORY) {
index = operand_index(dst);
value = get_reg_value(size, index, 0, regs);
- diff = (unsigned long) p->u.data | value;
+ diff = (unsigned long) p->data | value;
set_reg_value(size, index, 0, regs, diff);
}
@@ -590,14 +590,14 @@ static void hvm_mmio_assist(struct cpu_u
if (src & REGISTER) {
index = operand_index(src);
value = get_reg_value(size, index, 0, regs);
- diff = (unsigned long) p->u.data ^ value;
+ diff = (unsigned long) p->data ^ value;
} else if (src & IMMEDIATE) {
value = mmio_opp->immediate;
- diff = (unsigned long) p->u.data ^ value;
+ diff = (unsigned long) p->data ^ value;
} else if (src & MEMORY) {
index = operand_index(dst);
value = get_reg_value(size, index, 0, regs);
- diff = (unsigned long) p->u.data ^ value;
+ diff = (unsigned long) p->data ^ value;
set_reg_value(size, index, 0, regs, diff);
}
@@ -618,14 +618,14 @@ static void hvm_mmio_assist(struct cpu_u
if (src & REGISTER) {
index = operand_index(src);
value = get_reg_value(size, index, 0, regs);
- diff = (unsigned long) p->u.data - value;
+ diff = (unsigned long) p->data - value;
} else if (src & IMMEDIATE) {
value = mmio_opp->immediate;
- diff = (unsigned long) p->u.data - value;
+ diff = (unsigned long) p->data - value;
} else if (src & MEMORY) {
index = operand_index(dst);
value = get_reg_value(size, index, 0, regs);
- diff = value - (unsigned long) p->u.data;
+ diff = value - (unsigned long) p->data;
if ( mmio_opp->instr == INSTR_SUB )
set_reg_value(size, index, 0, regs, diff);
}
@@ -636,9 +636,9 @@ static void hvm_mmio_assist(struct cpu_u
*/
regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
- set_eflags_CF(size, value, (unsigned long) p->u.data, regs);
- set_eflags_OF(size, diff, value, (unsigned long) p->u.data, regs);
- set_eflags_AF(size, diff, value, (unsigned long) p->u.data, regs);
+ set_eflags_CF(size, value, (unsigned long) p->data, regs);
+ set_eflags_OF(size, diff, value, (unsigned long) p->data, regs);
+ set_eflags_AF(size, diff, value, (unsigned long) p->data, regs);
set_eflags_ZF(size, diff, regs);
set_eflags_SF(size, diff, regs);
set_eflags_PF(size, diff, regs);
@@ -654,7 +654,7 @@ static void hvm_mmio_assist(struct cpu_u
index = operand_index(dst);
value = get_reg_value(size, index, 0, regs);
}
- diff = (unsigned long) p->u.data & value;
+ diff = (unsigned long) p->data & value;
/*
* Sets the SF, ZF, and PF status flags. CF and OF are set to 0
@@ -674,7 +674,7 @@ static void hvm_mmio_assist(struct cpu_u
}
else if ( src & IMMEDIATE )
value = mmio_opp->immediate;
- if (p->u.data & (1 << (value & ((1 << 5) - 1))))
+ if (p->data & (1 << (value & ((1 << 5) - 1))))
regs->eflags |= X86_EFLAGS_CF;
else
regs->eflags &= ~X86_EFLAGS_CF;
@@ -684,10 +684,10 @@ static void hvm_mmio_assist(struct cpu_u
case INSTR_XCHG:
if (src & REGISTER) {
index = operand_index(src);
- set_reg_value(size, index, 0, regs, p->u.data);
+ set_reg_value(size, index, 0, regs, p->data);
} else {
index = operand_index(dst);
- set_reg_value(size, index, 0, regs, p->u.data);
+ set_reg_value(size, index, 0, regs, p->data);
}
break;
}
diff -r d93280670c3f -r 2db4388fecb9 xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/platform.c Tue Oct 31 16:42:46 2006 +0000
@@ -741,7 +741,7 @@ static void hvm_send_assist_req(struct v
}
void send_pio_req(unsigned long port, unsigned long count, int size,
- long value, int dir, int df, int pvalid)
+ long value, int dir, int df, int value_is_ptr)
{
struct vcpu *v = current;
vcpu_iodata_t *vio;
@@ -749,8 +749,8 @@ void send_pio_req(unsigned long port, un
if ( size == 0 || count == 0 ) {
printk("null pio request? port %lx, count %lx, "
- "size %d, value %lx, dir %d, pvalid %d.\n",
- port, count, size, value, dir, pvalid);
+ "size %d, value %lx, dir %d, value_is_ptr %d.\n",
+ port, count, size, value, dir, value_is_ptr);
}
vio = get_vio(v->domain, v->vcpu_id);
@@ -765,7 +765,7 @@ void send_pio_req(unsigned long port, un
p->state);
p->dir = dir;
- p->pdata_valid = pvalid;
+ p->data_is_ptr = value_is_ptr;
p->type = IOREQ_TYPE_PIO;
p->size = size;
@@ -775,14 +775,14 @@ void send_pio_req(unsigned long port, un
p->io_count++;
- if ( pvalid ) /* get physical address of data */
+ if ( value_is_ptr ) /* get physical address of data */
{
if ( hvm_paging_enabled(current) )
- p->u.pdata = (void *)shadow_gva_to_gpa(current, value);
+ p->data = shadow_gva_to_gpa(current, value);
else
- p->u.pdata = (void *)value; /* guest VA == guest PA */
+ p->data = value; /* guest VA == guest PA */
} else if ( dir == IOREQ_WRITE )
- p->u.data = value;
+ p->data = value;
if ( hvm_portio_intercept(p) ) {
p->state = STATE_IORESP_READY;
@@ -795,7 +795,7 @@ void send_pio_req(unsigned long port, un
static void send_mmio_req(unsigned char type, unsigned long gpa,
unsigned long count, int size, long value,
- int dir, int df, int pvalid)
+ int dir, int df, int value_is_ptr)
{
struct vcpu *v = current;
vcpu_iodata_t *vio;
@@ -803,8 +803,8 @@ static void send_mmio_req(unsigned char
if ( size == 0 || count == 0 ) {
printk("null mmio request? type %d, gpa %lx, "
- "count %lx, size %d, value %lx, dir %d, pvalid %d.\n",
- type, gpa, count, size, value, dir, pvalid);
+ "count %lx, size %d, value %lx, dir %d, value_is_ptr %d.\n",
+ type, gpa, count, size, value, dir, value_is_ptr);
}
vio = get_vio(v->domain, v->vcpu_id);
@@ -819,7 +819,7 @@ static void send_mmio_req(unsigned char
printk("WARNING: send mmio with something already pending (%d)?\n",
p->state);
p->dir = dir;
- p->pdata_valid = pvalid;
+ p->data_is_ptr = value_is_ptr;
p->type = type;
p->size = size;
@@ -829,13 +829,13 @@ static void send_mmio_req(unsigned char
p->io_count++;
- if (pvalid) {
+ if (value_is_ptr) {
if (hvm_paging_enabled(v))
- p->u.data = shadow_gva_to_gpa(v, value);
+ p->data = shadow_gva_to_gpa(v, value);
else
- p->u.pdata = (void *) value; /* guest VA == guest PA */
+ p->data = value; /* guest VA == guest PA */
} else
- p->u.data = value;
+ p->data = value;
if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) ) {
p->state = STATE_IORESP_READY;
diff -r d93280670c3f -r 2db4388fecb9 xen/arch/x86/hvm/pmtimer.c
--- a/xen/arch/x86/hvm/pmtimer.c Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/pmtimer.c Tue Oct 31 16:42:46 2006 +0000
@@ -20,7 +20,7 @@ static int handle_pmt_io(ioreq_t *p)
uint64_t curr_gtime;
if (p->size != 4 ||
- p->pdata_valid ||
+ p->data_is_ptr ||
p->type != IOREQ_TYPE_PIO){
printk("HVM_PMT: wrong PM timer IO\n");
return 1;
@@ -32,7 +32,7 @@ static int handle_pmt_io(ioreq_t *p)
} else if (p->dir == 1) { /* read */
curr_gtime = hvm_get_guest_time(s->vcpu);
s->pm1_timer += ((curr_gtime - s->last_gtime) * s->scale) >> 32;
- p->u.data = s->pm1_timer;
+ p->data = s->pm1_timer;
s->last_gtime = curr_gtime;
return 1;
}
diff -r d93280670c3f -r 2db4388fecb9 xen/arch/x86/hvm/rtc.c
--- a/xen/arch/x86/hvm/rtc.c Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/rtc.c Tue Oct 31 16:42:46 2006 +0000
@@ -345,17 +345,17 @@ static int handle_rtc_io(ioreq_t *p)
struct RTCState *vrtc = &v->domain->arch.hvm_domain.pl_time.vrtc;
if (p->size != 1 ||
- p->pdata_valid ||
+ p->data_is_ptr ||
p->type != IOREQ_TYPE_PIO){
printk("HVM_RTC: wrong RTC IO!\n");
return 1;
}
if (p->dir == 0) { /* write */
- if (rtc_ioport_write(vrtc, p->addr, p->u.data & 0xFF))
+ if (rtc_ioport_write(vrtc, p->addr, p->data & 0xFF))
return 1;
} else if (p->dir == 1 && vrtc->cmos_index < RTC_SIZE) { /* read */
- p->u.data = rtc_ioport_read(vrtc, p->addr);
+ p->data = rtc_ioport_read(vrtc, p->addr);
return 1;
}
return 0;
diff -r d93280670c3f -r 2db4388fecb9 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c Tue Oct 31 16:42:46 2006 +0000
@@ -2545,10 +2545,10 @@ void walk_shadow_and_guest_pt(unsigned l
l1_pgentry_t spte;
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- unsigned long gpa;
+ paddr_t gpa;
gpa = shadow_gva_to_gpa(current, gva);
- printk( "gva = %lx, gpa=%lx, gCR3=%x\n", gva, gpa, (u32)vmcb->cr3 );
+ printk("gva = %lx, gpa=%"PRIpaddr", gCR3=%x\n", gva, gpa, (u32)vmcb->cr3);
if( !svm_paging_enabled(v) || mmio_space(gpa) )
return;
diff -r d93280670c3f -r 2db4388fecb9 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c Tue Oct 31 16:42:46 2006 +0000
@@ -40,10 +40,6 @@
* supporting only HVM guests (and so only "external" shadow mode).
*
* THINGS TO DO LATER:
- *
- * FIX GVA_TO_GPA
- * The current interface returns an unsigned long, which is not big enough
- * to hold a physical address in PAE. Should return a gfn instead.
*
* TEARDOWN HEURISTICS
* Also: have a heuristic for when to destroy a previous paging-mode's
@@ -2837,7 +2833,7 @@ static int sh_page_fault(struct vcpu *v,
perfc_incrc(shadow_fault_mmio);
sh_audit_gw(v, &gw);
unmap_walk(v, &gw);
- SHADOW_PRINTK("mmio\n");
+ SHADOW_PRINTK("mmio %#"PRIpaddr"\n", gpa);
shadow_audit_tables(v);
reset_early_unshadow(v);
shadow_unlock(d);
@@ -2941,7 +2937,7 @@ sh_gva_to_gfn(struct vcpu *v, unsigned l
}
-static unsigned long
+static paddr_t
sh_gva_to_gpa(struct vcpu *v, unsigned long va)
/* Called to translate a guest virtual address to what the *guest*
* pagetables would map it to. */
@@ -2950,7 +2946,7 @@ sh_gva_to_gpa(struct vcpu *v, unsigned l
if ( gfn == INVALID_GFN )
return 0;
else
- return (gfn << PAGE_SHIFT) | (va & ~PAGE_MASK);
+ return (((paddr_t)gfn) << PAGE_SHIFT) + (va & ~PAGE_MASK);
}
diff -r d93280670c3f -r 2db4388fecb9 xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/mm/shadow/types.h Tue Oct 31 16:42:46 2006 +0000
@@ -404,11 +404,22 @@ valid_gfn(gfn_t m)
}
/* Translation between mfns and gfns */
+
+// vcpu-specific version of gfn_to_mfn(). This is where we hide the dirty
+// little secret that, for hvm guests with paging disabled, nearly all of the
+// shadow code actually think that the guest is running on *untranslated* page
+// tables (which is actually domain->phys_table).
+//
+
static inline mfn_t
vcpu_gfn_to_mfn(struct vcpu *v, gfn_t gfn)
{
- return sh_vcpu_gfn_to_mfn(v, gfn_x(gfn));
-}
+ if ( !shadow_vcpu_mode_translate(v) )
+ return _mfn(gfn_x(gfn));
+ if ( likely(current->domain == v->domain) )
+ return _mfn(get_mfn_from_gpfn(gfn_x(gfn)));
+ return sh_gfn_to_mfn_foreign(v->domain, gfn_x(gfn));
+}
static inline gfn_t
mfn_to_gfn(struct domain *d, mfn_t mfn)
diff -r d93280670c3f -r 2db4388fecb9 xen/include/asm-x86/hvm/io.h
--- a/xen/include/asm-x86/hvm/io.h Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/include/asm-x86/hvm/io.h Tue Oct 31 16:42:46 2006 +0000
@@ -142,7 +142,7 @@ static inline int irq_masked(unsigned lo
#endif
extern void send_pio_req(unsigned long port, unsigned long count, int size,
- long value, int dir, int df, int pvalid);
+ long value, int dir, int df, int value_is_ptr);
extern void handle_mmio(unsigned long gpa);
extern void hvm_interrupt_post(struct vcpu *v, int vector, int type);
extern void hvm_io_assist(struct vcpu *v);
diff -r d93280670c3f -r 2db4388fecb9 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/include/asm-x86/hvm/support.h Tue Oct 31 16:42:46 2006 +0000
@@ -136,8 +136,8 @@ extern unsigned int opt_hvm_debug_level;
extern int hvm_enabled;
-int hvm_copy_to_guest_phys(unsigned long paddr, void *buf, int size);
-int hvm_copy_from_guest_phys(void *buf, unsigned long paddr, int size);
+int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
+int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size);
int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size);
int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size);
diff -r d93280670c3f -r 2db4388fecb9 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/include/asm-x86/shadow.h Tue Oct 31 16:42:46 2006 +0000
@@ -259,7 +259,7 @@ struct shadow_paging_mode {
int (*page_fault )(struct vcpu *v, unsigned long va,
struct cpu_user_regs *regs);
int (*invlpg )(struct vcpu *v, unsigned long va);
- unsigned long (*gva_to_gpa )(struct vcpu *v, unsigned long va);
+ paddr_t (*gva_to_gpa )(struct vcpu *v, unsigned long va);
unsigned long (*gva_to_gfn )(struct vcpu *v, unsigned long va);
void (*update_cr3 )(struct vcpu *v);
int (*map_and_validate_gl1e )(struct vcpu *v, mfn_t gmfn,
@@ -368,11 +368,13 @@ shadow_invlpg(struct vcpu *v, unsigned l
return v->arch.shadow.mode->invlpg(v, va);
}
-static inline unsigned long
+static inline paddr_t
shadow_gva_to_gpa(struct vcpu *v, unsigned long va)
/* Called to translate a guest virtual address to what the *guest*
* pagetables would map it to. */
{
+ if ( unlikely(!shadow_vcpu_mode_translate(v)) )
+ return (paddr_t) va;
return v->arch.shadow.mode->gva_to_gpa(v, va);
}
@@ -381,6 +383,8 @@ shadow_gva_to_gfn(struct vcpu *v, unsign
/* Called to translate a guest virtual address to what the *guest*
* pagetables would map it to. */
{
+ if ( unlikely(!shadow_vcpu_mode_translate(v)) )
+ return va >> PAGE_SHIFT;
return v->arch.shadow.mode->gva_to_gfn(v, va);
}
@@ -673,21 +677,6 @@ sh_gfn_to_mfn(struct domain *d, unsigned
return sh_gfn_to_mfn_foreign(d, gfn);
}
-// vcpu-specific version of gfn_to_mfn(). This is where we hide the dirty
-// little secret that, for hvm guests with paging disabled, nearly all of the
-// shadow code actually think that the guest is running on *untranslated* page
-// tables (which is actually domain->phys_table).
-//
-static inline mfn_t
-sh_vcpu_gfn_to_mfn(struct vcpu *v, unsigned long gfn)
-{
- if ( !shadow_vcpu_mode_translate(v) )
- return _mfn(gfn);
- if ( likely(current->domain == v->domain) )
- return _mfn(get_mfn_from_gpfn(gfn));
- return sh_gfn_to_mfn_foreign(v->domain, gfn);
-}
-
static inline unsigned long
sh_mfn_to_gfn(struct domain *d, mfn_t mfn)
{
diff -r d93280670c3f -r 2db4388fecb9 xen/include/public/hvm/ioreq.h
--- a/xen/include/public/hvm/ioreq.h Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/include/public/hvm/ioreq.h Tue Oct 31 16:42:46 2006 +0000
@@ -45,12 +45,10 @@ struct ioreq {
uint64_t addr; /* physical address */
uint64_t size; /* size in bytes */
uint64_t count; /* for rep prefixes */
- union {
- uint64_t data; /* data */
- void *pdata; /* pointer to data */
- } u;
+ uint64_t data; /* data (or paddr of data) */
uint8_t state:4;
- uint8_t pdata_valid:1; /* if 1, use pdata above */
+ uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr
+ * of the real data to use. */
uint8_t dir:1; /* 1=read, 0=write */
uint8_t df:1;
uint8_t type; /* I/O type */
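
Not part of the patch: a cut-down sketch of how a simple port-I/O intercept consumes an ioreq under the new layout (a single data field plus data_is_ptr, replacing the old union), mirroring the handle_pit_io hunk earlier in this changeset. The struct is a reduced stand-in for the public header, and the fake register and handler names are placeholders.

/* Cut-down stand-in for the public ioreq_t; the real struct has more fields. */
#include <stdio.h>
#include <stdint.h>

typedef struct {
    uint64_t addr;            /* physical address (here: a port number) */
    uint64_t size;            /* size in bytes */
    uint64_t count;           /* for rep prefixes */
    uint64_t data;            /* data, or guest paddr of data if data_is_ptr */
    uint8_t  state:4;
    uint8_t  data_is_ptr:1;   /* 1 => data is a guest physical address */
    uint8_t  dir:1;           /* 1=read, 0=write */
    uint8_t  df:1;
    uint8_t  type;            /* I/O type */
} ioreq_t;

static uint64_t fake_reg = 0x42;             /* placeholder device register */

static int handle_example_io(ioreq_t *p)
{
    /* Simple intercepts reject pointer (scatter/gather) requests, as
     * handle_pit_io does above; the full emulation path handles those. */
    if (p->data_is_ptr || p->count != 1)
        return 0;

    if (p->dir == 0)                         /* write: data carries the value itself */
        fake_reg = p->data & ((p->size < 8) ? (1ULL << (8 * p->size)) - 1 : ~0ULL);
    else                                     /* read: the result goes back in data */
        p->data = fake_reg;

    return 1;
}

int main(void)
{
    ioreq_t req = { .addr = 0x43, .size = 1, .count = 1, .data = 0x99, .dir = 0 };
    handle_example_io(&req);                 /* write 0x99 to the fake register */
    req.dir = 1;
    handle_example_io(&req);                 /* read it back */
    printf("read back 0x%llx\n", (unsigned long long)req.data);
    return 0;
}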