[Xen-devel] [PATCH v6 08/16] x86/hvm: unify stdvga mmio intercept with standard mmio intercept
It's clear from the following check in hvmemul_rep_movs:
if ( sp2mt == p2m_mmio_direct || dp2mt == p2m_mmio_direct ||
(sp2mt == p2m_mmio_dm && dp2mt == p2m_mmio_dm) )
return X86EMUL_UNHANDLEABLE;
that mmio <-> mmio copy is not handled. This means the code in the
stdvga mmio intercept that explicitly handles mmio <-> mmio copy when
hvm_copy_to/from_guest_phys() fails is never going to be executed.
This patch therefore adds a check in hvmemul_do_io_addr() to make sure
mmio <-> mmio is disallowed and then registers standard mmio intercept ops
in stdvga_init().
With this patch all mmio and portio handled within Xen now go through
hvm_process_io_intercept().
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
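(Reviewer note, not part of the commit message: the sketch below is
illustrative only. It uses the real hvm_io_ops fields but a hypothetical
dispatch() wrapper to show the contract the new complete() op introduces:
accept() may return with a per-handler lock held, so any path that accepts
a request - even one that merely probes for a handler, as
hvm_mmio_internal() does - must invoke complete() before returning.)

    /* Illustrative sketch against Xen-internal types; not code from
     * this patch.  dispatch() is a hypothetical name. */
    static int dispatch(const struct hvm_io_handler *handler, ioreq_t *p)
    {
        const struct hvm_io_ops *ops = handler->ops;
        int rc;

        if ( !ops->accept(handler, p) )   /* may take a lock on success */
            return X86EMUL_UNHANDLEABLE;

        rc = hvm_process_io_intercept(handler, p);

        if ( ops->complete != NULL )
            ops->complete(handler);       /* drops any lock taken above */

        return rc;
    }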
xen/arch/x86/hvm/emulate.c | 9 ++
xen/arch/x86/hvm/intercept.c | 30 ++++--
xen/arch/x86/hvm/stdvga.c | 209 +++++++++++++++---------------------------
xen/include/asm-x86/hvm/io.h | 10 +-
4 files changed, 110 insertions(+), 148 deletions(-)
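(Likewise illustrative: the handler side of the same contract, as the
stdvga code below implements it. The reject conditions are collapsed into
a single placeholder so that only the locking pattern is visible.)

    /* Illustrative sketch against Xen-internal types; not code from
     * this patch.  The sketch_* names are hypothetical. */
    static bool_t sketch_accept(const struct hvm_io_handler *handler,
                                const ioreq_t *p)
    {
        struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;

        spin_lock(&s->lock);

        if ( 0 /* out of range, not in stdvga mode, multi-cycle write */ )
        {
            spin_unlock(&s->lock);   /* every reject path must unlock */
            return 0;
        }

        return 1;                    /* accepted: lock stays held...  */
    }

    static void sketch_complete(const struct hvm_io_handler *handler)
    {
        struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;

        spin_unlock(&s->lock);       /* ...until the I/O completes    */
    }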
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 20bf372..1afd626 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -266,6 +266,15 @@ static int hvmemul_acquire_page(unsigned long gmfn, struct page_info **page)
return X86EMUL_RETRY;
}
+ /* This code should not be reached if the gmfn is not RAM */
+ if ( p2m_is_mmio(p2mt) )
+ {
+ domain_crash(curr_d);
+
+ put_page(*page);
+ return X86EMUL_UNHANDLEABLE;
+ }
+
return X86EMUL_OKAY;
}
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index 91968c4..78e2f57 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -269,20 +269,21 @@ const struct hvm_io_handler *hvm_find_io_handler(ioreq_t *p)
int hvm_io_intercept(ioreq_t *p)
{
const struct hvm_io_handler *handler;
-
- if ( p->type == IOREQ_TYPE_COPY )
- {
- int rc = stdvga_intercept_mmio(p);
- if ( (rc == X86EMUL_OKAY) || (rc == X86EMUL_RETRY) )
- return rc;
- }
+ const struct hvm_io_ops *ops;
+ int rc;
handler = hvm_find_io_handler(p);
if ( handler == NULL )
return X86EMUL_UNHANDLEABLE;
- return hvm_process_io_intercept(handler, p);
+ rc = hvm_process_io_intercept(handler, p);
+
+ ops = handler->ops;
+ if ( ops->complete != NULL )
+ ops->complete(handler);
+
+ return rc;
}
struct hvm_io_handler *hvm_next_io_handler(struct domain *d)
@@ -345,6 +346,8 @@ void relocate_portio_handler(struct domain *d, uint16_t old_port,
bool_t hvm_mmio_internal(paddr_t gpa)
{
{
+ const struct hvm_io_handler *handler;
+ const struct hvm_io_ops *ops;
ioreq_t p = {
.type = IOREQ_TYPE_COPY,
.addr = gpa,
@@ -352,7 +355,16 @@ bool_t hvm_mmio_internal(paddr_t gpa)
.size = 1,
};
- return (hvm_find_io_handler(&p) != NULL);
+ handler = hvm_find_io_handler(&p);
+
+ if ( handler == NULL )
+ return 0;
+
+ ops = handler->ops;
+ if ( ops->complete != NULL )
+ ops->complete(handler);
+
+ return 1;
}
/*
diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index 30760fa..b17e400 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -148,7 +148,7 @@ static int stdvga_outb(uint64_t addr, uint8_t val)
}
else if ( prev_stdvga && !s->stdvga )
{
- gdprintk(XENLOG_INFO, "leaving stdvga\n");
+ gdprintk(XENLOG_INFO, "leaving stdvga mode\n");
}
return rc;
@@ -275,9 +275,10 @@ static uint8_t stdvga_mem_readb(uint64_t addr)
return ret;
}
-static uint64_t stdvga_mem_read(uint64_t addr, uint64_t size)
+static int stdvga_mem_read(const struct hvm_io_handler *handler,
+ uint64_t addr, uint32_t size, uint64_t *p_data)
{
- uint64_t data = 0;
+ uint64_t data = ~0ul;
switch ( size )
{
@@ -309,11 +310,12 @@ static uint64_t stdvga_mem_read(uint64_t addr, uint64_t size)
break;
default:
- gdprintk(XENLOG_WARNING, "invalid io size: %"PRId64"\n", size);
+ gdprintk(XENLOG_WARNING, "invalid io size: %u\n", size);
break;
}
- return data;
+ *p_data = data;
+ return X86EMUL_OKAY;
}
static void stdvga_mem_writeb(uint64_t addr, uint32_t val)
@@ -424,8 +426,22 @@ static void stdvga_mem_writeb(uint64_t addr, uint32_t val)
}
}
-static void stdvga_mem_write(uint64_t addr, uint64_t data, uint64_t size)
+static int stdvga_mem_write(const struct hvm_io_handler *handler,
+ uint64_t addr, uint32_t size,
+ uint64_t data)
{
+ struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
+ ioreq_t p = { .type = IOREQ_TYPE_COPY,
+ .addr = addr,
+ .size = size,
+ .count = 1,
+ .dir = IOREQ_WRITE,
+ .data = data,
+ };
+
+ if ( !s->cache )
+ goto done;
+
/* Intercept mmio write */
switch ( size )
{
@@ -457,156 +473,72 @@ static void stdvga_mem_write(uint64_t addr, uint64_t data, uint64_t size)
break;
default:
- gdprintk(XENLOG_WARNING, "invalid io size: %"PRId64"\n", size);
+ gdprintk(XENLOG_WARNING, "invalid io size: %u\n", size);
break;
}
-}
-
-static uint32_t read_data;
-
-static int mmio_move(struct hvm_hw_stdvga *s, ioreq_t *p)
-{
- int i;
- uint64_t addr = p->addr;
- p2m_type_t p2mt;
- struct domain *d = current->domain;
- int step = p->df ? -p->size : p->size;
-
- if ( p->data_is_ptr )
- {
- uint64_t data = p->data, tmp;
- if ( p->dir == IOREQ_READ )
- {
- for ( i = 0; i < p->count; i++ )
- {
- tmp = stdvga_mem_read(addr, p->size);
- if ( hvm_copy_to_guest_phys(data, &tmp, p->size) !=
- HVMCOPY_okay )
- {
- struct page_info *dp = get_page_from_gfn(
- d, data >> PAGE_SHIFT, &p2mt, P2M_ALLOC);
- /*
- * The only case we handle is vga_mem <-> vga_mem.
- * Anything else disables caching and leaves it to qemu-dm.
- */
- if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
- ((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
- {
- if ( dp )
- put_page(dp);
- return 0;
- }
- ASSERT(!dp);
- stdvga_mem_write(data, tmp, p->size);
- }
- data += step;
- addr += step;
- }
- }
- else
- {
- for ( i = 0; i < p->count; i++ )
- {
- if ( hvm_copy_from_guest_phys(&tmp, data, p->size) !=
- HVMCOPY_okay )
- {
- struct page_info *dp = get_page_from_gfn(
- d, data >> PAGE_SHIFT, &p2mt, P2M_ALLOC);
- if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
- ((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
- {
- if ( dp )
- put_page(dp);
- return 0;
- }
- ASSERT(!dp);
- tmp = stdvga_mem_read(data, p->size);
- }
- stdvga_mem_write(addr, tmp, p->size);
- data += step;
- addr += step;
- }
- }
- }
- else if ( p->dir == IOREQ_WRITE )
- {
- for ( i = 0; i < p->count; i++ )
- {
- stdvga_mem_write(addr, p->data, p->size);
- addr += step;
- }
- }
- else
- {
- ASSERT(p->count == 1);
- p->data = stdvga_mem_read(addr, p->size);
- }
+ done:
+ if ( hvm_buffered_io_send(&p) )
+ return X86EMUL_OKAY;
- read_data = p->data;
- return 1;
+ return X86EMUL_UNHANDLEABLE;
}
-int stdvga_intercept_mmio(ioreq_t *p)
+static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
+ const ioreq_t *p)
{
- struct domain *d = current->domain;
- struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
- uint64_t start, end, count = p->count, size = p->size;
- int buf = 0, rc;
-
- if ( p->df )
- {
- start = (p->addr - (count - 1) * size);
- end = p->addr + size;
- }
- else
- {
- start = p->addr;
- end = p->addr + count * size;
- }
-
- if ( (start < VGA_MEM_BASE) || (end > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
- return X86EMUL_UNHANDLEABLE;
-
- if ( p->size > 8 )
- {
- gdprintk(XENLOG_WARNING, "invalid mmio size %d\n", (int)p->size);
- return X86EMUL_UNHANDLEABLE;
- }
+ struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
spin_lock(&s->lock);
- if ( s->stdvga && s->cache )
+ if ( !s->stdvga ||
+ (p->addr < VGA_MEM_BASE) ||
+ ((p->addr + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
+ goto reject;
+
+ if ( p->dir == IOREQ_WRITE && p->count > 1 )
{
- switch ( p->type )
+ /*
+ * We cannot return X86EMUL_UNHANDLEABLE on anything other than the
+ * first cycle of an I/O. So, since we cannot guarantee to always be
+ * able to buffer writes, we have to reject any multi-cycle I/O and,
+ * since we are rejecting an I/O, we must invalidate the cache.
+ * Single-cycle write transactions are accepted even if the cache is
+ * not active since we can assert, when in stdvga mode, that writes
+ * to VRAM have no side effect and thus we can try to buffer them.
+ */
+ if ( s->cache )
{
- case IOREQ_TYPE_COPY:
- buf = mmio_move(s, p);
- if ( !buf )
- s->cache = 0;
- break;
- default:
- gdprintk(XENLOG_WARNING, "unsupported mmio request type:%d "
- "addr:0x%04x data:0x%04x size:%d count:%d state:%d "
- "isptr:%d dir:%d df:%d\n",
- p->type, (int)p->addr, (int)p->data, (int)p->size,
- (int)p->count, p->state,
- p->data_is_ptr, p->dir, p->df);
+ gdprintk(XENLOG_INFO, "leaving caching mode\n");
s->cache = 0;
}
+
+ goto reject;
}
- else
- {
- buf = (p->dir == IOREQ_WRITE);
- }
+ else if ( p->dir == IOREQ_READ && !s->cache )
+ goto reject;
- rc = (buf && hvm_buffered_io_send(p));
+ return 1;
+ reject:
spin_unlock(&s->lock);
+ return 0;
+}
- return rc ? X86EMUL_OKAY : X86EMUL_UNHANDLEABLE;
+static void stdvga_mem_complete(const struct hvm_io_handler *handler)
+{
+ struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
+
+ spin_unlock(&s->lock);
}
+static const struct hvm_io_ops stdvga_mem_ops = {
+ .accept = stdvga_mem_accept,
+ .read = stdvga_mem_read,
+ .write = stdvga_mem_write,
+ .complete = stdvga_mem_complete
+};
+
void stdvga_init(struct domain *d)
{
struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
@@ -630,10 +562,17 @@ void stdvga_init(struct domain *d)
if ( i == ARRAY_SIZE(s->vram_page) )
{
+ struct hvm_io_handler *handler;
+
/* Sequencer registers. */
register_portio_handler(d, 0x3c4, 2, stdvga_intercept_pio);
/* Graphics registers. */
register_portio_handler(d, 0x3ce, 2, stdvga_intercept_pio);
+
+ /* VGA memory */
+ handler = hvm_next_io_handler(d);
+ handler->type = IOREQ_TYPE_COPY;
+ handler->ops = &stdvga_mem_ops;
}
}
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index 9503e6e..8394959 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -86,10 +86,13 @@ typedef int (*hvm_io_write_t)(const struct hvm_io_handler *,
uint64_t data);
typedef bool_t (*hvm_io_accept_t)(const struct hvm_io_handler *,
const ioreq_t *p);
+typedef void (*hvm_io_complete_t)(const struct hvm_io_handler *);
+
struct hvm_io_ops {
- hvm_io_accept_t accept;
- hvm_io_read_t read;
- hvm_io_write_t write;
+ hvm_io_accept_t accept;
+ hvm_io_read_t read;
+ hvm_io_write_t write;
+ hvm_io_complete_t complete;
};
int hvm_process_io_intercept(const struct hvm_io_handler *handler,
@@ -141,7 +144,6 @@ struct hvm_hw_stdvga {
};
void stdvga_init(struct domain *d);
-int stdvga_intercept_mmio(ioreq_t *p);
void stdvga_deinit(struct domain *d);
extern void hvm_dpci_msi_eoi(struct domain *d, int vector);
--
1.7.10.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel