[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v6 06/16] x86/hvm: add length to mmio check op



When memory mapped I/O is range checked by internal handlers, the length
of the access should be taken into account.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/hvm/intercept.c |   23 ++++++++++++++++++++---
 xen/include/asm-x86/hvm/io.h |   16 ++++++++++++++++
 2 files changed, 36 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index 6344c65..ccef38e 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -35,9 +35,20 @@
 static bool_t hvm_mmio_accept(const struct hvm_io_handler *handler,
                               const ioreq_t *p)
 {
+    paddr_t first = hvm_mmio_first_byte(p);
+    paddr_t last = hvm_mmio_last_byte(p);
+
     BUG_ON(handler->type != IOREQ_TYPE_COPY);
 
-    return handler->mmio.ops->check(current, p->addr);
+    if ( !handler->mmio.ops->check(current, first) )
+        return 0;
+
+    /* Make sure the handler will accept the whole access. */
+    if ( p->size > 1 &&
+         !handler->mmio.ops->check(current, last) )
+        domain_crash(current->domain);
+
+    return 1;
 }
 
 static int hvm_mmio_read(const struct hvm_io_handler *handler,
@@ -112,7 +123,8 @@ static const struct hvm_io_ops portio_ops = {
 int hvm_process_io_intercept(const struct hvm_io_handler *handler,
                              ioreq_t *p)
 {
-    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+    struct vcpu *curr = current;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     const struct hvm_io_ops *ops =
         (p->type == IOREQ_TYPE_COPY) ?
         &mmio_ops :
@@ -223,6 +235,9 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
 
     if ( i != 0 )
     {
+        if ( rc == X86EMUL_UNHANDLEABLE )
+            domain_crash(curr->domain);
+
         p->count = i;
         rc = X86EMUL_OKAY;
     }
@@ -342,7 +357,9 @@ bool_t hvm_mmio_internal(paddr_t gpa)
 {
     ioreq_t p = {
         .type = IOREQ_TYPE_COPY,
-        .addr = gpa
+        .addr = gpa,
+        .count = 1,
+        .size = 1,
     };
 
     return (hvm_find_io_handler(&p) != NULL);
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index 461a134..4fc2336 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -43,6 +43,22 @@ struct hvm_mmio_ops {
     hvm_mmio_write_t write;
 };
 
+static inline paddr_t hvm_mmio_first_byte(const ioreq_t *p)
+{
+    return p->df ?
+           p->addr - (p->count - 1ul) * p->size :
+           p->addr;
+}
+
+static inline paddr_t hvm_mmio_last_byte(const ioreq_t *p)
+{
+    unsigned long count = p->count;
+
+    return p->df ?
+           p->addr + p->size - 1 :
+           p->addr + (count * p->size) - 1;
+}
+
 typedef int (*portio_action_t)(
     int dir, uint16_t port, unsigned int bytes, uint32_t *val);
 
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.