[Xen-devel] [RFC][PATCH v2 1/1] Add IOREQ_TYPE_VMWARE_PORT



Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
---
v2:
    Fixup usage of hvmtrace_io_assist().
    VMware only changes the low 32 bits of each register.
    Added vmware_ioreq_t.
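
For reference, a backing device model that handles this new ioreq type sees
the six 32-bit guest GPRs directly in the request and writes its results
back into the same fields.  A minimal sketch of what DM-side handling might
look like (the handler name and command decoding are illustrative
assumptions, not part of this patch):

    /* Hypothetical DM-side handler for IOREQ_TYPE_VMWARE_PORT.  Assumes
     * the vmware_ioreq_t layout from xen/include/public/hvm/ioreq.h. */
    static void handle_vmware_port(vmware_ioreq_t *vp)
    {
        uint16_t cmd = vp->ecx & 0xffff;   /* backdoor command number */

        switch ( cmd )
        {
        /* ... commands this DM chooses to emulate ... */
        default:
            vp->eax = 0;   /* unknown command: reply with 0 in eax */
            break;
        }
        /*
         * On completion Xen writes eax back zero-extended and merges
         * ebx..edi into the low 32 bits of the guest's registers (see
         * the hvm_io_assist() hunk below).
         */
    }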

 xen/arch/x86/hvm/emulate.c        | 72 +++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/io.c             | 19 +++++++++++
 xen/arch/x86/hvm/vmware/vmport.c  | 24 ++++++++++++-
 xen/include/asm-x86/hvm/emulate.h |  2 ++
 xen/include/asm-x86/hvm/vcpu.h    |  1 +
 xen/include/public/hvm/ioreq.h    | 19 +++++++++++
 6 files changed, 136 insertions(+), 1 deletion(-)
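
For background, the guest side of this protocol is a magic I/O port access:
the guest puts the backdoor magic in eax and a command number in cx, then
does a 32-bit "in" from the backdoor port.  A guest-side sketch (the BDOOR_*
values follow the well-known VMware backdoor convention and are assumed
here, not defined by this patch):

    /* Guest-side probe of the VMware backdoor (sketch). */
    #include <stdint.h>

    #define BDOOR_MAGIC          0x564d5868u   /* "VMXh" */
    #define BDOOR_PORT           0x5658
    #define BDOOR_CMD_GETVERSION 10

    static uint32_t vmport_getversion(void)
    {
        uint32_t eax, ebx, ecx, edx;

        asm volatile ("inl %%dx, %%eax"
                      : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                      : "a" (BDOOR_MAGIC), "c" (BDOOR_CMD_GETVERSION),
                        "d" (BDOOR_PORT));
        return eax;    /* version number, or ~0u if no backdoor */
    }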

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index c0f47d2..215f049 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -50,6 +50,78 @@ static void hvmtrace_io_assist(int is_mmio, ioreq_t *p)
     trace_var(event, 0/*!cycles*/, size, buffer);
 }
 
+int hvmemul_do_vmport(uint16_t addr, int size, int dir,
+                      struct cpu_user_regs *regs)
+{
+    struct vcpu *curr = current;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+    vmware_ioreq_t p = {
+        .type = IOREQ_TYPE_VMWARE_PORT,
+        .addr = addr,
+        .size = size,
+        .dir = dir,
+        .eax = regs->rax,
+        .ebx = regs->rbx,
+        .ecx = regs->rcx,
+        .edx = regs->rdx,
+        .esi = regs->rsi,
+        .edi = regs->rdi,
+    };
+    ioreq_t *pp = (ioreq_t *)&p;
+    ioreq_t op;
+
+    ASSERT(sizeof(p) == sizeof(op));
+    ASSERT(offsetof(ioreq_t, type) == offsetof(vmware_ioreq_t, type));
+    ASSERT(offsetof(ioreq_t, vp_eport) == offsetof(vmware_ioreq_t, vp_eport));
+
+    switch ( vio->io_state )
+    {
+    case HVMIO_none:
+        break;
+    case HVMIO_completed:
+        vio->io_state = HVMIO_none;
+        goto finish_access_vmport;
+    case HVMIO_dispatched:
+        /* May have to wait for previous cycle of a multi-write to complete. */
+    default:
+        return X86EMUL_UNHANDLEABLE;
+    }
+
+    if ( hvm_io_pending(curr) )
+    {
+        gdprintk(XENLOG_WARNING, "WARNING: io already pending?\n");
+        return X86EMUL_UNHANDLEABLE;
+    }
+
+    vio->io_state = HVMIO_handle_vmport_awaiting_completion;
+    vio->io_size = size;
+
+    /* If there is no backing DM, just ignore accesses */
+    if ( !hvm_has_dm(curr->domain) )
+    {
+        vio->io_state = HVMIO_none;
+        vio->io_data = ~0ul;
+    }
+    else
+    {
+        if ( !hvm_send_assist_req(pp) )
+            vio->io_state = HVMIO_none;
+        return X86EMUL_RETRY;
+    }
+
+ finish_access_vmport:
+    memset(&op, 0, sizeof(op));
+    op.dir = dir;
+    op.addr = (uint16_t)regs->rdx;
+    op.data_is_ptr = 0;
+    op.data = vio->io_data;
+    hvmtrace_io_assist(0, &op);
+
+    memcpy(&regs->rax, &vio->io_data, vio->io_size);
+
+    return X86EMUL_OKAY;
+}
+
 static int hvmemul_do_io(
     int is_mmio, paddr_t addr, unsigned long *reps, int size,
     paddr_t ram_gpa, int dir, int df, void *p_data)
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 68fb890..a8bf324 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -197,6 +197,25 @@ void hvm_io_assist(ioreq_t *p)
         else
             memcpy(&guest_cpu_user_regs()->rax, &p->data, vio->io_size);
         break;
+    case HVMIO_handle_vmport_awaiting_completion:
+    {
+        struct cpu_user_regs *regs = guest_cpu_user_regs();
+        vmware_ioreq_t *vp = (vmware_ioreq_t *)p;
+
+        ASSERT(sizeof(*p) == sizeof(*vp));
+        ASSERT(offsetof(ioreq_t, type) == offsetof(vmware_ioreq_t, type));
+        ASSERT(offsetof(ioreq_t, vp_eport) == offsetof(vmware_ioreq_t, vp_eport));
+
+        /* eax is always zero-extended into rax. */
+        regs->rax = vp->eax;
+        /* Only change the low 32 bits of each register. */
+        regs->rbx = (regs->rbx & 0xffffffff00000000ull) | vp->ebx;
+        regs->rcx = (regs->rcx & 0xffffffff00000000ull) | vp->ecx;
+        regs->rdx = (regs->rdx & 0xffffffff00000000ull) | vp->edx;
+        regs->rsi = (regs->rsi & 0xffffffff00000000ull) | vp->esi;
+        regs->rdi = (regs->rdi & 0xffffffff00000000ull) | vp->edi;
+    }
+        break;
     default:
         break;
     }
diff --git a/xen/arch/x86/hvm/vmware/vmport.c b/xen/arch/x86/hvm/vmware/vmport.c
index 962ee32..0649106 100644
--- a/xen/arch/x86/hvm/vmware/vmport.c
+++ b/xen/arch/x86/hvm/vmware/vmport.c
@@ -147,9 +147,31 @@ int vmport_ioport(int dir, uint32_t port, uint32_t bytes, uint32_t *val)
             regs->rax = 0x0;
             break;
         default:
+        {   /* Let the backing DM handle it. */
+            int rc;
+
             HVMTRACE_ND(VMPORT_UNKNOWN, 0, 1/*cycles*/, 6,
-                        (bytes << 8) + dir, cmd, regs->rbx,
+                        (bytes << 8) | dir, cmd, regs->rbx,
                         regs->rcx, regs->rsi, regs->rdi);
+            rc = hvmemul_do_vmport(BDOOR_PORT, bytes, dir, regs);
+            switch ( rc )
+            {
+            case X86EMUL_OKAY:
+                break;
+            case X86EMUL_RETRY:
+            {
+                struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+
+                if ( vio->io_state != HVMIO_handle_vmport_awaiting_completion )
+                    return 0;
+                break;
+            }
+            default:
+                gdprintk(XENLOG_ERR, "Weird HVM ioemulation status %d.\n", rc);
+                domain_crash(current->domain);
+                break;
+            }
+        }
             break;
         }
 
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index 5411302..7d18435 100644
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -53,6 +53,8 @@ struct segment_register *hvmemul_get_seg_reg(
 int hvmemul_do_pio(
     unsigned long port, unsigned long *reps, int size,
     paddr_t ram_gpa, int dir, int df, void *p_data);
+int hvmemul_do_vmport(uint16_t addr, int size, int dir,
+                      struct cpu_user_regs *regs);
 
 void hvm_dump_emulation_state(const char *prefix,
                               struct hvm_emulate_ctxt *hvmemul_ctxt);
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 01e0665..1e63d7f 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -36,6 +36,7 @@ enum hvm_io_state {
     HVMIO_awaiting_completion,
     HVMIO_handle_mmio_awaiting_completion,
     HVMIO_handle_pio_awaiting_completion,
+    HVMIO_handle_vmport_awaiting_completion,
     HVMIO_completed
 };
 
diff --git a/xen/include/public/hvm/ioreq.h b/xen/include/public/hvm/ioreq.h
index 5b5fedf..7d5ba58 100644
--- a/xen/include/public/hvm/ioreq.h
+++ b/xen/include/public/hvm/ioreq.h
@@ -35,6 +35,7 @@
 #define IOREQ_TYPE_PIO          0 /* pio */
 #define IOREQ_TYPE_COPY         1 /* mmio ops */
 #define IOREQ_TYPE_PCI_CONFIG   2
+#define IOREQ_TYPE_VMWARE_PORT  3
 #define IOREQ_TYPE_TIMEOFFSET   7
 #define IOREQ_TYPE_INVALIDATE   8 /* mapcache */
 
@@ -48,6 +49,8 @@
  * 
  * 63....48|47..40|39..35|34..32|31........0
  * SEGMENT |BUS   |DEV   |FN    |OFFSET
+ *
+ * For I/O type IOREQ_TYPE_VMWARE_PORT, use struct vmware_ioreq below.
  */
 struct ioreq {
     uint64_t addr;          /* physical address */
@@ -66,6 +69,22 @@ struct ioreq {
 };
 typedef struct ioreq ioreq_t;
 
+struct vmware_ioreq {
+    uint32_t esi;
+    uint32_t edi;
+    uint32_t eax;
+    uint32_t ebx;
+    uint32_t ecx;
+    uint32_t edx;
+    uint32_t vp_eport;      /* evtchn for notifications to/from device model */
+    uint16_t addr;
+    uint8_t  state:4;
+    uint8_t  dir:1;         /* 1=read, 0=write */
+    uint8_t  size:3;
+    uint8_t  type;          /* I/O type */
+};
+typedef struct vmware_ioreq vmware_ioreq_t;
+
 struct shared_iopage {
     struct ioreq vcpu_ioreq[1];
 };
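
A note on the (ioreq_t *) casts in hvmemul_do_vmport() and hvm_io_assist():
they rely on vmware_ioreq_t aliasing ioreq_t -- same size, with 'type' and
'vp_eport' at matching offsets.  The runtime ASSERTs above check exactly
that; the same invariants could also be enforced at build time, e.g. (plain
C11 sketch, not part of this patch; within Xen itself BUILD_BUG_ON() would
be the usual spelling):

    /* Assumes both struct definitions from public/hvm/ioreq.h are in scope. */
    #include <assert.h>
    #include <stddef.h>

    static_assert(sizeof(vmware_ioreq_t) == sizeof(ioreq_t),
                  "vmware_ioreq_t must be the same size as ioreq_t");
    static_assert(offsetof(vmware_ioreq_t, type) ==
                  offsetof(ioreq_t, type),
                  "'type' must be at the same offset in both views");
    static_assert(offsetof(vmware_ioreq_t, vp_eport) ==
                  offsetof(ioreq_t, vp_eport),
                  "'vp_eport' must be at the same offset in both views");
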
-- 
1.8.4

