[Xen-devel] [PATCH v12 7/8] Add IOREQ_TYPE_VMWARE_PORT

From: Don Slutz <dslutz@xxxxxxxxxxx>

This adds synchronization of the 6 vcpu registers (only the low
32 bits of each) that vmport.c needs between Xen and QEMU.

This avoids a 2nd and 3rd exchange between QEMU and Xen to fetch
and put the 6 vcpu registers used by the code in vmport.c and
vmmouse.c.
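
To make the hand-off concrete (a minimal sketch only: the two
structs below are the ones this patch adds to ioreq.h, but
vmport_regs_for_vcpu() is a hypothetical helper, not code from this
patch), rax travels in the ioreq itself (p.data) while the other 5
registers go through a shared vmport page indexed by vcpu id:

    /* One vmware_regs_t slot per vcpu on the shared vmport page. */
    struct vmware_regs {
        uint32_t esi;
        uint32_t edi;
        uint32_t ebx;
        uint32_t ecx;
        uint32_t edx;
    };

    struct shared_vmport_iopage {
        struct vmware_regs vcpu_vmport_regs[1];
    };

    /* Hypothetical helper: where an emulator would find a vcpu's
     * extra registers after mapping the page named by
     * HVM_PARAM_VMPORT_REGS_PFN. */
    static struct vmware_regs *
    vmport_regs_for_vcpu(struct shared_vmport_iopage *p, unsigned int id)
    {
        return &p->vcpu_vmport_regs[id];
    }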

In the tools, enable usage of QEMU's vmport code.

The most useful VMware port support that QEMU currently has is the
VMware mouse support.  Xorg includes a VMware mouse driver that
uses absolute mode.  This makes using a mouse in X11 much nicer.

Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
CC: Don Slutz <don.slutz@xxxxxxxxx>
---
v12:
  Rebase changes.

  Pass size to vmport_check_port() -- required when the access
  overlaps the port, i.e. an inl on port 0x5657, 0x5656, 0x5655,
  0x5659, 0x565a, or 0x565b.
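
  As a standalone sketch of that overlap test (not part of the
  patch; BDOOR_PORT is 0x5658 in vmport.c), an access
  [port, port + bytes) overlaps the 4-byte backdoor range exactly
  when both conditions below hold:

      /* e.g. inl (bytes == 4) on port 0x5655 touches 0x5655..0x5658,
       * so it overlaps BDOOR_PORT even though port != BDOOR_PORT. */
      static int overlaps_backdoor(unsigned int port, unsigned int bytes)
      {
          return port + bytes > BDOOR_PORT && port < BDOOR_PORT + 4;
      }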

  Move the definition of vmport_check_port() into this patch from
  the ring3 patch.

v11:
  No change

v10:
  These literals should become an enum.
    I don't think the invalidate type is needed.
    Code handling "case X86EMUL_UNHANDLEABLE:" in emulate.c
    is unclear.
    Comment about the 'special' range of 1 is not clear.


v9:
  New code was presented as an RFC before this.

  Paul Durrant suggested I add support for other IOREQ types
  to HVMOP_map_io_range_to_ioreq_server.
    I have done this.
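
  For illustration, a hedged sketch of how a secondary emulator
  might claim the new special range (field names assume the full
  xen_hvm_io_range layout in hvm_op.h; domid and server_id stand in
  for values the emulator already holds):

      struct xen_hvm_io_range r = {
          .domid = domid,                    /* guest being served */
          .id    = server_id,                /* ioreq server id    */
          .type  = HVMOP_IO_RANGE_VMWARE_PORT,
          .start = 1,                        /* 'special' range is */
          .end   = 1,                        /* [1,1] == enabled   */
      };
      /* ... issue HVMOP_map_io_range_to_ioreq_server with &r ... */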

 tools/libxc/xc_hvm_build_x86.c   |   5 +-
 tools/libxl/libxl_dm.c           |   2 +
 xen/arch/x86/hvm/emulate.c       |  83 +++++++++++++++---
 xen/arch/x86/hvm/hvm.c           | 182 ++++++++++++++++++++++++++++++++++-----
 xen/arch/x86/hvm/io.c            |  16 ++++
 xen/arch/x86/hvm/vmware/vmport.c |  13 +++
 xen/include/asm-x86/hvm/domain.h |   3 +-
 xen/include/asm-x86/hvm/hvm.h    |   2 +
 xen/include/public/hvm/hvm_op.h  |   5 ++
 xen/include/public/hvm/ioreq.h   |  17 ++++
 xen/include/public/hvm/params.h  |   4 +-
 11 files changed, 293 insertions(+), 39 deletions(-)

diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c
index 003ea06..feebbf7 100644
--- a/tools/libxc/xc_hvm_build_x86.c
+++ b/tools/libxc/xc_hvm_build_x86.c
@@ -46,7 +46,8 @@
 #define SPECIALPAGE_IOREQ    5
 #define SPECIALPAGE_IDENT_PT 6
 #define SPECIALPAGE_CONSOLE  7
-#define NR_SPECIAL_PAGES     8
+#define SPECIALPAGE_VMPORT_REGS 8
+#define NR_SPECIAL_PAGES     9
 #define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x))
 
 #define NR_IOREQ_SERVER_PAGES 8
@@ -610,6 +611,8 @@ static int setup_guest(xc_interface *xch,
                      special_pfn(SPECIALPAGE_BUFIOREQ));
     xc_hvm_param_set(xch, dom, HVM_PARAM_IOREQ_PFN,
                      special_pfn(SPECIALPAGE_IOREQ));
+    xc_hvm_param_set(xch, dom, HVM_PARAM_VMPORT_REGS_PFN,
+                     special_pfn(SPECIALPAGE_VMPORT_REGS));
     xc_hvm_param_set(xch, dom, HVM_PARAM_CONSOLE_PFN,
                      special_pfn(SPECIALPAGE_CONSOLE));
     xc_hvm_param_set(xch, dom, HVM_PARAM_PAGING_RING_PFN,
diff --git a/tools/libxl/libxl_dm.c b/tools/libxl/libxl_dm.c
index 975996d..966ca46 100644
--- a/tools/libxl/libxl_dm.c
+++ b/tools/libxl/libxl_dm.c
@@ -831,6 +831,8 @@ static int libxl__build_device_model_args_new(libxl__gc *gc,
                                             machinearg, max_ram_below_4g);
             }
         }
+        if (libxl_defbool_val(c_info->vmware_port))
+            machinearg = GCSPRINTF("%s,vmport=on", machinearg);
         flexarray_append(dm_args, machinearg);
         for (i = 0; b_info->extra_hvm && b_info->extra_hvm[i] != NULL; i++)
             flexarray_append(dm_args, b_info->extra_hvm[i]);
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index fe5661d..51a97d5 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -169,33 +169,88 @@ static int hvmemul_do_io(
             vio->io_state = HVMIO_handle_mmio_awaiting_completion;
         break;
     case X86EMUL_UNHANDLEABLE:
-    {
-        struct hvm_ioreq_server *s =
-            hvm_select_ioreq_server(curr->domain, &p);
-
-        /* If there is no suitable backing DM, just ignore accesses */
-        if ( !s )
+        if ( vmport_check_port(p.addr, p.size) )
         {
-            hvm_complete_assist_req(&p);
-            rc = X86EMUL_OKAY;
-            vio->io_state = HVMIO_none;
+            struct hvm_ioreq_server *s =
+                hvm_select_ioreq_server(curr->domain, &p);
+
+            /* If there is no suitable backing DM, just ignore accesses */
+            if ( !s )
+            {
+                hvm_complete_assist_req(&p);
+                rc = X86EMUL_OKAY;
+                vio->io_state = HVMIO_none;
+            }
+            else
+            {
+                rc = hvm_send_assist_req(s, &p);
+                if ( rc != X86EMUL_RETRY )
+                    vio->io_state = HVMIO_none;
+                else if ( data_is_addr || dir == IOREQ_WRITE )
+                    rc = X86EMUL_OKAY;
+            }
         }
         else
         {
-            rc = hvm_send_assist_req(s, &p);
-            if ( rc != X86EMUL_RETRY )
-                vio->io_state = HVMIO_none;
-            else if ( data_is_addr || dir == IOREQ_WRITE )
+            struct hvm_ioreq_server *s;
+            vmware_regs_t *vr;
+
+            BUILD_BUG_ON(sizeof(ioreq_t) < sizeof(vmware_regs_t));
+
+            p.type = IOREQ_TYPE_VMWARE_PORT;
+            s = hvm_select_ioreq_server(curr->domain, &p);
+            vr = get_vmport_regs_any(s, curr);
+
+            /*
+             * If there is no suitable backing DM, just ignore accesses.
+             * Likewise, if we do not have access to the registers to
+             * pass to QEMU, just ignore the access.
+             */
+            if ( !s || !vr )
+            {
+                hvm_complete_assist_req(&p);
                 rc = X86EMUL_OKAY;
+                vio->io_state = HVMIO_none;
+            }
+            else
+            {
+                struct cpu_user_regs *regs = guest_cpu_user_regs();
+
+                p.data = regs->rax;
+                vr->ebx = regs->_ebx;
+                vr->ecx = regs->_ecx;
+                vr->edx = regs->_edx;
+                vr->esi = regs->_esi;
+                vr->edi = regs->_edi;
+
+                vio->io_state = HVMIO_handle_pio_awaiting_completion;
+                rc = hvm_send_assist_req(s, &p);
+                if ( rc != X86EMUL_RETRY )
+                {
+                    vio->io_state = HVMIO_none;
+                    if ( rc == X86EMUL_OKAY )
+                        rc = X86EMUL_RETRY;
+                }
+            }
         }
         break;
-    }
     default:
         BUG();
     }
 
     if ( rc != X86EMUL_OKAY )
+    {
+        /*
+         * If rc is X86EMUL_RETRY and vio->io_state is
+         * HVMIO_handle_pio_awaiting_completion, then we are of
+         * type IOREQ_TYPE_VMWARE_PORT, so completion happens in
+         * hvm_io_assist() with no re-emulation required.
+         */
+        if ( rc == X86EMUL_RETRY &&
+             vio->io_state == HVMIO_handle_pio_awaiting_completion )
+            rc = X86EMUL_OKAY;
         return rc;
+    }
 
  finish_access:
     if ( dir == IOREQ_READ )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index f8ec170..ce8cd09 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -394,6 +394,47 @@ static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
     return &p->vcpu_ioreq[v->vcpu_id];
 }
 
+static vmware_regs_t *get_vmport_regs_one(struct hvm_ioreq_server *s,
+                                          struct vcpu *v)
+{
+    struct hvm_ioreq_vcpu *sv;
+
+    list_for_each_entry ( sv,
+                          &s->ioreq_vcpu_list,
+                          list_entry )
+    {
+        if ( sv->vcpu == v )
+        {
+            shared_vmport_iopage_t *p = s->vmport_ioreq.va;
+            if ( !p )
+                return NULL;
+            return &p->vcpu_vmport_regs[v->vcpu_id];
+        }
+    }
+    return NULL;
+}
+
+vmware_regs_t *get_vmport_regs_any(struct hvm_ioreq_server *s, struct vcpu *v)
+{
+    struct domain *d = v->domain;
+
+    ASSERT((v == current) || !vcpu_runnable(v));
+
+    if ( s )
+        return get_vmport_regs_one(s, v);
+
+    list_for_each_entry ( s,
+                          &d->arch.hvm_domain.ioreq_server.list,
+                          list_entry )
+    {
+        vmware_regs_t *ret = get_vmport_regs_one(s, v);
+
+        if ( ret )
+            return ret;
+    }
+    return NULL;
+}
+
 bool_t hvm_io_pending(struct vcpu *v)
 {
     struct domain *d = v->domain;
@@ -504,22 +545,56 @@ static void hvm_free_ioreq_gmfn(struct domain *d, unsigned long gmfn)
         set_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
 }
 
-static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
+typedef enum {
+    IOREQ_PAGE_TYPE_IOREQ,
+    IOREQ_PAGE_TYPE_BUFIOREQ,
+    IOREQ_PAGE_TYPE_VMPORT,
+} ioreq_page_type_t;
+
+static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, ioreq_page_type_t buf)
 {
-    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+    struct hvm_ioreq_page *iorp = NULL;
+
+    switch ( buf )
+    {
+    case IOREQ_PAGE_TYPE_IOREQ:
+        iorp = &s->ioreq;
+        break;
+    case IOREQ_PAGE_TYPE_BUFIOREQ:
+        iorp = &s->bufioreq;
+        break;
+    case IOREQ_PAGE_TYPE_VMPORT:
+        iorp = &s->vmport_ioreq;
+        break;
+    }
+    ASSERT(iorp);
 
     destroy_ring_for_helper(&iorp->va, iorp->page);
 }
 
 static int hvm_map_ioreq_page(
-    struct hvm_ioreq_server *s, bool_t buf, unsigned long gmfn)
+    struct hvm_ioreq_server *s, ioreq_page_type_t buf, unsigned long gmfn)
 {
     struct domain *d = s->domain;
-    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+    struct hvm_ioreq_page *iorp = NULL;
     struct page_info *page;
     void *va;
     int rc;
 
+    switch ( buf )
+    {
+    case IOREQ_PAGE_TYPE_IOREQ:
+        iorp = &s->ioreq;
+        break;
+    case IOREQ_PAGE_TYPE_BUFIOREQ:
+        iorp = &s->bufioreq;
+        break;
+    case IOREQ_PAGE_TYPE_VMPORT:
+        iorp = &s->vmport_ioreq;
+        break;
+    }
+    ASSERT(iorp);
+
     if ( (rc = prepare_ring_for_helper(d, gmfn, &page, &va)) )
         return rc;
 
@@ -735,19 +810,32 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
 
 static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
                                       unsigned long ioreq_pfn,
-                                      unsigned long bufioreq_pfn)
+                                      unsigned long bufioreq_pfn,
+                                      unsigned long vmport_ioreq_pfn)
 {
     int rc;
 
-    rc = hvm_map_ioreq_page(s, 0, ioreq_pfn);
+    rc = hvm_map_ioreq_page(s, IOREQ_PAGE_TYPE_IOREQ, ioreq_pfn);
     if ( rc )
         return rc;
 
     if ( bufioreq_pfn != INVALID_GFN )
-        rc = hvm_map_ioreq_page(s, 1, bufioreq_pfn);
+        rc = hvm_map_ioreq_page(s, IOREQ_PAGE_TYPE_BUFIOREQ, bufioreq_pfn);
 
     if ( rc )
-        hvm_unmap_ioreq_page(s, 0);
+    {
+        hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_IOREQ);
+        return rc;
+    }
+
+    rc = hvm_map_ioreq_page(s, IOREQ_PAGE_TYPE_VMPORT, vmport_ioreq_pfn);
+
+    if ( rc )
+    {
+        if ( bufioreq_pfn != INVALID_GFN )
+            hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_BUFIOREQ);
+        hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_IOREQ);
+    }
 
     return rc;
 }
@@ -759,6 +847,8 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
     struct domain *d = s->domain;
     unsigned long ioreq_pfn = INVALID_GFN;
     unsigned long bufioreq_pfn = INVALID_GFN;
+    unsigned long vmport_ioreq_pfn =
+        d->arch.hvm_domain.params[HVM_PARAM_VMPORT_REGS_PFN];
     int rc;
 
     if ( is_default )
@@ -770,7 +860,8 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
         ASSERT(handle_bufioreq);
         return hvm_ioreq_server_map_pages(s,
                    d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN],
-                   d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN]);
+                   d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN],
+                   vmport_ioreq_pfn);
     }
 
     rc = hvm_alloc_ioreq_gmfn(d, &ioreq_pfn);
@@ -779,8 +870,8 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
         rc = hvm_alloc_ioreq_gmfn(d, &bufioreq_pfn);
 
     if ( !rc )
-        rc = hvm_ioreq_server_map_pages(s, ioreq_pfn, bufioreq_pfn);
-
+        rc = hvm_ioreq_server_map_pages(s, ioreq_pfn, bufioreq_pfn,
+                                        vmport_ioreq_pfn);
     if ( rc )
     {
         hvm_free_ioreq_gmfn(d, ioreq_pfn);
@@ -795,11 +886,15 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
 {
     struct domain *d = s->domain;
     bool_t handle_bufioreq = ( s->bufioreq.va != NULL );
+    bool_t handle_vmport_ioreq = ( s->vmport_ioreq.va != NULL );
+
+    if ( handle_vmport_ioreq )
+        hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_VMPORT);
 
     if ( handle_bufioreq )
-        hvm_unmap_ioreq_page(s, 1);
+        hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_BUFIOREQ);
 
-    hvm_unmap_ioreq_page(s, 0);
+    hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_IOREQ);
 
     if ( !is_default )
     {
@@ -834,12 +929,38 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
     for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
     {
         char *name;
+        char *type_name = NULL;
+        unsigned int limit;
 
-        rc = asprintf(&name, "ioreq_server %d %s", s->id,
-                      (i == HVMOP_IO_RANGE_PORT) ? "port" :
-                      (i == HVMOP_IO_RANGE_MEMORY) ? "memory" :
-                      (i == HVMOP_IO_RANGE_PCI) ? "pci" :
-                      "");
+        switch ( i )
+        {
+        case HVMOP_IO_RANGE_PORT:
+            type_name = "port";
+            limit = MAX_NR_IO_RANGES;
+            break;
+        case HVMOP_IO_RANGE_MEMORY:
+            type_name = "memory";
+            limit = MAX_NR_IO_RANGES;
+            break;
+        case HVMOP_IO_RANGE_PCI:
+            type_name = "pci";
+            limit = MAX_NR_IO_RANGES;
+            break;
+        case HVMOP_IO_RANGE_VMWARE_PORT:
+            type_name = "VMware port";
+            limit = 1;
+            break;
+        case HVMOP_IO_RANGE_TIMEOFFSET:
+            type_name = "timeoffset";
+            limit = 1;
+            break;
+        default:
+            break;
+        }
+        if ( !type_name )
+            continue;
+
+        rc = asprintf(&name, "ioreq_server %d %s", s->id, type_name);
         if ( rc )
             goto fail;
 
@@ -852,7 +973,12 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
         if ( !s->range[i] )
             goto fail;
 
-        rangeset_limit(s->range[i], MAX_NR_IO_RANGES);
+        rangeset_limit(s->range[i], limit);
+
+        /* VMware port */
+        if ( i == HVMOP_IO_RANGE_VMWARE_PORT &&
+            s->domain->arch.hvm_domain.is_vmware_port_enabled )
+            rc = rangeset_add_range(s->range[i], 1, 1);
     }
 
  done:
@@ -1157,6 +1283,8 @@ static int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
             case HVMOP_IO_RANGE_PORT:
             case HVMOP_IO_RANGE_MEMORY:
             case HVMOP_IO_RANGE_PCI:
+            case HVMOP_IO_RANGE_VMWARE_PORT:
+            case HVMOP_IO_RANGE_TIMEOFFSET:
                 r = s->range[type];
                 break;
 
@@ -1208,6 +1336,8 @@ static int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
             case HVMOP_IO_RANGE_PORT:
             case HVMOP_IO_RANGE_MEMORY:
             case HVMOP_IO_RANGE_PCI:
+            case HVMOP_IO_RANGE_VMWARE_PORT:
+            case HVMOP_IO_RANGE_TIMEOFFSET:
                 r = s->range[type];
                 break;
 
@@ -2425,9 +2555,6 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
     if ( list_empty(&d->arch.hvm_domain.ioreq_server.list) )
         return NULL;
 
-    if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
-        return d->arch.hvm_domain.default_ioreq_server;
-
     cf8 = d->arch.hvm_domain.pci_cf8;
 
     if ( p->type == IOREQ_TYPE_PIO &&
@@ -2480,7 +2607,10 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
         BUILD_BUG_ON(IOREQ_TYPE_PIO != HVMOP_IO_RANGE_PORT);
         BUILD_BUG_ON(IOREQ_TYPE_COPY != HVMOP_IO_RANGE_MEMORY);
         BUILD_BUG_ON(IOREQ_TYPE_PCI_CONFIG != HVMOP_IO_RANGE_PCI);
+        BUILD_BUG_ON(IOREQ_TYPE_VMWARE_PORT != HVMOP_IO_RANGE_VMWARE_PORT);
+        BUILD_BUG_ON(IOREQ_TYPE_TIMEOFFSET != HVMOP_IO_RANGE_TIMEOFFSET);
         r = s->range[type];
+        ASSERT(r);
 
         switch ( type )
         {
@@ -2507,6 +2637,13 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
             }
 
             break;
+        case IOREQ_TYPE_VMWARE_PORT:
+        case IOREQ_TYPE_TIMEOFFSET:
+            /* The 'special' range of [1,1] is checked for being enabled */
+            if ( rangeset_contains_singleton(r, 1) )
+                return s;
+
+            break;
         }
     }
 
@@ -2669,6 +2806,7 @@ void hvm_complete_assist_req(ioreq_t *p)
     case IOREQ_TYPE_PCI_CONFIG:
         ASSERT_UNREACHABLE();
         break;
+    case IOREQ_TYPE_VMWARE_PORT:
     case IOREQ_TYPE_COPY:
     case IOREQ_TYPE_PIO:
         if ( p->dir == IOREQ_READ )
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index c0964ec..c1379bf 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -192,6 +192,22 @@ void hvm_io_assist(ioreq_t *p)
         (void)handle_mmio();
         break;
     case HVMIO_handle_pio_awaiting_completion:
+        if ( p->type == IOREQ_TYPE_VMWARE_PORT )
+        {
+            vmware_regs_t *vr = get_vmport_regs_any(NULL, curr);
+
+            if ( vr )
+            {
+                struct cpu_user_regs *regs = guest_cpu_user_regs();
+
+                /* Only change the 32-bit part of each register. */
+                regs->_ebx = vr->ebx;
+                regs->_ecx = vr->ecx;
+                regs->_edx = vr->edx;
+                regs->_esi = vr->esi;
+                regs->_edi = vr->edi;
+            }
+        }
         if ( vio->io_size == 4 ) /* Needs zero extension. */
             guest_cpu_user_regs()->rax = (uint32_t)p->data;
         else
diff --git a/xen/arch/x86/hvm/vmware/vmport.c b/xen/arch/x86/hvm/vmware/vmport.c
index f24d8e3..5e14402 100644
--- a/xen/arch/x86/hvm/vmware/vmport.c
+++ b/xen/arch/x86/hvm/vmware/vmport.c
@@ -137,6 +137,19 @@ void vmport_register(struct domain *d)
     register_portio_handler(d, BDOOR_PORT, 4, vmport_ioport);
 }
 
+int vmport_check_port(unsigned int port, unsigned int bytes)
+{
+    struct vcpu *curr = current;
+    struct domain *currd = curr->domain;
+
+    if ( port + bytes > BDOOR_PORT && port < BDOOR_PORT + 4 &&
+         is_hvm_domain(currd) &&
+         currd->arch.hvm_domain.is_vmware_port_enabled )
+        return 0;
+    return 1;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 726b977..a6c312f 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -48,7 +48,7 @@ struct hvm_ioreq_vcpu {
     evtchn_port_t    ioreq_evtchn;
 };
 
-#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_PCI + 1)
+#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_VMWARE_PORT + 1)
 #define MAX_NR_IO_RANGES  256
 
 struct hvm_ioreq_server {
@@ -63,6 +63,7 @@ struct hvm_ioreq_server {
     ioservid_t             id;
     struct hvm_ioreq_page  ioreq;
     struct list_head       ioreq_vcpu_list;
+    struct hvm_ioreq_page  vmport_ioreq;
     struct hvm_ioreq_page  bufioreq;
 
     /* Lock to serialize access to buffered ioreq ring */
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 394c230..78752b4 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -523,6 +523,8 @@ extern bool_t opt_hvm_fep;
 #endif
 
 void vmport_register(struct domain *d);
+int vmport_check_port(unsigned int port, unsigned int bytes);
+vmware_regs_t *get_vmport_regs_any(struct hvm_ioreq_server *s, struct vcpu *v);
 
 #endif /* __ASM_X86_HVM_HVM_H__ */
 
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 9b84e84..4d10eee 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -321,6 +321,9 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);
  *
  * NOTE: unless an emulation request falls entirely within a range mapped
  * by a secondary emulator, it will not be passed to that emulator.
+ *
+ * NOTE: The 'special' range of [1,1] is what is checked for
+ * TIMEOFFSET and VMWARE_PORT requests.
  */
 #define HVMOP_map_io_range_to_ioreq_server 19
 #define HVMOP_unmap_io_range_from_ioreq_server 20
@@ -331,6 +334,8 @@ struct xen_hvm_io_range {
 # define HVMOP_IO_RANGE_PORT   0 /* I/O port range */
 # define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
 # define HVMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
+# define HVMOP_IO_RANGE_TIMEOFFSET 7 /* TIMEOFFSET special range */
+# define HVMOP_IO_RANGE_VMWARE_PORT 9 /* VMware port special range */
     uint64_aligned_t start, end; /* IN - inclusive start and end of range */
 };
 typedef struct xen_hvm_io_range xen_hvm_io_range_t;
diff --git a/xen/include/public/hvm/ioreq.h b/xen/include/public/hvm/ioreq.h
index 2e5809b..2f326cf 100644
--- a/xen/include/public/hvm/ioreq.h
+++ b/xen/include/public/hvm/ioreq.h
@@ -37,6 +37,7 @@
 #define IOREQ_TYPE_PCI_CONFIG   2
 #define IOREQ_TYPE_TIMEOFFSET   7
 #define IOREQ_TYPE_INVALIDATE   8 /* mapcache */
+#define IOREQ_TYPE_VMWARE_PORT  9 /* pio + vmport registers */
 
 /*
  * VMExit dispatcher should cooperate with instruction decoder to
@@ -48,6 +49,8 @@
  * 
  * 63....48|47..40|39..35|34..32|31........0
  * SEGMENT |BUS   |DEV   |FN    |OFFSET
+ *
+ * For I/O type IOREQ_TYPE_VMWARE_PORT the vmware_regs are also used.
  */
 struct ioreq {
     uint64_t addr;          /* physical address */
@@ -66,11 +69,25 @@ struct ioreq {
 };
 typedef struct ioreq ioreq_t;
 
+struct vmware_regs {
+    uint32_t esi;
+    uint32_t edi;
+    uint32_t ebx;
+    uint32_t ecx;
+    uint32_t edx;
+};
+typedef struct vmware_regs vmware_regs_t;
+
 struct shared_iopage {
     struct ioreq vcpu_ioreq[1];
 };
 typedef struct shared_iopage shared_iopage_t;
 
+struct shared_vmport_iopage {
+    struct vmware_regs vcpu_vmport_regs[1];
+};
+typedef struct shared_vmport_iopage shared_vmport_iopage_t;
+
 struct buf_ioreq {
     uint8_t  type;   /* I/O type                    */
     uint8_t  pad:1;
diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h
index 7c73089..130eba9 100644
--- a/xen/include/public/hvm/params.h
+++ b/xen/include/public/hvm/params.h
@@ -50,6 +50,8 @@
 #define HVM_PARAM_PAE_ENABLED  4
 
 #define HVM_PARAM_IOREQ_PFN    5
+/* Extra PFN for the vmport register page. */
+#define HVM_PARAM_VMPORT_REGS_PFN 35
 
 #define HVM_PARAM_BUFIOREQ_PFN 6
 #define HVM_PARAM_BUFIOREQ_EVTCHN 26
@@ -187,6 +189,6 @@
 /* Location of the VM Generation ID in guest physical address space. */
 #define HVM_PARAM_VM_GENERATION_ID_ADDR 34
 
-#define HVM_NR_PARAMS          35
+#define HVM_NR_PARAMS          36
 
 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
-- 
1.8.3.1

