
Re: [Xen-devel] [PATCH v13 7/8] Add IOREQ_TYPE_VMWARE_PORT



> -----Original Message-----
> From: xen-devel-bounces@xxxxxxxxxxxxx [mailto:xen-devel-
> bounces@xxxxxxxxxxxxx] On Behalf Of Don Slutz
> Sent: 28 November 2015 21:45
> To: xen-devel@xxxxxxxxxxxxx
> Cc: Jun Nakajima; Wei Liu; Kevin Tian; Keir (Xen.org); Ian Campbell; George
> Dunlap; Andrew Cooper; Stefano Stabellini; Eddie Dong; Don Slutz; Don Slutz;
> Tim (Xen.org); Aravind Gopalakrishnan; Jan Beulich; Suravee Suthikulpanit;
> Boris Ostrovsky; Ian Jackson
> Subject: [Xen-devel] [PATCH v13 7/8] Add IOREQ_TYPE_VMWARE_PORT
> 
> From: Don Slutz <dslutz@xxxxxxxxxxx>
> 
> This adds synchronization of the 6 vcpu registers (only the low 32
> bits of each) that vmport.c needs between Xen and QEMU.
> 
> This avoids a 2nd and 3rd exchange between QEMU and Xen to fetch
> and put the 6 vcpu registers used by the code in vmport.c and
> vmmouse.c.
> 
> In the tools, enable usage of QEMU's vmport code.
> 
> The most useful VMware port support that QEMU currently has is the
> VMware mouse support.  Xorg includes a VMware mouse driver that
> uses absolute mode.  This makes using a mouse in X11 much nicer.
> 
> Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
> Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
> CC: Don Slutz <don.slutz@xxxxxxxxx>
> ---
> v13:
>   Rebased on staging (not a simple rebase, needed rework to function
>   with changes).
>     I would have made this !vmport_check_port ...
>        Changed to !is_vmware, and inverted vmport_check_port's return value.
>   Use a single "list_for_each_entry ( sv, ..." loop.
>   Added full stops in comments.
> 
> v12:
>   Rebase changes.
> 
>   Pass size to vmport_check_port() -- required when the access
>   overlaps the port range, i.e. an inl on port 0x5657, 0x5656,
>   0x5655, 0x5659, 0x565a, or 0x565b.
> 
>   Move define of vmport_check_port() into this patch from ring3
>   patch.
> 
> v11:
>   No change
> 
> v10:
>   These literals should become an enum.
>     I don't think the invalidate type is needed.
>     Code handling "case X86EMUL_UNHANDLEABLE:" in emulate.c
>     is unclear.
>     Comment about "special" range of 1 is not clear.
> 
> 
> v9:
>   New code was presented as an RFC before this.
> 
>   Paul Durrant suggested I add support for other IOREQ types
>   to HVMOP_map_io_range_to_ioreq_server.
>     I have done this.
> 
>  tools/libxc/xc_dom_x86.c         |   5 +-
>  tools/libxl/libxl_dm.c           |   2 +
>  xen/arch/x86/hvm/emulate.c       |  68 +++++++++++---
>  xen/arch/x86/hvm/hvm.c           | 194 ++++++++++++++++++++++++++++++++++-----
>  xen/arch/x86/hvm/vmware/vmport.c |  14 +++
>  xen/include/asm-x86/hvm/domain.h |   3 +-
>  xen/include/asm-x86/hvm/hvm.h    |   2 +
>  xen/include/public/hvm/hvm_op.h  |   5 +
>  xen/include/public/hvm/ioreq.h   |  17 ++++
>  xen/include/public/hvm/params.h  |   4 +-
>  10 files changed, 275 insertions(+), 39 deletions(-)
> 
> diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
> index 5ff33ca..4c8a7fe 100644
> --- a/tools/libxc/xc_dom_x86.c
> +++ b/tools/libxc/xc_dom_x86.c
> @@ -60,7 +60,8 @@
>  #define SPECIALPAGE_IOREQ    5
>  #define SPECIALPAGE_IDENT_PT 6
>  #define SPECIALPAGE_CONSOLE  7
> -#define NR_SPECIAL_PAGES     8
> +#define SPECIALPAGE_VMPORT_REGS 8
> +#define NR_SPECIAL_PAGES     9
>  #define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x))
> 
>  #define NR_IOREQ_SERVER_PAGES 8
> @@ -613,6 +614,8 @@ static int alloc_magic_pages_hvm(struct xc_dom_image *dom)
>                       special_pfn(SPECIALPAGE_BUFIOREQ));
>      xc_hvm_param_set(xch, domid, HVM_PARAM_IOREQ_PFN,
>                       special_pfn(SPECIALPAGE_IOREQ));
> +    xc_hvm_param_set(xch, domid, HVM_PARAM_VMPORT_REGS_PFN,
> +                     special_pfn(SPECIALPAGE_VMPORT_REGS));
>      xc_hvm_param_set(xch, domid, HVM_PARAM_CONSOLE_PFN,
>                       special_pfn(SPECIALPAGE_CONSOLE));
>      xc_hvm_param_set(xch, domid, HVM_PARAM_PAGING_RING_PFN,
> diff --git a/tools/libxl/libxl_dm.c b/tools/libxl/libxl_dm.c
> index c76fd90..50e6d93 100644
> --- a/tools/libxl/libxl_dm.c
> +++ b/tools/libxl/libxl_dm.c
> @@ -1150,6 +1150,8 @@ static int libxl__build_device_model_args_new(libxl__gc *gc,
>              }
>          }
> 
> +        if (libxl_defbool_val(c_info->vmware_port))
> +            machinearg = GCSPRINTF("%s,vmport=on", machinearg);
>          flexarray_append(dm_args, machinearg);
>          for (i = 0; b_info->extra_hvm && b_info->extra_hvm[i] != NULL; i++)
>              flexarray_append(dm_args, b_info->extra_hvm[i]);
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index e1017b5..d407741 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -113,6 +113,7 @@ static int hvmemul_do_io(
>      };
>      void *p_data = (void *)data;
>      int rc;
> +    bool_t is_vmware = !is_mmio && !data_is_addr && vmport_check_port(p.addr, p.size);
> 
>      /*
>       * Weird-sized accesses have undefined behaviour: we discard writes
> @@ -133,7 +134,7 @@ static int hvmemul_do_io(
>          p = vio->io_req;
> 
>          /* Verify the emulation request has been correctly re-issued */
> -        if ( (p.type != is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO) ||
> +        if ( (p.type != (is_mmio ? IOREQ_TYPE_COPY : is_vmware ? IOREQ_TYPE_VMWARE_PORT : IOREQ_TYPE_PIO)) ||

is_vmware already incorporates !is_mmio, so there's a redundant check in that 
expression. The extra test also makes it look pretty ugly... probably better 
re-factored into an if statement.

>               (p.addr != addr) ||
>               (p.size != size) ||
>               (p.count != reps) ||
> @@ -167,26 +168,65 @@ static int hvmemul_do_io(
>          vio->io_req.state = STATE_IOREQ_NONE;
>          break;
>      case X86EMUL_UNHANDLEABLE:
> -    {
> -        struct hvm_ioreq_server *s =
> -            hvm_select_ioreq_server(curr->domain, &p);
> -
> -        /* If there is no suitable backing DM, just ignore accesses */
> -        if ( !s )
> +        if ( !is_vmware )
>          {
> -            rc = hvm_process_io_intercept(&null_handler, &p);
> -            vio->io_req.state = STATE_IOREQ_NONE;
> +            struct hvm_ioreq_server *s =
> +                hvm_select_ioreq_server(curr->domain, &p);
> +
> +            /* If there is no suitable backing DM, just ignore accesses */
> +            if ( !s )
> +            {
> +                rc = hvm_process_io_intercept(&null_handler, &p);
> +                vio->io_req.state = STATE_IOREQ_NONE;
> +            }
> +            else
> +            {
> +                rc = hvm_send_ioreq(s, &p, 0);
> +                if ( rc != X86EMUL_RETRY || curr->domain->is_shutting_down )
> +                    vio->io_req.state = STATE_IOREQ_NONE;
> +                else if ( data_is_addr )
> +                    rc = X86EMUL_OKAY;
> +            }
>          }
>          else
>          {
> -            rc = hvm_send_ioreq(s, &p, 0);
> -            if ( rc != X86EMUL_RETRY || curr->domain->is_shutting_down )
> +            struct hvm_ioreq_server *s;
> +            vmware_regs_t *vr;
> +
> +            BUILD_BUG_ON(sizeof(ioreq_t) < sizeof(vmware_regs_t));
> +
> +            p.type = IOREQ_TYPE_VMWARE_PORT;
> +            vio->io_req.type = IOREQ_TYPE_VMWARE_PORT;

This could be done in a single statement.
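
I.e. something like (assuming nothing else has to happen in between):

    p.type = vio->io_req.type = IOREQ_TYPE_VMWARE_PORT;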

> +            s = hvm_select_ioreq_server(curr->domain, &p);
> +            vr = get_vmport_regs_any(s, curr);
> +
> +            /*
> +             * If there is no suitable backing DM, just ignore accesses.  If
> +             * we do not have access to registers to pass to QEMU, just
> +             * ignore access.
> +             */
> +            if ( !s || !vr )
> +            {
> +                rc = hvm_process_io_intercept(&null_handler, &p);
>                  vio->io_req.state = STATE_IOREQ_NONE;
> -            else if ( data_is_addr )
> -                rc = X86EMUL_OKAY;
> +            }
> +            else
> +            {
> +                struct cpu_user_regs *regs = guest_cpu_user_regs();
> +
> +                p.data = regs->rax;
> +                vr->ebx = regs->_ebx;
> +                vr->ecx = regs->_ecx;
> +                vr->edx = regs->_edx;
> +                vr->esi = regs->_esi;
> +                vr->edi = regs->_edi;
> +
> +                rc = hvm_send_ioreq(s, &p, 0);
> +                if ( rc != X86EMUL_RETRY || curr->domain->is_shutting_down )
> +                    vio->io_req.state = STATE_IOREQ_NONE;
> +            }
>          }
>          break;
> -    }
>      default:
>          BUG();
>      }
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index c347b63..07b4025 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -415,6 +415,45 @@ static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
>      return &p->vcpu_ioreq[v->vcpu_id];
>  }
> 
> +static vmware_regs_t *get_vmport_regs_one(struct hvm_ioreq_server *s,
> +                                          struct vcpu *v)
> +{
> +    struct hvm_ioreq_vcpu *sv;
> +
> +    list_for_each_entry ( sv, &s->ioreq_vcpu_list, list_entry )
> +    {
> +        if ( sv->vcpu == v )
> +        {
> +            shared_vmport_iopage_t *p = s->vmport_ioreq.va;
> +            if ( !p )
> +                return NULL;
> +            return &p->vcpu_vmport_regs[v->vcpu_id];
> +        }
> +    }
> +    return NULL;
> +}
> +
> +vmware_regs_t *get_vmport_regs_any(struct hvm_ioreq_server *s, struct vcpu *v)
> +{
> +    struct domain *d = v->domain;
> +
> +    ASSERT((v == current) || !vcpu_runnable(v));
> +
> +    if ( s )
> +        return get_vmport_regs_one(s, v);
> +
> +    list_for_each_entry ( s,
> +                          &d->arch.hvm_domain.ioreq_server.list,
> +                          list_entry )
> +    {
> +        vmware_regs_t *ret = get_vmport_regs_one(s, v);
> +
> +        if ( ret )
> +            return ret;
> +    }
> +    return NULL;
> +}
> +
>  bool_t hvm_io_pending(struct vcpu *v)
>  {
>      struct domain *d = v->domain;
> @@ -536,6 +575,21 @@ void hvm_do_resume(struct vcpu *v)
>          handle_mmio();
>          break;
>      case HVMIO_pio_completion:
> +        if ( vio->io_req.type == IOREQ_TYPE_VMWARE_PORT ) {
> +            vmware_regs_t *vr = get_vmport_regs_any(NULL, v);
> +
> +            if ( vr )
> +            {
> +                struct cpu_user_regs *regs = guest_cpu_user_regs();
> +
> +                /* Only change the 32bit part of the register. */
> +                regs->_ebx = vr->ebx;
> +                regs->_ecx = vr->ecx;
> +                regs->_edx = vr->edx;
> +                regs->_esi = vr->esi;
> +                regs->_edi = vr->edi;
> +            }
> +        }
>          (void)handle_pio(vio->io_req.addr, vio->io_req.size,
>                           vio->io_req.dir);
>          break;
> @@ -618,22 +672,56 @@ static void hvm_free_ioreq_gmfn(struct domain *d, unsigned long gmfn)
>          set_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
>  }
> 
> -static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
> +typedef enum {
> +    IOREQ_PAGE_TYPE_IOREQ,
> +    IOREQ_PAGE_TYPE_BUFIOREQ,
> +    IOREQ_PAGE_TYPE_VMPORT,
> +} ioreq_page_type_t;
> +
> +static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, ioreq_page_type_t buf)
>  {
> -    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> +    struct hvm_ioreq_page *iorp = NULL;
> +
> +    switch ( buf )
> +    {
> +    case IOREQ_PAGE_TYPE_IOREQ:
> +        iorp = &s->ioreq;
> +        break;
> +    case IOREQ_PAGE_TYPE_BUFIOREQ:
> +        iorp = &s->bufioreq;
> +        break;
> +    case IOREQ_PAGE_TYPE_VMPORT:
> +        iorp = &s->vmport_ioreq;
> +        break;
> +    }
> +    ASSERT(iorp);
> 
>      destroy_ring_for_helper(&iorp->va, iorp->page);
>  }
> 
>  static int hvm_map_ioreq_page(
> -    struct hvm_ioreq_server *s, bool_t buf, unsigned long gmfn)
> +    struct hvm_ioreq_server *s, ioreq_page_type_t buf, unsigned long gmfn)
>  {
>      struct domain *d = s->domain;
> -    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> +    struct hvm_ioreq_page *iorp = NULL;
>      struct page_info *page;
>      void *va;
>      int rc;
> 
> +    switch ( buf )
> +    {
> +    case IOREQ_PAGE_TYPE_IOREQ:
> +        iorp = &s->ioreq;
> +        break;
> +    case IOREQ_PAGE_TYPE_BUFIOREQ:
> +        iorp = &s->bufioreq;
> +        break;
> +    case IOREQ_PAGE_TYPE_VMPORT:
> +        iorp = &s->vmport_ioreq;
> +        break;
> +    }
> +    ASSERT(iorp);
> +
>      if ( (rc = prepare_ring_for_helper(d, gmfn, &page, &va)) )
>          return rc;
> 
> @@ -849,19 +937,32 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
> 
>  static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
>                                        unsigned long ioreq_pfn,
> -                                      unsigned long bufioreq_pfn)
> +                                      unsigned long bufioreq_pfn,
> +                                      unsigned long vmport_ioreq_pfn)
>  {
>      int rc;
> 
> -    rc = hvm_map_ioreq_page(s, 0, ioreq_pfn);
> +    rc = hvm_map_ioreq_page(s, IOREQ_PAGE_TYPE_IOREQ, ioreq_pfn);
>      if ( rc )
>          return rc;
> 
>      if ( bufioreq_pfn != INVALID_GFN )
> -        rc = hvm_map_ioreq_page(s, 1, bufioreq_pfn);
> +        rc = hvm_map_ioreq_page(s, IOREQ_PAGE_TYPE_BUFIOREQ, bufioreq_pfn);
> 
>      if ( rc )
> -        hvm_unmap_ioreq_page(s, 0);
> +    {
> +        hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_IOREQ);
> +        return rc;
> +    }
> +
> +    rc = hvm_map_ioreq_page(s, IOREQ_PAGE_TYPE_VMPORT, vmport_ioreq_pfn);

Is every ioreq server going to have one of these? It doesn't look like it, so 
should you not have a validity check on the pfn?
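
Something along the lines of the bufioreq_pfn handling above, perhaps
(untested sketch):

    if ( vmport_ioreq_pfn != INVALID_GFN )
        rc = hvm_map_ioreq_page(s, IOREQ_PAGE_TYPE_VMPORT,
                                vmport_ioreq_pfn);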

  Paul

> +
> +    if ( rc )
> +    {
> +        if ( bufioreq_pfn != INVALID_GFN )
> +            hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_BUFIOREQ);
> +        hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_IOREQ);
> +    }
> 
>      return rc;
>  }
> @@ -873,6 +974,8 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
>      struct domain *d = s->domain;
>      unsigned long ioreq_pfn = INVALID_GFN;
>      unsigned long bufioreq_pfn = INVALID_GFN;
> +    unsigned long vmport_ioreq_pfn =
> +        d->arch.hvm_domain.params[HVM_PARAM_VMPORT_REGS_PFN];
>      int rc;
> 
>      if ( is_default )
> @@ -884,7 +987,8 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
>          ASSERT(handle_bufioreq);
>          return hvm_ioreq_server_map_pages(s,
>                     d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN],
> -                   d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN]);
> +                   d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN],
> +                   vmport_ioreq_pfn);
>      }
> 
>      rc = hvm_alloc_ioreq_gmfn(d, &ioreq_pfn);
> @@ -893,8 +997,8 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
>          rc = hvm_alloc_ioreq_gmfn(d, &bufioreq_pfn);
> 
>      if ( !rc )
> -        rc = hvm_ioreq_server_map_pages(s, ioreq_pfn, bufioreq_pfn);
> -
> +        rc = hvm_ioreq_server_map_pages(s, ioreq_pfn, bufioreq_pfn,
> +                                        vmport_ioreq_pfn);
>      if ( rc )
>      {
>          hvm_free_ioreq_gmfn(d, ioreq_pfn);
> @@ -909,11 +1013,15 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
>  {
>      struct domain *d = s->domain;
>      bool_t handle_bufioreq = ( s->bufioreq.va != NULL );
> +    bool_t handle_vmport_ioreq = ( s->vmport_ioreq.va != NULL );
> +
> +    if ( handle_vmport_ioreq )
> +        hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_VMPORT);
> 
>      if ( handle_bufioreq )
> -        hvm_unmap_ioreq_page(s, 1);
> +        hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_BUFIOREQ);
> 
> -    hvm_unmap_ioreq_page(s, 0);
> +    hvm_unmap_ioreq_page(s, IOREQ_PAGE_TYPE_IOREQ);
> 
>      if ( !is_default )
>      {
> @@ -948,12 +1056,38 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
>      for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
>      {
>          char *name;
> +        char *type_name = NULL;
> +        unsigned int limit;
> 
> -        rc = asprintf(&name, "ioreq_server %d %s", s->id,
> -                      (i == HVMOP_IO_RANGE_PORT) ? "port" :
> -                      (i == HVMOP_IO_RANGE_MEMORY) ? "memory" :
> -                      (i == HVMOP_IO_RANGE_PCI) ? "pci" :
> -                      "");
> +        switch ( i )
> +        {
> +        case HVMOP_IO_RANGE_PORT:
> +            type_name = "port";
> +            limit = MAX_NR_IO_RANGES;
> +            break;
> +        case HVMOP_IO_RANGE_MEMORY:
> +            type_name = "memory";
> +            limit = MAX_NR_IO_RANGES;
> +            break;
> +        case HVMOP_IO_RANGE_PCI:
> +            type_name = "pci";
> +            limit = MAX_NR_IO_RANGES;
> +            break;
> +        case HVMOP_IO_RANGE_VMWARE_PORT:
> +            type_name = "VMware port";
> +            limit = 1;
> +            break;
> +        case HVMOP_IO_RANGE_TIMEOFFSET:
> +            type_name = "timeoffset";
> +            limit = 1;
> +            break;
> +        default:
> +            break;
> +        }
> +        if ( !type_name )
> +            continue;
> +
> +        rc = asprintf(&name, "ioreq_server %d %s", s->id, type_name);
>          if ( rc )
>              goto fail;
> 
> @@ -966,7 +1100,12 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
>          if ( !s->range[i] )
>              goto fail;
> 
> -        rangeset_limit(s->range[i], MAX_NR_IO_RANGES);
> +        rangeset_limit(s->range[i], limit);
> +
> +        /* VMware port */
> +        if ( i == HVMOP_IO_RANGE_VMWARE_PORT &&
> +            s->domain->arch.hvm_domain.is_vmware_port_enabled )
> +            rc = rangeset_add_range(s->range[i], 1, 1);
>      }
> 
>   done:
> @@ -1271,6 +1410,8 @@ static int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
>              case HVMOP_IO_RANGE_PORT:
>              case HVMOP_IO_RANGE_MEMORY:
>              case HVMOP_IO_RANGE_PCI:
> +            case HVMOP_IO_RANGE_VMWARE_PORT:
> +            case HVMOP_IO_RANGE_TIMEOFFSET:
>                  r = s->range[type];
>                  break;
> 
> @@ -1322,6 +1463,8 @@ static int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
>              case HVMOP_IO_RANGE_PORT:
>              case HVMOP_IO_RANGE_MEMORY:
>              case HVMOP_IO_RANGE_PCI:
> +            case HVMOP_IO_RANGE_VMWARE_PORT:
> +            case HVMOP_IO_RANGE_TIMEOFFSET:
>                  r = s->range[type];
>                  break;
> 
> @@ -2558,9 +2701,6 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
>      if ( list_empty(&d->arch.hvm_domain.ioreq_server.list) )
>          return NULL;
> 
> -    if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
> -        return d->arch.hvm_domain.default_ioreq_server;
> -
>      cf8 = d->arch.hvm_domain.pci_cf8;
> 
>      if ( p->type == IOREQ_TYPE_PIO &&
> @@ -2613,7 +2753,10 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
>          BUILD_BUG_ON(IOREQ_TYPE_PIO != HVMOP_IO_RANGE_PORT);
>          BUILD_BUG_ON(IOREQ_TYPE_COPY != HVMOP_IO_RANGE_MEMORY);
>          BUILD_BUG_ON(IOREQ_TYPE_PCI_CONFIG != HVMOP_IO_RANGE_PCI);
> +        BUILD_BUG_ON(IOREQ_TYPE_VMWARE_PORT != HVMOP_IO_RANGE_VMWARE_PORT);
> +        BUILD_BUG_ON(IOREQ_TYPE_TIMEOFFSET != HVMOP_IO_RANGE_TIMEOFFSET);
>          r = s->range[type];
> +        ASSERT(r);
> 
>          switch ( type )
>          {
> @@ -2640,6 +2783,13 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
>              }
> 
>              break;
> +        case IOREQ_TYPE_VMWARE_PORT:
> +        case IOREQ_TYPE_TIMEOFFSET:
> +            /* The 'special' range of [1,1] is checked for being enabled. */
> +            if ( rangeset_contains_singleton(r, 1) )
> +                return s;
> +
> +            break;
>          }
>      }
> 
> diff --git a/xen/arch/x86/hvm/vmware/vmport.c b/xen/arch/x86/hvm/vmware/vmport.c
> index f24d8e3..ee993b3 100644
> --- a/xen/arch/x86/hvm/vmware/vmport.c
> +++ b/xen/arch/x86/hvm/vmware/vmport.c
> @@ -137,6 +137,20 @@ void vmport_register(struct domain *d)
>      register_portio_handler(d, BDOOR_PORT, 4, vmport_ioport);
>  }
> 
> +int vmport_check_port(unsigned int port, unsigned int bytes)
> +{
> +    struct vcpu *curr = current;
> +    struct domain *currd = curr->domain;
> +
> +    if ( port + bytes > BDOOR_PORT && port < BDOOR_PORT + 4 &&
> +         is_hvm_domain(currd) &&
> +         currd->arch.hvm_domain.is_vmware_port_enabled )
> +    {
> +        return 1;
> +    }
> +    return 0;
> +}
> +
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
> index 5860d51..d87dedf 100644
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -48,7 +48,7 @@ struct hvm_ioreq_vcpu {
>      bool_t           pending;
>  };
> 
> -#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_PCI + 1)
> +#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_VMWARE_PORT + 1)
>  #define MAX_NR_IO_RANGES  256
> 
>  struct hvm_ioreq_server {
> @@ -63,6 +63,7 @@ struct hvm_ioreq_server {
>      ioservid_t             id;
>      struct hvm_ioreq_page  ioreq;
>      struct list_head       ioreq_vcpu_list;
> +    struct hvm_ioreq_page  vmport_ioreq;
>      struct hvm_ioreq_page  bufioreq;
> 
>      /* Lock to serialize access to buffered ioreq ring */
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 857cbb4..78547ec 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -567,6 +567,8 @@ void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v);
>  bool_t altp2m_vcpu_emulate_ve(struct vcpu *v);
> 
>  void vmport_register(struct domain *d);
> +int vmport_check_port(unsigned int port, unsigned int bytes);
> +vmware_regs_t *get_vmport_regs_any(struct hvm_ioreq_server *s, struct
> vcpu *v);
> 
>  #endif /* __ASM_X86_HVM_HVM_H__ */
> 
> diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
> index 1606185..3dc99af 100644
> --- a/xen/include/public/hvm/hvm_op.h
> +++ b/xen/include/public/hvm/hvm_op.h
> @@ -323,6 +323,9 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);
>   *
>   * NOTE: unless an emulation request falls entirely within a range mapped
>   * by a secondary emulator, it will not be passed to that emulator.
> + *
> + * NOTE: The 'special' range of [1,1] is what is checked for on
> + * TIMEOFFSET and VMWARE_PORT.
>   */
>  #define HVMOP_map_io_range_to_ioreq_server 19
>  #define HVMOP_unmap_io_range_from_ioreq_server 20
> @@ -333,6 +336,8 @@ struct xen_hvm_io_range {
>  # define HVMOP_IO_RANGE_PORT   0 /* I/O port range */
>  # define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
>  # define HVMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
> +# define HVMOP_IO_RANGE_TIMEOFFSET 7 /* TIMEOFFSET special range */
> +# define HVMOP_IO_RANGE_VMWARE_PORT 9 /* VMware port special range */
>      uint64_aligned_t start, end; /* IN - inclusive start and end of range */
>  };
>  typedef struct xen_hvm_io_range xen_hvm_io_range_t;
> diff --git a/xen/include/public/hvm/ioreq.h b/xen/include/public/hvm/ioreq.h
> index 2e5809b..2f326cf 100644
> --- a/xen/include/public/hvm/ioreq.h
> +++ b/xen/include/public/hvm/ioreq.h
> @@ -37,6 +37,7 @@
>  #define IOREQ_TYPE_PCI_CONFIG   2
>  #define IOREQ_TYPE_TIMEOFFSET   7
>  #define IOREQ_TYPE_INVALIDATE   8 /* mapcache */
> +#define IOREQ_TYPE_VMWARE_PORT  9 /* pio + vmport registers */
> 
>  /*
>   * VMExit dispatcher should cooperate with instruction decoder to
> @@ -48,6 +49,8 @@
>   *
>   * 63....48|47..40|39..35|34..32|31........0
>   * SEGMENT |BUS   |DEV   |FN    |OFFSET
> + *
> + * For I/O type IOREQ_TYPE_VMWARE_PORT also use the vmware_regs.
>   */
>  struct ioreq {
>      uint64_t addr;          /* physical address */
> @@ -66,11 +69,25 @@ struct ioreq {
>  };
>  typedef struct ioreq ioreq_t;
> 
> +struct vmware_regs {
> +    uint32_t esi;
> +    uint32_t edi;
> +    uint32_t ebx;
> +    uint32_t ecx;
> +    uint32_t edx;
> +};
> +typedef struct vmware_regs vmware_regs_t;
> +
>  struct shared_iopage {
>      struct ioreq vcpu_ioreq[1];
>  };
>  typedef struct shared_iopage shared_iopage_t;
> 
> +struct shared_vmport_iopage {
> +    struct vmware_regs vcpu_vmport_regs[1];
> +};
> +typedef struct shared_vmport_iopage shared_vmport_iopage_t;
> +
>  struct buf_ioreq {
>      uint8_t  type;   /* I/O type                    */
>      uint8_t  pad:1;
> diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h
> index b437444..61a744f 100644
> --- a/xen/include/public/hvm/params.h
> +++ b/xen/include/public/hvm/params.h
> @@ -52,6 +52,8 @@
>  #define HVM_PARAM_PAE_ENABLED  4
> 
>  #define HVM_PARAM_IOREQ_PFN    5
> +/* VMWare Port PFN. */
> +#define HVM_PARAM_VMPORT_REGS_PFN 36
> 
>  #define HVM_PARAM_BUFIOREQ_PFN 6
>  #define HVM_PARAM_BUFIOREQ_EVTCHN 26
> @@ -197,6 +199,6 @@
>  /* Boolean: Enable altp2m */
>  #define HVM_PARAM_ALTP2M       35
> 
> -#define HVM_NR_PARAMS          36
> +#define HVM_NR_PARAMS          37
> 
>  #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
> --
> 1.8.3.1
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxx
> http://lists.xen.org/xen-devel
