Re: [Xen-devel] [PATCH v7 1/1] xen-hvm.c: Add support for Xen access to vmport



On Mon, 20 Oct 2014, Don Slutz wrote:
> This adds synchronisation of the 6 vcpu registers (only the low
> 32 bits of each) that vmport.c needs between Xen and QEMU.
> 
> This avoids a second and third exchange between QEMU and Xen to
> fetch and put the 6 vcpu registers used by the code in vmport.c
> and vmmouse.c.
> 
> The registers are passed in the new shared page provided by
> HVM_PARAM_VMPORT_REGS_PFN.
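
(For context, a minimal sketch of the flow just described, mirroring the
code this patch adds to xen_hvm_init() below; error handling is elided
and all names are taken from the patch itself:)

    unsigned long pfn;

    /* Ask Xen for the PFN of the shared vmport-register page... */
    if (xen_get_vmport_regs_pfn(xen_xc, xen_domid, &pfn) == 0) {
        /* ...and map it so QEMU can read/write the guest's registers. */
        state->shared_vmport_page =
            xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                 PROT_READ | PROT_WRITE, pfn);
    }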
> 
> Add new array to XenIOState that allows selection of current_cpu by
> vcpu id.
> 
> Now pass XenIOState to handle_ioreq().
> 
> Add new routines regs_to_cpu(), regs_from_cpu(), and
> handle_vmport_ioreq().
> 
> Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>

Acked-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

I'll add it to my queue.


> Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> 
> v7:
>     Use QEMU_BUILD_BUG_ON; please do not define your own.
>       Switched to QEMU_BUILD_BUG_ON (see the short sketch after
>       this changelog entry).
> 
>   Dropped Reviewed-by: Paul Durrant (moved below ---)
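
(Re the QEMU_BUILD_BUG_ON item above: this is QEMU's stock compile-time
assertion, from include/qemu/compiler.h; the build fails when the
condition is true.  A sketch of the check the patch ends up with:)

    /* Fail the build if vmware_regs_t ever outgrows ioreq_t; the
     * one-page vmport-regs array must hold at least as many per-vcpu
     * slots as the one-page ioreq array does. */
    QEMU_BUILD_BUG_ON(sizeof(vmware_regs_t) > sizeof(ioreq_t));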
> 
> v6:
>     Do we need a forward declaration?
>       Nope. Dropped.
>     Added Reviewed-by.
> 
> v5:
>     vmware_ioreq_t struct is not really a request any more. Maybe
>     vmware_regs_t?
>       Renamed various parts from vmware_ioreq to vmware_regs.  Also
>       HVM_PARAM_VMPORT_IOREQ_PFN to HVM_PARAM_VMPORT_REGS_PFN.
>     The cpu_by_ioreq_id name implies the array is indexed by an id
>     carried in the ioreq.
>       Renamed cpu_by_ioreq_id to cpu_by_vcpu_id.
>     Is cpu_get_vmport_ioreq_from_shared_memory worth its own
>     function?
>       Moved in-line.
>     I don't think you need the barrier anyway.
>       Dropped the barrier.
>     Oh, I now realize you mean the same theoretical rather than
>     actual limit, in which case this can be a build time check
>     anyway.
>       Switched to a build time check and moved it to a better place.
>     You could avoid passing state to both of them by setting
>     current_cpu here, couldn't you?
>       Yes, moved state usage to handle_vmport_ioreq().
>     Error out if it fails with error != -ENOSYS.
>       Done.
> 
> v4:
>     Please try to get rid of the #ifdefs.
>       Moved 2 #ifdefs into hw/xen/xen_common.h
> 
> 
>  include/hw/xen/xen_common.h |  15 ++++++
>  xen-hvm.c                   | 108 ++++++++++++++++++++++++++++++++++++++++++--
>  2 files changed, 118 insertions(+), 5 deletions(-)
> 
> diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_common.h
> index 07731b9..95612a4 100644
> --- a/include/hw/xen/xen_common.h
> +++ b/include/hw/xen/xen_common.h
> @@ -164,4 +164,19 @@ void destroy_hvm_domain(bool reboot);
>  /* shutdown/destroy current domain because of an error */
>  void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
>  
> +#ifdef HVM_PARAM_VMPORT_REGS_PFN
> +static inline int xen_get_vmport_regs_pfn(XenXC xc, domid_t dom,
> +                                          unsigned long *vmport_regs_pfn)
> +{
> +    return xc_get_hvm_param(xc, dom, HVM_PARAM_VMPORT_REGS_PFN,
> +                            vmport_regs_pfn);
> +}
> +#else
> +static inline int xen_get_vmport_regs_pfn(XenXC xc, domid_t dom,
> +                                          unsigned long *vmport_regs_pfn)
> +{
> +    return -ENOSYS;
> +}
> +#endif
> +
>  #endif /* QEMU_HW_XEN_COMMON_H */
> diff --git a/xen-hvm.c b/xen-hvm.c
> index 05e522c..21f1cbb 100644
> --- a/xen-hvm.c
> +++ b/xen-hvm.c
> @@ -41,6 +41,29 @@ static MemoryRegion *framebuffer;
>  static bool xen_in_migration;
>  
>  /* Compatibility with older version */
> +
> +/* This allows QEMU to build on a system that has Xen 4.5 or earlier
> + * installed.  This is here (not in hw/xen/xen_common.h) because
> + * xen/hvm/ioreq.h needs to be included before this block and
> + * hw/xen/xen_common.h needs to be included before xen/hvm/ioreq.h.
> + */
> +#ifndef IOREQ_TYPE_VMWARE_PORT
> +#define IOREQ_TYPE_VMWARE_PORT  3
> +struct vmware_regs {
> +    uint32_t esi;
> +    uint32_t edi;
> +    uint32_t ebx;
> +    uint32_t ecx;
> +    uint32_t edx;
> +};
> +typedef struct vmware_regs vmware_regs_t;
> +
> +struct shared_vmport_iopage {
> +    struct vmware_regs vcpu_vmport_regs[1];
> +};
> +typedef struct shared_vmport_iopage shared_vmport_iopage_t;
> +#endif
> +
>  #if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
>  static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
>  {
> @@ -79,8 +102,10 @@ typedef struct XenPhysmap {
>  
>  typedef struct XenIOState {
>      shared_iopage_t *shared_page;
> +    shared_vmport_iopage_t *shared_vmport_page;
>      buffered_iopage_t *buffered_io_page;
>      QEMUTimer *buffered_io_timer;
> +    CPUState **cpu_by_vcpu_id;
>      /* the evtchn port for polling the notification, */
>      evtchn_port_t *ioreq_local_port;
>      /* evtchn local port for buffered io */
> @@ -773,7 +798,50 @@ static void cpu_ioreq_move(ioreq_t *req)
>      }
>  }
>  
> -static void handle_ioreq(ioreq_t *req)
> +static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
> +{
> +    X86CPU *cpu;
> +    CPUX86State *env;
> +
> +    cpu = X86_CPU(current_cpu);
> +    env = &cpu->env;
> +    env->regs[R_EAX] = req->data;
> +    env->regs[R_EBX] = vmport_regs->ebx;
> +    env->regs[R_ECX] = vmport_regs->ecx;
> +    env->regs[R_EDX] = vmport_regs->edx;
> +    env->regs[R_ESI] = vmport_regs->esi;
> +    env->regs[R_EDI] = vmport_regs->edi;
> +}
> +
> +static void regs_from_cpu(vmware_regs_t *vmport_regs)
> +{
> +    X86CPU *cpu = X86_CPU(current_cpu);
> +    CPUX86State *env = &cpu->env;
> +
> +    vmport_regs->ebx = env->regs[R_EBX];
> +    vmport_regs->ecx = env->regs[R_ECX];
> +    vmport_regs->edx = env->regs[R_EDX];
> +    vmport_regs->esi = env->regs[R_ESI];
> +    vmport_regs->edi = env->regs[R_EDI];
> +}
> +
> +static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
> +{
> +    vmware_regs_t *vmport_regs;
> +
> +    assert(state->shared_vmport_page);
> +    vmport_regs =
> +        &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
> +    QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));
> +
> +    current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
> +    regs_to_cpu(vmport_regs, req);
> +    cpu_ioreq_pio(req);
> +    regs_from_cpu(vmport_regs);
> +    current_cpu = NULL;
> +}
> +
> +static void handle_ioreq(XenIOState *state, ioreq_t *req)
>  {
>      if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
>              (req->size < sizeof (target_ulong))) {
> @@ -787,6 +855,9 @@ static void handle_ioreq(ioreq_t *req)
>          case IOREQ_TYPE_COPY:
>              cpu_ioreq_move(req);
>              break;
> +        case IOREQ_TYPE_VMWARE_PORT:
> +            handle_vmport_ioreq(state, req);
> +            break;
>          case IOREQ_TYPE_TIMEOFFSET:
>              break;
>          case IOREQ_TYPE_INVALIDATE:
> @@ -828,7 +899,7 @@ static int handle_buffered_iopage(XenIOState *state)
>              req.data |= ((uint64_t)buf_req->data) << 32;
>          }
>  
> -        handle_ioreq(&req);
> +        handle_ioreq(state, &req);
>  
>          xen_mb();
>          state->buffered_io_page->read_pointer += qw ? 2 : 1;
> @@ -857,14 +928,16 @@ static void cpu_handle_ioreq(void *opaque)
>  
>      handle_buffered_iopage(state);
>      if (req) {
> -        handle_ioreq(req);
> +        handle_ioreq(state, req);
>  
>          if (req->state != STATE_IOREQ_INPROCESS) {
>              fprintf(stderr, "Badness in I/O request ... not in service?!: "
>                      "%x, ptr: %x, port: %"PRIx64", "
> -                    "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
> +                    "data: %"PRIx64", count: %" FMT_ioreq_size
> +                    ", size: %" FMT_ioreq_size
> +                    ", type: %"FMT_ioreq_size"\n",
>                      req->state, req->data_is_ptr, req->addr,
> -                    req->data, req->count, req->size);
> +                    req->data, req->count, req->size, req->type);
>              destroy_hvm_domain(false);
>              return;
>          }
> @@ -904,6 +977,14 @@ static void xen_main_loop_prepare(XenIOState *state)
>                                                   state);
>  
>      if (evtchn_fd != -1) {
> +        CPUState *cpu_state;
> +
> +        DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
> +        CPU_FOREACH(cpu_state) {
> +            DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
> +                    __func__, cpu_state->cpu_index, cpu_state);
> +            state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
> +        }
>          qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
>      }
>  }
> @@ -1020,6 +1101,20 @@ int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
>                   errno, xen_xc);
>      }
>  
> +    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
> +    if (!rc) {
> +        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
> +        state->shared_vmport_page =
> +            xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
> +                                 PROT_READ|PROT_WRITE, ioreq_pfn);
> +        if (state->shared_vmport_page == NULL) {
> +            hw_error("map shared vmport IO page returned error %d handle="
> +                     XC_INTERFACE_FMT, errno, xen_xc);
> +        }
> +    } else if (rc != -ENOSYS) {
> +        hw_error("get vmport regs pfn returned error %d, rc=%d", errno, rc);
> +    }
> +
>      xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
>      DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn);
>      state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
> @@ -1028,6 +1123,9 @@ int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
>          hw_error("map buffered IO page returned error %d", errno);
>      }
>  
> +    /* Note: cpus is empty at this point in init */
> +    state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *));
> +
>      state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t));
>  
>      /* FIXME: how about if we overflow the page here? */
> -- 
> 1.8.4
> 
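As background on why exactly these six registers need syncing: vmport.c
emulates the VMware backdoor I/O port, whose protocol passes everything
in registers.  A guest-side sketch, assuming the well-known VMware
constants (magic 0x564D5868 on port 0x5658, command 10 = get version);
none of this is part of the patch:

    #include <stdint.h>

    #define VMPORT_MAGIC 0x564D5868u   /* "VMXh" */
    #define VMPORT_PORT  0x5658u

    /* One guest-side backdoor call (GETVERSION, command 10).  All
     * arguments and results travel in EAX/EBX/ECX/EDX (plus ESI/EDI
     * for other commands), which is why Xen has to hand QEMU the full
     * register set in a single exchange. */
    static uint32_t vmport_get_version(void)
    {
        uint32_t eax = VMPORT_MAGIC;
        uint32_t ebx = ~VMPORT_MAGIC;
        uint32_t ecx = 10;             /* command: get version */

        __asm__ volatile ("inl %%dx, %%eax"
                          : "+a" (eax), "+b" (ebx), "+c" (ecx)
                          : "d" (VMPORT_PORT));
        return eax;                    /* version; EBX gets the magic */
    }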

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel