
Re: [Xen-devel] [PATCH 2/2] x86/HVM: alter completion-needed checking



> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
> Sent: 09 April 2018 14:24
> To: xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Paul Durrant
> <Paul.Durrant@xxxxxxxxxx>; Jun Nakajima <jun.nakajima@xxxxxxxxx>; Kevin
> Tian <kevin.tian@xxxxxxxxx>; Juergen Gross <jgross@xxxxxxxx>
> Subject: [PATCH 2/2] x86/HVM: alter completion-needed checking
> 
> The function only looks at the ioreq_t, so pass it a pointer to just
> that. Also use it in hvmemul_do_io().
> 
> Suggested-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> ---
> RFC: While this avoids some open coding, generated code looks to be
>      worse for that particular case. I'm therefore not certain that we
>      want this change (or perhaps just the function name/signature
>      change portion).
> 

FAOD, my reason for suggesting it was to ensure that exactly the same test
implementation is used in all cases to decide whether I/O completion is needed.
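
To spell out for the archives the equivalence that the old open-coded test
relied on (a sketch of my own, not part of the patch; the invariants below
are taken from hvmemul_do_io()'s locals on the retry path, i.e.
p.state == STATE_IOREQ_READY, p.data_is_ptr == data_is_addr and
p.type == (is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO)):

    /*
     * Expanding !hvm_ioreq_needs_completion(&vio->io_req) under the
     * invariants above (De Morgan; the state check is always true here):
     *
     *    !(state == STATE_IOREQ_READY && !data_is_ptr &&
     *      (type != IOREQ_TYPE_PIO || dir != IOREQ_WRITE))
     * == data_is_ptr || (type == IOREQ_TYPE_PIO && dir == IOREQ_WRITE)
     * == data_is_addr || (!is_mmio && dir == IOREQ_WRITE)
     */
    else if ( !hvm_ioreq_needs_completion(&vio->io_req) )
        rc = X86EMUL_OKAY;

So the helper and the old open-coded test are interchangeable at that call
site; the only cost is that the helper re-reads the fields from vio->io_req
rather than using locals already in registers, which is presumably what the
RFC note about worse generated code refers to.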

  Paul

> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -282,11 +282,7 @@ static int hvmemul_do_io(
>              rc = hvm_send_ioreq(s, &p, 0);
>              if ( rc != X86EMUL_RETRY || currd->is_shutting_down )
>                  vio->io_req.state = STATE_IOREQ_NONE;
> -            /*
> -             * This effectively is !hvm_vcpu_io_need_completion(vio), slightly
> -             * optimized and using local variables we have available.
> -             */
> -            else if ( data_is_addr || (!is_mmio && dir == IOREQ_WRITE) )
> +            else if ( !hvm_ioreq_needs_completion(&vio->io_req) )
>                  rc = X86EMUL_OKAY;
>          }
>          break;
> --- a/xen/arch/x86/hvm/io.c
> +++ b/xen/arch/x86/hvm/io.c
> @@ -89,7 +89,7 @@ bool hvm_emulate_one_insn(hvm_emulate_va
> 
>      rc = hvm_emulate_one(&ctxt);
> 
> -    if ( hvm_vcpu_io_need_completion(vio) )
> +    if ( hvm_ioreq_needs_completion(&vio->io_req) )
>          vio->io_completion = HVMIO_mmio_completion;
>      else
>          vio->mmio_access = (struct npfec){};
> @@ -142,7 +142,7 @@ bool handle_pio(uint16_t port, unsigned
> 
>      rc = hvmemul_do_pio_buffer(port, size, dir, &data);
> 
> -    if ( hvm_vcpu_io_need_completion(vio) )
> +    if ( hvm_ioreq_needs_completion(&vio->io_req) )
>          vio->io_completion = HVMIO_pio_completion;
> 
>      switch ( rc )
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -110,15 +110,15 @@ bool hvm_io_pending(struct vcpu *v)
>  static void hvm_io_assist(struct hvm_ioreq_vcpu *sv, uint64_t data)
>  {
>      struct vcpu *v = sv->vcpu;
> -    struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
> +    ioreq_t *ioreq = &v->arch.hvm_vcpu.hvm_io.io_req;
> 
> -    if ( hvm_vcpu_io_need_completion(vio) )
> +    if ( hvm_ioreq_needs_completion(ioreq) )
>      {
> -        vio->io_req.state = STATE_IORESP_READY;
> -        vio->io_req.data = data;
> +        ioreq->state = STATE_IORESP_READY;
> +        ioreq->data = data;
>      }
>      else
> -        vio->io_req.state = STATE_IOREQ_NONE;
> +        ioreq->state = STATE_IOREQ_NONE;
> 
>      msix_write_completion(v);
>      vcpu_end_shutdown_deferral(v);
> --- a/xen/arch/x86/hvm/vmx/realmode.c
> +++ b/xen/arch/x86/hvm/vmx/realmode.c
> @@ -103,7 +103,7 @@ void vmx_realmode_emulate_one(struct hvm
> 
>      rc = hvm_emulate_one(hvmemul_ctxt);
> 
> -    if ( hvm_vcpu_io_need_completion(vio) )
> +    if ( hvm_ioreq_needs_completion(&vio->io_req) )
>          vio->io_completion = HVMIO_realmode_completion;
> 
>      if ( rc == X86EMUL_UNHANDLEABLE )
> --- a/xen/include/asm-x86/hvm/vcpu.h
> +++ b/xen/include/asm-x86/hvm/vcpu.h
> @@ -91,12 +91,11 @@ struct hvm_vcpu_io {
>      const struct g2m_ioport *g2m_ioport;
>  };
> 
> -static inline bool hvm_vcpu_io_need_completion(const struct hvm_vcpu_io *vio)
> +static inline bool hvm_ioreq_needs_completion(const ioreq_t *ioreq)
>  {
> -    return (vio->io_req.state == STATE_IOREQ_READY) &&
> -           !vio->io_req.data_is_ptr &&
> -           (vio->io_req.type != IOREQ_TYPE_PIO ||
> -            vio->io_req.dir != IOREQ_WRITE);
> +    return ioreq->state == STATE_IOREQ_READY &&
> +           !ioreq->data_is_ptr &&
> +           (ioreq->type != IOREQ_TYPE_PIO || ioreq->dir != IOREQ_WRITE);
>  }
> 
>  struct nestedvcpu {
> 
> 

