
Re: [Xen-devel] [RFC PATCH 1/5] ioreq-server: centralize access to ioreq structures


  • To: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>
  • From: Paul Durrant <Paul.Durrant@xxxxxxxxxx>
  • Date: Thu, 30 Jan 2014 14:35:57 +0000
  • Accept-language: en-GB, en-US
  • Cc: "xen-devel@xxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxx>
  • Delivery-date: Thu, 30 Jan 2014 14:36:07 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>
  • Thread-index: AQHPHcZZpkBP0yklNEimIf65ti+4fpqdQ8qAgAARD7A=
  • Thread-topic: [Xen-devel] [RFC PATCH 1/5] ioreq-server: centralize access to ioreq structures

> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: 30 January 2014 14:32
> To: Paul Durrant
> Cc: xen-devel@xxxxxxxxxxxxx
> Subject: Re: [Xen-devel] [RFC PATCH 1/5] ioreq-server: centralize access to
> ioreq structures
> 
> On 30/01/14 14:19, Paul Durrant wrote:
> > To simplify creation of the ioreq server abstraction in a
> > subsequent patch, this patch centralizes all use of the shared
> > ioreq structure and the buffered ioreq ring to the source module
> > xen/arch/x86/hvm/hvm.c.
> > Also, re-work hvm_send_assist_req() slightly to complete IO
> > immediately in the case where there is no emulator (i.e. the shared
> > IOREQ ring has not been set). This should handle the case currently
> > covered by has_dm in hvmemul_do_io().
> >
> > Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> > ---
> >  xen/arch/x86/hvm/emulate.c        |   40 +++------------
> >  xen/arch/x86/hvm/hvm.c            |   98 ++++++++++++++++++++++++++++++++++++-
> >  xen/arch/x86/hvm/io.c             |   94 +----------------------------------
> >  xen/include/asm-x86/hvm/hvm.h     |    3 +-
> >  xen/include/asm-x86/hvm/support.h |    9 ----
> >  5 files changed, 108 insertions(+), 136 deletions(-)
> >
> > diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> > index 868aa1d..d1d3a6f 100644
> > --- a/xen/arch/x86/hvm/emulate.c
> > +++ b/xen/arch/x86/hvm/emulate.c
> > @@ -57,24 +57,11 @@ static int hvmemul_do_io(
> >      int value_is_ptr = (p_data == NULL);
> >      struct vcpu *curr = current;
> >      struct hvm_vcpu_io *vio;
> > -    ioreq_t *p = get_ioreq(curr);
> > -    ioreq_t _ioreq;
> > +    ioreq_t p[1];
> 
> I know it will make the patch slightly larger by modifying the
> indirection of p, but having an array of 1 item on the stack seems silly.
> 

I'm following the style adopted in io.c, and it's entirely to keep the patch as
small as possible :-)
I agree it's a bit silly, but I think that sort of change is better kept to a
separate patch. I can add one to the series when I come to submit the patches
for real.
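
For illustration only (not part of this patch), the difference in indirection
being discussed amounts to:

    /* style used here, mirroring io.c: a 1-element array on the stack */
    ioreq_t p[1];
    p->dir = IOREQ_WRITE;                 /* p already decays to a pointer */
    (void)hvm_send_assist_req(curr, p);

    /* plain struct instead: same object, explicit address-of at the call */
    ioreq_t q;
    q.dir = IOREQ_WRITE;
    (void)hvm_send_assist_req(curr, &q);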

  Paul

> >      unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
> >      p2m_type_t p2mt;
> >      struct page_info *ram_page;
> >      int rc;
> > -    bool_t has_dm = 1;
> > -
> > -    /*
> > -     * Domains without a backing DM, don't have an ioreq page.  Just
> > -     * point to a struct on the stack, initialising the state as needed.
> > -     */
> > -    if ( !p )
> > -    {
> > -        has_dm = 0;
> > -        p = &_ioreq;
> > -        p->state = STATE_IOREQ_NONE;
> > -    }
> >
> >      /* Check for paged out page */
> >      ram_page = get_page_from_gfn(curr->domain, ram_gfn, &p2mt, P2M_UNSHARE);
> > @@ -173,15 +160,6 @@ static int hvmemul_do_io(
> >          return X86EMUL_UNHANDLEABLE;
> >      }
> >
> > -    if ( p->state != STATE_IOREQ_NONE )
> > -    {
> > -        gdprintk(XENLOG_WARNING, "WARNING: io already pending (%d)?\n",
> > -                 p->state);
> > -        if ( ram_page )
> > -            put_page(ram_page);
> > -        return X86EMUL_UNHANDLEABLE;
> > -    }
> > -
> >      vio->io_state =
> >          (p_data == NULL) ? HVMIO_dispatched : HVMIO_awaiting_completion;
> >      vio->io_size = size;
> > @@ -193,6 +171,7 @@ static int hvmemul_do_io(
> >      if ( vio->mmio_retrying )
> >          *reps = 1;
> >
> > +    p->state = STATE_IOREQ_NONE;
> >      p->dir = dir;
> >      p->data_is_ptr = value_is_ptr;
> >      p->type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO;
> > @@ -232,20 +211,15 @@ static int hvmemul_do_io(
> >              vio->io_state = HVMIO_handle_mmio_awaiting_completion;
> >          break;
> >      case X86EMUL_UNHANDLEABLE:
> > -        /* If there is no backing DM, just ignore accesses */
> > -        if ( !has_dm )
> > +        rc = X86EMUL_RETRY;
> > +        if ( !hvm_send_assist_req(curr, p) )
> >          {
> >              rc = X86EMUL_OKAY;
> >              vio->io_state = HVMIO_none;
> >          }
> > -        else
> > -        {
> > -            rc = X86EMUL_RETRY;
> > -            if ( !hvm_send_assist_req(curr) )
> > -                vio->io_state = HVMIO_none;
> > -            else if ( p_data == NULL )
> > -                rc = X86EMUL_OKAY;
> > -        }
> > +        else if ( p_data == NULL )
> > +            rc = X86EMUL_OKAY;
> > +
> >          break;
> >      default:
> >          BUG();
> > diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> > index 69f7e74..71a44db 100644
> > --- a/xen/arch/x86/hvm/hvm.c
> > +++ b/xen/arch/x86/hvm/hvm.c
> > @@ -345,6 +345,14 @@ void hvm_migrate_pirqs(struct vcpu *v)
> >      spin_unlock(&d->event_lock);
> >  }
> >
> > +static ioreq_t *get_ioreq(struct vcpu *v)
> > +{
> > +    struct domain *d = v->domain;
> > +    shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
> 
> newline here...
> 
> > +    ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
> 
> .. and here.  (I realise that this is just code motion, but might as
> well take the opportunity to fix the style.)
> 
> > +    return p ? &p->vcpu_ioreq[v->vcpu_id] : NULL;
> > +}
> > +
> >  void hvm_do_resume(struct vcpu *v)
> >  {
> >      ioreq_t *p;
> > @@ -1287,7 +1295,86 @@ void hvm_vcpu_down(struct vcpu *v)
> >      }
> >  }
> >
> > -bool_t hvm_send_assist_req(struct vcpu *v)
> > +int hvm_buffered_io_send(ioreq_t *p)
> > +{
> > +    struct vcpu *v = current;
> > +    struct hvm_ioreq_page *iorp = &v->domain->arch.hvm_domain.buf_ioreq;
> > +    buffered_iopage_t *pg = iorp->va;
> > +    buf_ioreq_t bp;
> > +    /* Timeoffset sends 64b data, but no address. Use two consecutive slots. */
> > +    int qw = 0;
> > +
> > +    /* Ensure buffered_iopage fits in a page */
> > +    BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
> > +
> > +    /*
> > +     * Return 0 for the cases we can't deal with:
> > +     *  - 'addr' is only a 20-bit field, so we cannot address beyond 1MB
> > +     *  - we cannot buffer accesses to guest memory buffers, as the guest
> > +     *    may expect the memory buffer to be synchronously accessed
> > +     *  - the count field is usually used with data_is_ptr and since we don't
> > +     *    support data_is_ptr we do not waste space for the count field either
> > +     */
> > +    if ( (p->addr > 0xffffful) || p->data_is_ptr || (p->count != 1) )
> > +        return 0;
> > +
> > +    bp.type = p->type;
> > +    bp.dir  = p->dir;
> > +    switch ( p->size )
> > +    {
> > +    case 1:
> > +        bp.size = 0;
> > +        break;
> > +    case 2:
> > +        bp.size = 1;
> > +        break;
> > +    case 4:
> > +        bp.size = 2;
> > +        break;
> > +    case 8:
> > +        bp.size = 3;
> > +        qw = 1;
> > +        break;
> > +    default:
> > +        gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p-
> >size);
> > +        return 0;
> > +    }
> > +
> > +    bp.data = p->data;
> > +    bp.addr = p->addr;
> > +
> > +    spin_lock(&iorp->lock);
> > +
> > +    if ( (pg->write_pointer - pg->read_pointer) >=
> > +         (IOREQ_BUFFER_SLOT_NUM - qw) )
> > +    {
> > +        /* The queue is full: send the iopacket through the normal path. */
> > +        spin_unlock(&iorp->lock);
> > +        return 0;
> > +    }
> > +
> > +    memcpy(&pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
> > +           &bp, sizeof(bp));
> > +
> > +    if ( qw )
> > +    {
> > +        bp.data = p->data >> 32;
> > +        memcpy(&pg->buf_ioreq[(pg->write_pointer+1) % IOREQ_BUFFER_SLOT_NUM],
> > +               &bp, sizeof(bp));
> > +    }
> > +
> > +    /* Make the ioreq_t visible /before/ write_pointer. */
> > +    wmb();
> > +    pg->write_pointer += qw ? 2 : 1;
> > +
> > +    notify_via_xen_event_channel(v->domain,
> > +            v->domain->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN]);
> > +    spin_unlock(&iorp->lock);
> > +
> > +    return 1;
> > +}
> > +
> > +bool_t hvm_send_assist_req(struct vcpu *v, ioreq_t *proto_p)
> >  {
> >      ioreq_t *p;
> >
> > @@ -1305,6 +1392,15 @@ bool_t hvm_send_assist_req(struct vcpu *v)
> >          return 0;
> >      }
> >
> > +    p->dir = proto_p->dir;
> > +    p->data_is_ptr = proto_p->data_is_ptr;
> > +    p->type = proto_p->type;
> > +    p->size = proto_p->size;
> > +    p->addr = proto_p->addr;
> > +    p->count = proto_p->count;
> > +    p->df = proto_p->df;
> > +    p->data = proto_p->data;
> > +
> >      prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);
> >
> >      /*
> > diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
> > index bf6309d..576641c 100644
> > --- a/xen/arch/x86/hvm/io.c
> > +++ b/xen/arch/x86/hvm/io.c
> > @@ -46,85 +46,6 @@
> >  #include <xen/iocap.h>
> >  #include <public/hvm/ioreq.h>
> >
> > -int hvm_buffered_io_send(ioreq_t *p)
> > -{
> > -    struct vcpu *v = current;
> > -    struct hvm_ioreq_page *iorp = &v->domain->arch.hvm_domain.buf_ioreq;
> > -    buffered_iopage_t *pg = iorp->va;
> > -    buf_ioreq_t bp;
> > -    /* Timeoffset sends 64b data, but no address. Use two consecutive slots. */
> > -    int qw = 0;
> > -
> > -    /* Ensure buffered_iopage fits in a page */
> > -    BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
> > -
> > -    /*
> > -     * Return 0 for the cases we can't deal with:
> > -     *  - 'addr' is only a 20-bit field, so we cannot address beyond 1MB
> > -     *  - we cannot buffer accesses to guest memory buffers, as the guest
> > -     *    may expect the memory buffer to be synchronously accessed
> > -     *  - the count field is usually used with data_is_ptr and since we don't
> > -     *    support data_is_ptr we do not waste space for the count field either
> > -     */
> > -    if ( (p->addr > 0xffffful) || p->data_is_ptr || (p->count != 1) )
> > -        return 0;
> > -
> > -    bp.type = p->type;
> > -    bp.dir  = p->dir;
> > -    switch ( p->size )
> > -    {
> > -    case 1:
> > -        bp.size = 0;
> > -        break;
> > -    case 2:
> > -        bp.size = 1;
> > -        break;
> > -    case 4:
> > -        bp.size = 2;
> > -        break;
> > -    case 8:
> > -        bp.size = 3;
> > -        qw = 1;
> > -        break;
> > -    default:
> > -        gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p-
> >size);
> > -        return 0;
> > -    }
> > -
> > -    bp.data = p->data;
> > -    bp.addr = p->addr;
> > -
> > -    spin_lock(&iorp->lock);
> > -
> > -    if ( (pg->write_pointer - pg->read_pointer) >=
> > -         (IOREQ_BUFFER_SLOT_NUM - qw) )
> > -    {
> > -        /* The queue is full: send the iopacket through the normal path. */
> > -        spin_unlock(&iorp->lock);
> > -        return 0;
> > -    }
> > -
> > -    memcpy(&pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
> > -           &bp, sizeof(bp));
> > -
> > -    if ( qw )
> > -    {
> > -        bp.data = p->data >> 32;
> > -        memcpy(&pg->buf_ioreq[(pg->write_pointer+1) % IOREQ_BUFFER_SLOT_NUM],
> > -               &bp, sizeof(bp));
> > -    }
> > -
> > -    /* Make the ioreq_t visible /before/ write_pointer. */
> > -    wmb();
> > -    pg->write_pointer += qw ? 2 : 1;
> > -
> > -    notify_via_xen_event_channel(v->domain,
> > -            v->domain->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN]);
> > -    spin_unlock(&iorp->lock);
> > -
> > -    return 1;
> > -}
> > -
> >  void send_timeoffset_req(unsigned long timeoff)
> >  {
> >      ioreq_t p[1];
> > @@ -150,25 +71,14 @@ void send_timeoffset_req(unsigned long timeoff)
> >  void send_invalidate_req(void)
> >  {
> >      struct vcpu *v = current;
> > -    ioreq_t *p = get_ioreq(v);
> > -
> > -    if ( !p )
> > -        return;
> > -
> > -    if ( p->state != STATE_IOREQ_NONE )
> > -    {
> > -        gdprintk(XENLOG_ERR, "WARNING: send invalidate req with something "
> > -                 "already pending (%d)?\n", p->state);
> > -        domain_crash(v->domain);
> > -        return;
> > -    }
> > +    ioreq_t p[1];
> 
> This can all be reduced to a single item, and even using C structure
> initialisation rather than 4 explicit assignments.
> 
> ~Andrew
> 
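Something along these lines, then (untested sketch of the C99-initialiser
version, field values carried over from the existing code):

    void send_invalidate_req(void)
    {
        struct vcpu *v = current;
        ioreq_t p = {
            .type = IOREQ_TYPE_INVALIDATE,
            .size = 4,
            .dir  = IOREQ_WRITE,
            .data = ~0UL, /* flush all */
        };

        (void)hvm_send_assist_req(v, &p);
    }
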
> >
> >      p->type = IOREQ_TYPE_INVALIDATE;
> >      p->size = 4;
> >      p->dir = IOREQ_WRITE;
> >      p->data = ~0UL; /* flush all */
> >
> > -    (void)hvm_send_assist_req(v);
> > +    (void)hvm_send_assist_req(v, p);
> >  }
> >
> >  int handle_mmio(void)
> > diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> > index ccca5df..4e8fee8 100644
> > --- a/xen/include/asm-x86/hvm/hvm.h
> > +++ b/xen/include/asm-x86/hvm/hvm.h
> > @@ -26,6 +26,7 @@
> >  #include <asm/hvm/asid.h>
> >  #include <public/domctl.h>
> >  #include <public/hvm/save.h>
> > +#include <public/hvm/ioreq.h>
> >  #include <asm/mm.h>
> >
> >  /* Interrupt acknowledgement sources. */
> > @@ -223,7 +224,7 @@ int prepare_ring_for_helper(struct domain *d, unsigned long gmfn,
> >                              struct page_info **_page, void **_va);
> >  void destroy_ring_for_helper(void **_va, struct page_info *page);
> >
> > -bool_t hvm_send_assist_req(struct vcpu *v);
> > +bool_t hvm_send_assist_req(struct vcpu *v, ioreq_t *p);
> >
> >  void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
> >  int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);
> > diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
> > index 3529499..b6af3c5 100644
> > --- a/xen/include/asm-x86/hvm/support.h
> > +++ b/xen/include/asm-x86/hvm/support.h
> > @@ -22,19 +22,10 @@
> >  #define __ASM_X86_HVM_SUPPORT_H__
> >
> >  #include <xen/types.h>
> > -#include <public/hvm/ioreq.h>
> >  #include <xen/sched.h>
> >  #include <xen/hvm/save.h>
> >  #include <asm/processor.h>
> >
> > -static inline ioreq_t *get_ioreq(struct vcpu *v)
> > -{
> > -    struct domain *d = v->domain;
> > -    shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
> > -    ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
> > -    return p ? &p->vcpu_ioreq[v->vcpu_id] : NULL;
> > -}
> > -
> >  #define HVM_DELIVER_NO_ERROR_CODE  -1
> >
> >  #ifndef NDEBUG


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

