
Re: [Xen-devel] [RFC Patch v4 1/8] ioreq: remove most 'buf' parameter from static functions



> -----Original Message-----
> From: Chao Gao [mailto:chao.gao@xxxxxxxxx]
> Sent: 06 December 2017 07:50
> To: xen-devel@xxxxxxxxxxxxx
> Cc: Chao Gao <chao.gao@xxxxxxxxx>; Andrew Cooper
> <Andrew.Cooper3@xxxxxxxxxx>; Jan Beulich <jbeulich@xxxxxxxx>; Paul
> Durrant <Paul.Durrant@xxxxxxxxxx>
> Subject: [RFC Patch v4 1/8] ioreq: remove most 'buf' parameter from static
> functions
> 
> This is a preparation patch for supporting multiple IOREQ pages.
> No functional change.
> 
> Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
> ---
> v4:
>  -new
> ---
>  xen/arch/x86/hvm/ioreq.c | 48 +++++++++++++++++++++++-------------------------
>  1 file changed, 23 insertions(+), 25 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> index d991ac9..a879f20 100644
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -237,10 +237,9 @@ static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s, gfn_t gfn)
>      set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
>  }
> 
> -static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
> +static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s,
> +                                struct hvm_ioreq_page *iorp)
>  {
> -    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> -

I don't really like this approach. I'd prefer swapping the bool for an
unsigned page index, following the convention adopted in
hvm_get_ioreq_server_frame(), for which macros already exist: 0 equates to
the bufioreq page, and 1+ to the per-vcpu ioreq struct pages.
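
Roughly, something along these lines (just a sketch; I'm assuming the
XENMEM_resource_ioreq_server_frame_bufioreq / _ioreq(n) macros that
hvm_get_ioreq_server_frame() works in terms of, so adjust the names if I
have misremembered them):

    static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s,
                                    unsigned int idx)
    {
        /* Index 0 is the bufioreq page; 1+ are the ioreq struct pages. */
        struct hvm_ioreq_page *iorp = idx ? &s->ioreq : &s->bufioreq;

        if ( gfn_eq(iorp->gfn, INVALID_GFN) )
            return;

        /* ... remainder of the body unchanged from the patch ... */
    }

with call sites then reading e.g.:

    hvm_unmap_ioreq_gfn(s, XENMEM_resource_ioreq_server_frame_bufioreq);
    hvm_unmap_ioreq_gfn(s, XENMEM_resource_ioreq_server_frame_ioreq(0));

That keeps the page selection in one place and extends naturally once
there is more than one ioreq struct page to index.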

  Paul

>      if ( gfn_eq(iorp->gfn, INVALID_GFN) )
>          return;
> 
> @@ -289,15 +288,15 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
>                                   &iorp->va);
> 
>      if ( rc )
> -        hvm_unmap_ioreq_gfn(s, buf);
> +        hvm_unmap_ioreq_gfn(s, iorp);
> 
>      return rc;
>  }
> 
> -static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
> +static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s,
> +                               struct hvm_ioreq_page *iorp)
>  {
>      struct domain *currd = current->domain;
> -    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> 
>      if ( iorp->page )
>      {
> @@ -344,10 +343,9 @@ static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
>      return 0;
>  }
> 
> -static void hvm_free_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
> +static void hvm_free_ioreq_mfn(struct hvm_ioreq_server *s,
> +                               struct hvm_ioreq_page *iorp)
>  {
> -    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> -
>      if ( !iorp->page )
>          return;
> 
> @@ -380,11 +378,11 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
>      return found;
>  }
> 
> -static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
> +static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s,
> +                                 struct hvm_ioreq_page *iorp)
> 
>  {
>      struct domain *d = s->domain;
> -    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> 
>      if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
>          return;
> @@ -395,10 +393,10 @@ static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
>      clear_page(iorp->va);
>  }
> 
> -static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
> +static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s,
> +                             struct hvm_ioreq_page *iorp)
>  {
>      struct domain *d = s->domain;
> -    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
>      int rc;
> 
>      if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
> @@ -550,36 +548,36 @@ static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s)
>          rc = hvm_map_ioreq_gfn(s, true);
> 
>      if ( rc )
> -        hvm_unmap_ioreq_gfn(s, false);
> +        hvm_unmap_ioreq_gfn(s, &s->ioreq);
> 
>      return rc;
>  }
> 
>  static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
>  {
> -    hvm_unmap_ioreq_gfn(s, true);
> -    hvm_unmap_ioreq_gfn(s, false);
> +    hvm_unmap_ioreq_gfn(s, &s->ioreq);
> +    hvm_unmap_ioreq_gfn(s, &s->bufioreq);
>  }
> 
>  static int hvm_ioreq_server_alloc_pages(struct hvm_ioreq_server *s)
>  {
>      int rc;
> 
> -    rc = hvm_alloc_ioreq_mfn(s, false);
> +    rc = hvm_alloc_ioreq_mfn(s, &s->ioreq);
> 
>      if ( !rc && (s->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF) )
> -        rc = hvm_alloc_ioreq_mfn(s, true);
> +        rc = hvm_alloc_ioreq_mfn(s, &s->bufioreq);
> 
>      if ( rc )
> -        hvm_free_ioreq_mfn(s, false);
> +        hvm_free_ioreq_mfn(s, &s->ioreq);
> 
>      return rc;
>  }
> 
>  static void hvm_ioreq_server_free_pages(struct hvm_ioreq_server *s)
>  {
> -    hvm_free_ioreq_mfn(s, true);
> -    hvm_free_ioreq_mfn(s, false);
> +    hvm_free_ioreq_mfn(s, &s->bufioreq);
> +    hvm_free_ioreq_mfn(s, &s->ioreq);
>  }
> 
>  static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
> @@ -646,8 +644,8 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
>      if ( s->enabled )
>          goto done;
> 
> -    hvm_remove_ioreq_gfn(s, false);
> -    hvm_remove_ioreq_gfn(s, true);
> +    hvm_remove_ioreq_gfn(s, &s->ioreq);
> +    hvm_remove_ioreq_gfn(s, &s->bufioreq);
> 
>      s->enabled = true;
> 
> @@ -667,8 +665,8 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s)
>      if ( !s->enabled )
>          goto done;
> 
> -    hvm_add_ioreq_gfn(s, true);
> -    hvm_add_ioreq_gfn(s, false);
> +    hvm_add_ioreq_gfn(s, &s->bufioreq);
> +    hvm_add_ioreq_gfn(s, &s->ioreq);
> 
>      s->enabled = false;
> 
> --
> 1.8.3.1



 

