[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH V3 01/23] x86/ioreq: Prepare IOREQ feature for making it common



On 30.11.2020 11:31, Oleksandr Tyshchenko wrote:
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -17,15 +17,15 @@
>   */
>  
>  #include <xen/ctype.h>
> +#include <xen/domain.h>
> +#include <xen/event.h>
>  #include <xen/init.h>
> +#include <xen/irq.h>
>  #include <xen/lib.h>
> -#include <xen/trace.h>
> +#include <xen/paging.h>
>  #include <xen/sched.h>
> -#include <xen/irq.h>
>  #include <xen/softirq.h>
> -#include <xen/domain.h>
> -#include <xen/event.h>
> -#include <xen/paging.h>
> +#include <xen/trace.h>
>  #include <xen/vpci.h>

Seeing this consolidation (thanks!), have you been able to figure
out what xen/ctype.h is needed for here? It looks to me as if it
could be dropped at the same time.

> @@ -601,7 +610,7 @@ static int hvm_ioreq_server_map_pages(struct 
> hvm_ioreq_server *s)
>      return rc;
>  }
>  
> -static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
> +void arch_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
>  {
>      hvm_unmap_ioreq_gfn(s, true);
>      hvm_unmap_ioreq_gfn(s, false);

How is this now different from ...

> @@ -674,6 +683,12 @@ static int hvm_ioreq_server_alloc_rangesets(struct 
> hvm_ioreq_server *s,
>      return rc;
>  }
>  
> +void arch_ioreq_server_enable(struct hvm_ioreq_server *s)
> +{
> +    hvm_remove_ioreq_gfn(s, false);
> +    hvm_remove_ioreq_gfn(s, true);
> +}

... this? IMO, if at all possible, there should be no such duplication
(i.e. at least have this one simply call the earlier one).

> @@ -1080,6 +1105,24 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain 
> *d, ioservid_t id,
>      return rc;
>  }
>  
> +/* Called with ioreq_server lock held */
> +int arch_ioreq_server_map_mem_type(struct domain *d,
> +                                   struct hvm_ioreq_server *s,
> +                                   uint32_t flags)
> +{
> +    int rc = p2m_set_ioreq_server(d, flags, s);
> +
> +    if ( rc == 0 && flags == 0 )
> +    {
> +        const struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +
> +        if ( read_atomic(&p2m->ioreq.entry_count) )
> +            p2m_change_entry_type_global(d, p2m_ioreq_server, p2m_ram_rw);
> +    }
> +
> +    return rc;
> +}
> +
>  /*
>   * Map or unmap an ioreq server to specific memory type. For now, only
>   * HVMMEM_ioreq_server is supported, and in the future new types can be
> @@ -1112,19 +1155,11 @@ int hvm_map_mem_type_to_ioreq_server(struct domain 
> *d, ioservid_t id,
>      if ( s->emulator != current->domain )
>          goto out;
>  
> -    rc = p2m_set_ioreq_server(d, flags, s);
> +    rc = arch_ioreq_server_map_mem_type(d, s, flags);
>  
>   out:
>      spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
>  
> -    if ( rc == 0 && flags == 0 )
> -    {
> -        struct p2m_domain *p2m = p2m_get_hostp2m(d);
> -
> -        if ( read_atomic(&p2m->ioreq.entry_count) )
> -            p2m_change_entry_type_global(d, p2m_ioreq_server, p2m_ram_rw);
> -    }
> -
>      return rc;
>  }

While you mention this change in the description, I'm still
missing justification as to why the change is safe to make. I
continue to think that, if at all possible,
p2m_change_entry_type_global() would be better not called inside
the locked region.

> @@ -1239,33 +1279,28 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
>      spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
>  }
>  
> -struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
> -                                                 ioreq_t *p)
> +int arch_ioreq_server_get_type_addr(const struct domain *d,
> +                                    const ioreq_t *p,
> +                                    uint8_t *type,
> +                                    uint64_t *addr)
>  {
> -    struct hvm_ioreq_server *s;
> -    uint32_t cf8;
> -    uint8_t type;
> -    uint64_t addr;
> -    unsigned int id;
> +    unsigned int cf8 = d->arch.hvm.pci_cf8;
>  
>      if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
> -        return NULL;
> -
> -    cf8 = d->arch.hvm.pci_cf8;
> +        return -EINVAL;

The caller cares about only a boolean. Either make the function
return bool, or (imo better, but others may like this less) have
it return "type" directly instead of through an indirection,
with e.g. negative values identifying errors (which then could
still be errno ones if you wish).

> --- a/xen/include/asm-x86/hvm/ioreq.h
> +++ b/xen/include/asm-x86/hvm/ioreq.h
> @@ -19,6 +19,25 @@
>  #ifndef __ASM_X86_HVM_IOREQ_H__
>  #define __ASM_X86_HVM_IOREQ_H__
>  
> +#define HANDLE_BUFIOREQ(s) \
> +    ((s)->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF)
> +
> +bool arch_vcpu_ioreq_completion(enum hvm_io_completion io_completion);
> +int arch_ioreq_server_map_pages(struct hvm_ioreq_server *s);
> +void arch_ioreq_server_unmap_pages(struct hvm_ioreq_server *s);
> +void arch_ioreq_server_enable(struct hvm_ioreq_server *s);
> +void arch_ioreq_server_disable(struct hvm_ioreq_server *s);
> +void arch_ioreq_server_destroy(struct hvm_ioreq_server *s);
> +int arch_ioreq_server_map_mem_type(struct domain *d,
> +                                   struct hvm_ioreq_server *s,
> +                                   uint32_t flags);
> +bool arch_ioreq_server_destroy_all(struct domain *d);
> +int arch_ioreq_server_get_type_addr(const struct domain *d,
> +                                    const ioreq_t *p,
> +                                    uint8_t *type,
> +                                    uint64_t *addr);
> +void arch_ioreq_domain_init(struct domain *d);
> +
>  bool hvm_io_pending(struct vcpu *v);
>  bool handle_hvm_io_completion(struct vcpu *v);
>  bool is_ioreq_server_page(struct domain *d, const struct page_info *page);

What's the plan here? Introduce them into the x86 header just
to later move the entire block into the common one? Wouldn't
it make sense to introduce the common header here right away?
Or do you expect to convert some of the simpler ones to inline
functions later on?

Jan



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.