[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 1/3] VMX: Properly adjust the status of pi descriptor



> From: Wu, Feng
> Sent: Friday, May 20, 2016 4:54 PM
> 
> When the last assigned device is detached from the domain, all
> the PI related hooks are removed then, however, the vCPU can be
> blocked, switched to another pCPU, etc., all without PI being
> aware of it. The next time we attach another device to the domain,
> which makes the PI related hooks available again, the status
> of the pi descriptor is stale, so we need to properly adjust
> it.

Instead of adjusting the pi descriptor in multiple places, can we
simply reset the status (including removing it from the blocking list)
right when the hooks are removed at detach?

> 
> Signed-off-by: Feng Wu <feng.wu@xxxxxxxxx>
> ---
>  xen/arch/x86/hvm/vmx/vmx.c         | 29
> ++++++++++++++++++++++++++---
>  xen/include/asm-x86/hvm/vmx/vmcs.h |  1 +
>  2 files changed, 27 insertions(+), 3 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index bc4410f..3fbc7b1 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -107,12 +107,22 @@ void vmx_pi_per_cpu_init(unsigned int cpu)
>  static void vmx_vcpu_block(struct vcpu *v)
>  {
>      unsigned long flags;
> -    unsigned int dest;
> +    unsigned int dest = cpu_physical_id(v->processor);
>      spinlock_t *old_lock;
>      spinlock_t *pi_blocking_list_lock =
>               &per_cpu(vmx_pi_blocking, v->processor).lock;
>      struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
> 
> +    if (v->arch.hvm_vmx.pi_back_from_hotplug == 1)
> +    {
> +        write_atomic(&pi_desc->ndst,
> +                     x2apic_enabled ? dest : MASK_INSR(dest, 
> PI_xAPIC_NDST_MASK));
> +        write_atomic(&pi_desc->nv, posted_intr_vector);
> +        pi_clear_sn(pi_desc);
> +
> +        v->arch.hvm_vmx.pi_back_from_hotplug = 0;
> +    }
> +
>      spin_lock_irqsave(pi_blocking_list_lock, flags);
>      old_lock = cmpxchg(&v->arch.hvm_vmx.pi_blocking.lock, NULL,
>                         pi_blocking_list_lock);
> @@ -130,8 +140,6 @@ static void vmx_vcpu_block(struct vcpu *v)
> 
>      ASSERT(!pi_test_sn(pi_desc));
> 
> -    dest = cpu_physical_id(v->processor);
> -
>      ASSERT(pi_desc->ndst ==
>             (x2apic_enabled ? dest : MASK_INSR(dest, PI_xAPIC_NDST_MASK)));
> 
> @@ -164,6 +172,16 @@ static void vmx_pi_do_resume(struct vcpu *v)
>      unsigned long flags;
>      spinlock_t *pi_blocking_list_lock;
>      struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
> +    unsigned int dest = cpu_physical_id(v->processor);
> +
> +    if (v->arch.hvm_vmx.pi_back_from_hotplug == 1)
> +    {
> +        write_atomic(&pi_desc->ndst,
> +                     x2apic_enabled ? dest : MASK_INSR(dest, 
> PI_xAPIC_NDST_MASK));
> +        pi_clear_sn(pi_desc);
> +
> +        v->arch.hvm_vmx.pi_back_from_hotplug = 0;
> +    }
> 
>      ASSERT(!test_bit(_VPF_blocked, &v->pause_flags));
> 
> @@ -202,9 +220,14 @@ static void vmx_pi_do_resume(struct vcpu *v)
>  /* This function is called when pcidevs_lock is held */
>  void vmx_pi_hooks_assign(struct domain *d)
>  {
> +    struct vcpu *v;
> +
>      if ( !iommu_intpost || !has_hvm_container_domain(d) )
>          return;
> 
> +    for_each_vcpu ( d, v )
> +        v->arch.hvm_vmx.pi_back_from_hotplug = 1;
> +
>      ASSERT(!d->arch.hvm_domain.vmx.vcpu_block);
> 
>      d->arch.hvm_domain.vmx.vcpu_block = vmx_vcpu_block;
> diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h
> b/xen/include/asm-x86/hvm/vmx/vmcs.h
> index b54f52f..3feb60a 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> @@ -231,6 +231,7 @@ struct arch_vmx_struct {
>       * pCPU and wakeup the related vCPU.
>       */
>      struct pi_blocking_vcpu pi_blocking;
> +    int pi_back_from_hotplug;
>  };
> 
>  int vmx_create_vmcs(struct vcpu *v);
> --
> 2.1.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.