
[Xen-devel] [PATCH v2 1/4] VMX: Properly handle PI when all the assigned devices are removed



This patch handles the corner case in which the last assigned device
is removed from the domain. In that case we need to handle the PI
descriptor and the per-CPU blocking lists carefully, to make sure that:
- All PI descriptors are in the right state the next time a device
is assigned to the domain. This is achieved by keeping all the PI
hooks in place, so the PI descriptor keeps being updated during
scheduling and hence stays up to date.
- No vCPUs of the domain remain on the per-CPU blocking lists.
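
For reference, below is a minimal, standalone model of the handshake this
patch introduces (an illustrative user-space sketch, not Xen code; the
toy_* names are made up). It shows how taking the per-vCPU
pi_hotplug_lock and checking pi_blocking_cleaned_up in vmx_vcpu_block()
keeps a vCPU off the blocking list once vmx_pi_blocking_cleanup() has
marked it:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
    pthread_mutex_t hotplug_lock;  /* models pi_hotplug_lock */
    bool cleaned_up;               /* models pi_blocking_cleaned_up */
    bool on_blocking_list;         /* models pi_blocking.list membership */
};

/* Models vmx_vcpu_block(): refuse to queue a vCPU once cleanup has run. */
static bool toy_block(struct toy_vcpu *v)
{
    bool queued = false;

    pthread_mutex_lock(&v->hotplug_lock);
    if ( !v->cleaned_up )
    {
        v->on_blocking_list = true;  /* list_add_tail() in the real code */
        queued = true;
    }
    pthread_mutex_unlock(&v->hotplug_lock);

    return queued;
}

/* Models vmx_pi_blocking_cleanup(): mark the vCPU, then drain it. */
static void toy_cleanup(struct toy_vcpu *v)
{
    pthread_mutex_lock(&v->hotplug_lock);
    v->cleaned_up = true;            /* fences off later toy_block() calls */
    v->on_blocking_list = false;     /* list_del() in the real code */
    pthread_mutex_unlock(&v->hotplug_lock);
}

int main(void)
{
    struct toy_vcpu v = { .hotplug_lock = PTHREAD_MUTEX_INITIALIZER };

    printf("queued before cleanup: %d\n", toy_block(&v));  /* prints 1 */
    toy_cleanup(&v);
    printf("queued after cleanup:  %d\n", toy_block(&v));  /* prints 0 */

    return 0;
}

Both paths take the same lock, so the flag check and the list update are
serialized against each other, which is the property the patch relies on.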

Signed-off-by: Feng Wu <feng.wu@xxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmx.c         | 75 +++++++++++++++++++++++++++++++-------
 xen/include/asm-x86/hvm/vmx/vmcs.h |  3 ++
 2 files changed, 65 insertions(+), 13 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index bc4410f..65f5288 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -113,7 +113,19 @@ static void vmx_vcpu_block(struct vcpu *v)
                &per_cpu(vmx_pi_blocking, v->processor).lock;
     struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
 
-    spin_lock_irqsave(pi_blocking_list_lock, flags);
+    spin_lock_irqsave(&v->arch.hvm_vmx.pi_hotplug_lock, flags);
+    if ( unlikely(v->arch.hvm_vmx.pi_blocking_cleaned_up) )
+    {
+        /*
+         * The vCPU is about to be destroyed and has already been
+         * removed from the per-CPU blocking list (if it was blocking),
+         * so we shouldn't add it back to the list.
+         */
+        spin_unlock_irqrestore(&v->arch.hvm_vmx.pi_hotplug_lock, flags);
+        return;
+    }
+
+    spin_lock(pi_blocking_list_lock);
     old_lock = cmpxchg(&v->arch.hvm_vmx.pi_blocking.lock, NULL,
                        pi_blocking_list_lock);
 
@@ -126,7 +138,9 @@ static void vmx_vcpu_block(struct vcpu *v)
 
     list_add_tail(&v->arch.hvm_vmx.pi_blocking.list,
                   &per_cpu(vmx_pi_blocking, v->processor).list);
-    spin_unlock_irqrestore(pi_blocking_list_lock, flags);
+    spin_unlock(pi_blocking_list_lock);
+
+    spin_unlock_irqrestore(&v->arch.hvm_vmx.pi_hotplug_lock, flags);
 
     ASSERT(!pi_test_sn(pi_desc));
 
@@ -199,32 +213,65 @@ static void vmx_pi_do_resume(struct vcpu *v)
     spin_unlock_irqrestore(pi_blocking_list_lock, flags);
 }
 
+static void vmx_pi_blocking_cleanup(struct vcpu *v)
+{
+    unsigned long flags;
+    spinlock_t *pi_blocking_list_lock;
+
+    if ( !iommu_intpost )
+        return;
+
+    spin_lock_irqsave(&v->arch.hvm_vmx.pi_hotplug_lock, flags);
+    v->arch.hvm_vmx.pi_blocking_cleaned_up = 1;
+
+    pi_blocking_list_lock = v->arch.hvm_vmx.pi_blocking.lock;
+    if ( pi_blocking_list_lock == NULL )
+    {
+        spin_unlock_irqrestore(&v->arch.hvm_vmx.pi_hotplug_lock, flags);
+        return;
+    }
+
+    spin_lock(pi_blocking_list_lock);
+    if ( v->arch.hvm_vmx.pi_blocking.lock != NULL )
+    {
+        ASSERT(v->arch.hvm_vmx.pi_blocking.lock == pi_blocking_list_lock);
+        list_del(&v->arch.hvm_vmx.pi_blocking.list);
+        v->arch.hvm_vmx.pi_blocking.lock = NULL;
+    }
+    spin_unlock(pi_blocking_list_lock);
+    spin_unlock_irqrestore(&v->arch.hvm_vmx.pi_hotplug_lock, flags);
+}
+
 /* This function is called when pcidevs_lock is held */
 void vmx_pi_hooks_assign(struct domain *d)
 {
+    struct vcpu *v;
+
     if ( !iommu_intpost || !has_hvm_container_domain(d) )
         return;
 
-    ASSERT(!d->arch.hvm_domain.vmx.vcpu_block);
+    for_each_vcpu ( d, v )
+        v->arch.hvm_vmx.pi_blocking_cleaned_up = 0;
 
-    d->arch.hvm_domain.vmx.vcpu_block = vmx_vcpu_block;
-    d->arch.hvm_domain.vmx.pi_switch_from = vmx_pi_switch_from;
-    d->arch.hvm_domain.vmx.pi_switch_to = vmx_pi_switch_to;
-    d->arch.hvm_domain.vmx.pi_do_resume = vmx_pi_do_resume;
+    if ( !d->arch.hvm_domain.vmx.vcpu_block )
+    {
+        d->arch.hvm_domain.vmx.vcpu_block = vmx_vcpu_block;
+        d->arch.hvm_domain.vmx.pi_switch_from = vmx_pi_switch_from;
+        d->arch.hvm_domain.vmx.pi_switch_to = vmx_pi_switch_to;
+        d->arch.hvm_domain.vmx.pi_do_resume = vmx_pi_do_resume;
+    }
 }
 
 /* This function is called when pcidevs_lock is held */
 void vmx_pi_hooks_deassign(struct domain *d)
 {
+    struct vcpu *v;
+
     if ( !iommu_intpost || !has_hvm_container_domain(d) )
         return;
 
-    ASSERT(d->arch.hvm_domain.vmx.vcpu_block);
-
-    d->arch.hvm_domain.vmx.vcpu_block = NULL;
-    d->arch.hvm_domain.vmx.pi_switch_from = NULL;
-    d->arch.hvm_domain.vmx.pi_switch_to = NULL;
-    d->arch.hvm_domain.vmx.pi_do_resume = NULL;
+    for_each_vcpu ( d, v )
+        vmx_pi_blocking_cleanup(v);
 }
 
 static int vmx_domain_initialise(struct domain *d)
@@ -256,6 +303,8 @@ static int vmx_vcpu_initialise(struct vcpu *v)
 
     INIT_LIST_HEAD(&v->arch.hvm_vmx.pi_blocking.list);
 
+    spin_lock_init(&v->arch.hvm_vmx.pi_hotplug_lock);
+
     v->arch.schedule_tail    = vmx_do_resume;
     v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
     v->arch.ctxt_switch_to   = vmx_ctxt_switch_to;
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index b54f52f..3834f49 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -231,6 +231,9 @@ struct arch_vmx_struct {
      * pCPU and wakeup the related vCPU.
      */
     struct pi_blocking_vcpu pi_blocking;
+
+    spinlock_t            pi_hotplug_lock;
+    bool_t                pi_blocking_cleaned_up;
 };
 
 int vmx_create_vmcs(struct vcpu *v);
-- 
2.1.0

