
[Xen-devel] [PATCH for-4.13 v4] x86/vmx: always sync PIR to IRR before vmentry



When using posted interrupts on Intel hardware it's possible that the
vCPU resumes execution with a stale local APIC IRR register because,
depending on the interrupts to be injected, vlapic_has_pending_irq
might not be called, and thus the PIR won't be synced into the IRR.

Fix this by making sure the PIR is always synced into the IRR in
hvm_vcpu_has_pending_irq, regardless of which interrupts are pending.
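
As a purely illustrative aside (this is not the Xen implementation; the
names pir, irr and NR_VECTOR_WORDS below are invented for the example),
the PIR-to-IRR sync that vlapic_sync_pir_to_irr ends up performing via
the hvm_funcs.sync_pir_to_irr hook conceptually amounts to atomically
claiming the posted interrupt request bits and OR-ing them into the IRR
the guest will observe:

  /* Standalone sketch only; all names here are made up for illustration. */
  #include <stdatomic.h>
  #include <stdint.h>
  #include <stdio.h>

  #define NR_VECTOR_WORDS 8  /* 256 vectors, 32 bits per word */

  /* Written by remote CPUs/the IOMMU when posting an interrupt. */
  static _Atomic uint32_t pir[NR_VECTOR_WORDS];
  /* Virtual APIC IRR as seen by the vCPU. */
  static uint32_t irr[NR_VECTOR_WORDS];

  static void sync_pir_to_irr(void)
  {
      for ( unsigned int i = 0; i < NR_VECTOR_WORDS; i++ )
      {
          /* Atomically take ownership of the pending bits... */
          uint32_t pending = atomic_exchange(&pir[i], 0);

          /* ...and make them visible in the IRR. */
          irr[i] |= pending;
      }
  }

  int main(void)
  {
      atomic_store(&pir[1], 1u << 3);   /* pretend vector 0x23 was posted */
      sync_pir_to_irr();
      printf("IRR word 1: %#x\n", (unsigned)irr[1]);  /* prints 0x8 */
      return 0;
  }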

Reported-by: Joe Jin <joe.jin@xxxxxxxxxx>
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Cc: Juergen Gross <jgross@xxxxxxxx>
---
Changes since v3:
 - Introduce and use vlapic_sync_pir_to_irr in order to sync PIR with
   IRR.
 - Do not move the call to vlapic_has_pending_irq in
   hvm_vcpu_has_pending_irq.
 - Remove the changes done to __vmx_deliver_posted_interrupt.

Changes since v2:
 - Raise a softirq if in interrupt context and the vCPU is the current
   one.
 - Use is_running instead of runnable.
 - Remove the call to vmx_sync_pir_to_irr in vmx_intr_assist and
   instead always call vlapic_has_pending_irq in
   hvm_vcpu_has_pending_irq.
---
 xen/arch/x86/hvm/irq.c           |  9 +++++++++
 xen/arch/x86/hvm/vlapic.c        | 10 ++--------
 xen/include/asm-x86/hvm/vlapic.h |  6 ++++++
 3 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index e03a87ad50..c684422b24 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -517,6 +517,15 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
     struct hvm_domain *plat = &v->domain->arch.hvm;
     int vector;
 
+    /*
+     * Always call vlapic_sync_pir_to_irr so that PIR is synced into IRR
+     * when using posted interrupts. Note that this is also done by
+     * vlapic_has_pending_irq, but depending on which interrupts are
+     * pending, hvm_vcpu_has_pending_irq will return early without calling
+     * vlapic_has_pending_irq.
+     */
+    vlapic_sync_pir_to_irr(v);
+
     if ( unlikely(v->nmi_pending) )
         return hvm_intack_nmi;
 
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 9466258d6f..6fcce95713 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -106,15 +106,9 @@ static void vlapic_clear_irr(int vector, struct vlapic *vlapic)
     vlapic_clear_vector(vector, &vlapic->regs->data[APIC_IRR]);
 }
 
-static void sync_pir_to_irr(struct vcpu *v)
-{
-    if ( hvm_funcs.sync_pir_to_irr )
-        alternative_vcall(hvm_funcs.sync_pir_to_irr, v);
-}
-
 static int vlapic_find_highest_irr(struct vlapic *vlapic)
 {
-    sync_pir_to_irr(vlapic_vcpu(vlapic));
+    vlapic_sync_pir_to_irr(vlapic_vcpu(vlapic));
 
     return vlapic_find_highest_vector(&vlapic->regs->data[APIC_IRR]);
 }
@@ -1493,7 +1487,7 @@ static int lapic_save_regs(struct vcpu *v, hvm_domain_context_t *h)
     if ( !has_vlapic(v->domain) )
         return 0;
 
-    sync_pir_to_irr(v);
+    vlapic_sync_pir_to_irr(v);
 
     return hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, vcpu_vlapic(v)->regs);
 }
diff --git a/xen/include/asm-x86/hvm/vlapic.h b/xen/include/asm-x86/hvm/vlapic.h
index dde66b4f0f..f0d5e3fbc9 100644
--- a/xen/include/asm-x86/hvm/vlapic.h
+++ b/xen/include/asm-x86/hvm/vlapic.h
@@ -150,4 +150,10 @@ bool_t vlapic_match_dest(
     const struct vlapic *target, const struct vlapic *source,
     int short_hand, uint32_t dest, bool_t dest_mode);
 
+static inline void vlapic_sync_pir_to_irr(struct vcpu *v)
+{
+    if ( hvm_funcs.sync_pir_to_irr )
+        alternative_vcall(hvm_funcs.sync_pir_to_irr, v);
+}
+
 #endif /* __ASM_X86_HVM_VLAPIC_H__ */
-- 
2.24.0

