[Xen-devel] [PATCH v2 07/10] x86/SVM: Add interrupt management code via AVIC



From: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>

Enabling AVIC implicitly disables the V_IRQ, V_INTR_PRIO, V_IGN_TPR,
and V_INTR_VECTOR fields in the VMCB control area. Therefore, this patch
introduces new interrupt injection code that goes via the AVIC backing page.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
---
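Note for reviewers: below is a minimal, self-contained sketch of the delivery
decision that svm_avic_deliver_posted_intr() in this patch implements. The
types and helpers here (fake_vcpu, test_and_set_irr, deliver) are illustrative
stand-ins, not Xen APIs; the real code uses vlapic_test_and_set_vector(),
vcpu_kick() and a write to MSR_AMD_AVIC_DOORBELL, and it additionally checks
the guest's EFLAGS.IF before queuing the vector.

/* Minimal model of the AVIC delivery decision (illustrative only). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define APIC_IRR_WORDS 8                 /* 256 vectors, 32 bits per word */

struct fake_vcpu {
    uint32_t irr[APIC_IRR_WORDS];        /* stands in for the backing-page IRR */
    bool is_running;                     /* vcpu currently executing on a pcpu */
    bool is_current;                     /* vcpu is the one we are running on */
};

/* Set the vector's IRR bit; return true if it was already pending. */
static bool test_and_set_irr(struct fake_vcpu *v, uint8_t vec)
{
    uint32_t mask = 1u << (vec & 31);
    bool was_set = v->irr[vec >> 5] & mask;

    v->irr[vec >> 5] |= mask;
    return was_set;
}

static void deliver(struct fake_vcpu *v, uint8_t vec)
{
    if ( test_and_set_irr(v, vec) )
        return;                          /* already pending, nothing to do */

    if ( v->is_running && !v->is_current )
        /* Real code: wrmsrl(MSR_AMD_AVIC_DOORBELL, target APIC ID). */
        printf("ring AVIC doorbell for vector %u\n", (unsigned)vec);
    else
        /* Real code: vcpu_kick(v); vector is noticed on the next VM entry. */
        printf("kick vcpu for vector %u\n", (unsigned)vec);
}

int main(void)
{
    struct fake_vcpu v = { .is_running = true, .is_current = false };

    deliver(&v, 0x30);                   /* first delivery: doorbell */
    deliver(&v, 0x30);                   /* repeat: already pending, no-op */
    return 0;
}

The doorbell write lets a vcpu that is already in guest mode pick the vector
up from the backing page without a VM exit; a vcpu that is not running needs
a kick so the vector is evaluated on its next VM entry.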
 xen/arch/x86/hvm/svm/avic.c        | 29 +++++++++++++++++++++++++++++
 xen/arch/x86/hvm/svm/intr.c        |  4 ++++
 xen/arch/x86/hvm/svm/svm.c         | 15 +++++++++++++--
 xen/include/asm-x86/hvm/svm/avic.h |  1 +
 xen/include/asm-x86/msr-index.h    |  1 +
 5 files changed, 48 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/avic.c b/xen/arch/x86/hvm/svm/avic.c
index 7cc10c313a..19caaeda53 100644
--- a/xen/arch/x86/hvm/svm/avic.c
+++ b/xen/arch/x86/hvm/svm/avic.c
@@ -522,6 +522,35 @@ void svm_avic_vmexit_do_noaccel(struct cpu_user_regs *regs)
     return;
 }
 
+void svm_avic_deliver_posted_intr(struct vcpu *v, u8 vec)
+{
+    struct vlapic *vlapic = vcpu_vlapic(v);
+
+    /* Fall back to the non-AVIC path if AVIC is not enabled for this vCPU. */
+    if ( !svm_avic_vcpu_enabled(v) )
+    {
+        if ( !vlapic_test_and_set_vector(vec, &vlapic->regs->data[APIC_IRR]) )
+            vcpu_kick(v);
+        return;
+    }
+
+    /* Do nothing if the guest has interrupts disabled (EFLAGS.IF clear). */
+    if ( !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
+        return;
+
+    if ( vlapic_test_and_set_vector(vec, &vlapic->regs->data[APIC_IRR]) )
+        return;
+
+    /*
+     * If the vcpu is running on another pcpu, ring the doorbell so it
+     * processes the interrupt immediately. Otherwise, kick it.
+     */
+    if ( v->is_running && (v != current) )
+        wrmsrl(MSR_AMD_AVIC_DOORBELL, cpu_data[v->processor].apicid);
+    else
+        vcpu_kick(v);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index 8511ff0b70..8c5a2eb2cd 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -29,6 +29,7 @@
 #include <asm/hvm/io.h>
 #include <asm/hvm/support.h>
 #include <asm/hvm/vlapic.h>
+#include <asm/hvm/svm/avic.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/intr.h>
 #include <asm/hvm/nestedhvm.h> /* for nestedhvm_vcpu_in_guestmode */
@@ -100,6 +101,9 @@ static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
     HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
                 vmcb->eventinj.fields.v?vmcb->eventinj.fields.vector:-1);
 
+    if ( svm_avic_vcpu_enabled(v) )
+        return;
+
     /*
      * Create a dummy virtual interrupt to intercept as soon as the
      * guest can accept the real interrupt.
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b3e3c84175..15744a16a7 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1163,7 +1163,8 @@ static void noreturn svm_do_resume(struct vcpu *v)
         hvm_asid_flush_vcpu(v);
     }
 
-    if ( !vcpu_guestmode && !vlapic_hw_disabled(vlapic) )
+    if ( !vcpu_guestmode && !vlapic_hw_disabled(vlapic) &&
+         !svm_avic_vcpu_enabled(v) )
     {
         vintr_t intr;
 
@@ -1728,6 +1729,9 @@ const struct hvm_function_table * __init start_svm(void)
     if ( !cpu_has_svm_avic )
         svm_avic = 0;
 
+    if ( svm_avic )
+        svm_function_table.deliver_posted_intr = svm_avic_deliver_posted_intr;
+
 #define P(p,s) if ( p ) { printk(" - %s\n", s); printed = 1; }
     P(cpu_has_svm_npt, "Nested Page Tables (NPT)");
     P(cpu_has_svm_lbrv, "Last Branch Record (LBR) Virtualisation");
@@ -2559,7 +2563,8 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
      * NB. We need to preserve the low bits of the TPR to make checked builds
      * of Windows work, even though they don't actually do anything.
      */
-    if ( !vcpu_guestmode && !vlapic_hw_disabled(vlapic) )
+    if ( !vcpu_guestmode && !vlapic_hw_disabled(vlapic) &&
+         !svm_avic_vcpu_enabled(v) )
     {
         intr = vmcb_get_vintr(vmcb);
         vlapic_set_reg(vlapic, APIC_TASKPRI,
@@ -2792,6 +2797,12 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
         u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
         intr = vmcb_get_vintr(vmcb);
 
+        if ( svm_avic_vcpu_enabled(v) )
+        {
+            gdprintk(XENLOG_ERR, "AVIC: unexpected VINTR intercept\n");
+            domain_crash(v->domain);
+        }
+
         intr.fields.irq = 0;
         general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR;
 
diff --git a/xen/include/asm-x86/hvm/svm/avic.h b/xen/include/asm-x86/hvm/svm/avic.h
index 1961daa578..8e8c4c9422 100644
--- a/xen/include/asm-x86/hvm/svm/avic.h
+++ b/xen/include/asm-x86/hvm/svm/avic.h
@@ -39,4 +39,5 @@ int svm_avic_init_vmcb(struct vcpu *v);
 void svm_avic_vmexit_do_incomp_ipi(struct cpu_user_regs *regs);
 void svm_avic_vmexit_do_noaccel(struct cpu_user_regs *regs);
 
+void svm_avic_deliver_posted_intr(struct vcpu *v, u8 vector);
 #endif /* _SVM_AVIC_H_ */
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 8416756f02..f37b08bf83 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -189,6 +189,7 @@
 #define MSR_K8_ENABLE_C1E              0xc0010055
 #define MSR_K8_VM_CR                   0xc0010114
 #define MSR_K8_VM_HSAVE_PA             0xc0010117
+#define MSR_AMD_AVIC_DOORBELL          0xc001011b
 
 #define MSR_AMD_FAM15H_EVNTSEL0                0xc0010200
 #define MSR_AMD_FAM15H_PERFCTR0                0xc0010201
-- 
2.11.0

