
[Xen-devel] [PATCH 03/16] x86/HVM: switch virtual_intr_delivery_enabled() hook to simple boolean



From: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>

This patch converts the hvm_funcs.virtual_intr_delivery_enabled() hook
into a plain boolean field: VMX only ever returned a static capability
value from the hook, and SVM will likewise simply supply a static value,
so callers can test the field directly instead of making an indirect call.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Extracted from an SVM/AVIC series patch.
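
As an aside (not part of the patch itself), a minimal, self-contained C
sketch of the pattern being applied: a predicate hook that only ever
returns a static capability is replaced by a boolean field set once at
start-of-day, so callers read the flag directly with no NULL check or
indirect call. All names below are illustrative stand-ins, not Xen code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for hvm_function_table: the old hook
     * int (*virtual_intr_delivery_enabled)(void) becomes a plain flag. */
    struct funcs_like {
        bool virtual_intr_delivery_enabled;
    };

    static struct funcs_like funcs_table;

    /* Start-of-day setup, analogous to VMX setting the field when the
     * hardware supports virtual interrupt delivery (capability assumed). */
    static void init_like(bool hw_has_virt_intr_delivery)
    {
        if ( hw_has_virt_intr_delivery )
            funcs_table.virtual_intr_delivery_enabled = true;
    }

    int main(void)
    {
        init_like(true);

        /* Callers now test the field directly, as vlapic.c does after
         * this patch, instead of calling through a function pointer. */
        if ( funcs_table.virtual_intr_delivery_enabled )
            printf("virtual interrupt delivery enabled\n");

        return 0;
    }
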

--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1258,14 +1258,6 @@ void vlapic_adjust_i8259_target(struct d
     pt_adjust_global_vcpu_target(v);
 }
 
-int vlapic_virtual_intr_delivery_enabled(void)
-{
-    if ( hvm_funcs.virtual_intr_delivery_enabled )
-        return hvm_funcs.virtual_intr_delivery_enabled();
-    else
-        return 0;
-}
-
 int vlapic_has_pending_irq(struct vcpu *v)
 {
     struct vlapic *vlapic = vcpu_vlapic(v);
@@ -1278,7 +1270,7 @@ int vlapic_has_pending_irq(struct vcpu *
     if ( irr == -1 )
         return -1;
 
-    if ( vlapic_virtual_intr_delivery_enabled() &&
+    if ( hvm_funcs.virtual_intr_delivery_enabled &&
          !nestedhvm_vcpu_in_guestmode(v) )
         return irr;
 
@@ -1316,7 +1308,7 @@ int vlapic_ack_pending_irq(struct vcpu *
     int isr;
 
     if ( !force_ack &&
-         vlapic_virtual_intr_delivery_enabled() )
+         hvm_funcs.virtual_intr_delivery_enabled )
         return 1;
 
     /* If there's no chance of using APIC assist then bail now. */
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1948,11 +1948,6 @@ static void vmx_update_eoi_exit_bitmap(s
         vmx_clear_eoi_exit_bitmap(v, vector);
 }
 
-static int vmx_virtual_intr_delivery_enabled(void)
-{
-    return cpu_has_vmx_virtual_intr_delivery;
-}
-
 static void vmx_process_isr(int isr, struct vcpu *v)
 {
     unsigned long status;
@@ -2331,7 +2326,6 @@ static struct hvm_function_table __initd
     .nhvm_intr_blocked    = nvmx_intr_blocked,
     .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
     .update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap,
-    .virtual_intr_delivery_enabled = vmx_virtual_intr_delivery_enabled,
     .process_isr          = vmx_process_isr,
     .deliver_posted_intr  = vmx_deliver_posted_intr,
     .sync_pir_to_irr      = vmx_sync_pir_to_irr,
@@ -2470,6 +2464,8 @@ const struct hvm_function_table * __init
         vmx_function_table.process_isr = NULL;
         vmx_function_table.handle_eoi = NULL;
     }
+    else
+        vmx_function_table.virtual_intr_delivery_enabled = true;
 
     if ( cpu_has_vmx_posted_intr_processing )
     {
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -97,6 +97,9 @@ struct hvm_function_table {
     /* Necessary hardware support for alternate p2m's? */
     bool altp2m_supported;
 
+    /* Hardware virtual interrupt delivery enable? */
+    bool virtual_intr_delivery_enabled;
+
     /* Indicate HAP capabilities. */
     unsigned int hap_capabilities;
 
@@ -195,7 +198,6 @@ struct hvm_function_table {
 
     /* Virtual interrupt delivery */
     void (*update_eoi_exit_bitmap)(struct vcpu *v, u8 vector, u8 trig);
-    int (*virtual_intr_delivery_enabled)(void);
     void (*process_isr)(int isr, struct vcpu *v);
     void (*deliver_posted_intr)(struct vcpu *v, u8 vector);
     void (*sync_pir_to_irr)(struct vcpu *v);




