
[Xen-devel] [PATCH 11/16] x86/HVM: patch vINTR indirect calls through hvm_funcs to direct ones



While not strictly necessary, change the VMX initialization logic to
update the function table in start_vmx() from NULL rather than to NULL,
to make it more obvious that we won't ever change an already (explicitly)
initialized function pointer.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
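
As a side note for readers less familiar with this initialization style,
here is a minimal, self-contained sketch in plain C (hypothetical names,
ordinary userspace code, not Xen's actual hvm_function_table or the
alternatives patching machinery): feature-dependent hooks are simply left
out of the static initializer, so they start out NULL, and start_vmx()
assigns them from NULL only when the corresponding hardware feature is
present. Call sites keep their NULL checks, so an explicitly initialized
pointer is never changed afterwards.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the real hooks table. */
struct hvm_hooks {
    void (*sync_pir_to_irr)(int vcpu);    /* feature-dependent hook */
};

static void vmx_sync_pir_to_irr(int vcpu)
{
    printf("sync PIR to IRR for vCPU %d\n", vcpu);
}

/* Feature-dependent hooks are deliberately absent from the initializer,
 * so they are NULL to begin with ... */
static struct hvm_hooks hooks;

static void start_vmx(bool cpu_has_posted_intr)
{
    /* ... and are only ever set here, from NULL, when the feature is
     * available; the pointer is never cleared again once set. */
    if ( cpu_has_posted_intr )
        hooks.sync_pir_to_irr = vmx_sync_pir_to_irr;
}

int main(void)
{
    start_vmx(true);

    if ( hooks.sync_pir_to_irr )          /* call sites still guard on NULL */
        hooks.sync_pir_to_irr(0);

    return 0;
}

The alternative_call2() / alternative_vcallN() uses in the hunks below are
the call-site half of the change: they allow these guarded indirect calls
to be patched into direct calls, which the sketch above intentionally does
not model.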

--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -111,10 +111,15 @@ static void vlapic_clear_irr(int vector,
     vlapic_clear_vector(vector, &vlapic->regs->data[APIC_IRR]);
 }
 
-static int vlapic_find_highest_irr(struct vlapic *vlapic)
+static void sync_pir_to_irr(struct vcpu *v)
 {
     if ( hvm_funcs.sync_pir_to_irr )
-        hvm_funcs.sync_pir_to_irr(vlapic_vcpu(vlapic));
+        alternative_vcall1(hvm_funcs.sync_pir_to_irr, v);
+}
+
+static int vlapic_find_highest_irr(struct vlapic *vlapic)
+{
+    sync_pir_to_irr(vlapic_vcpu(vlapic));
 
     return vlapic_find_highest_vector(&vlapic->regs->data[APIC_IRR]);
 }
@@ -143,7 +148,7 @@ bool vlapic_test_irq(const struct vlapic
         return false;
 
     if ( hvm_funcs.test_pir &&
-         hvm_funcs.test_pir(const_vlapic_vcpu(vlapic), vec) )
+         alternative_call2(hvm_funcs.test_pir, const_vlapic_vcpu(vlapic), vec) )
         return true;
 
     return vlapic_test_vector(vec, &vlapic->regs->data[APIC_IRR]);
@@ -165,10 +170,10 @@ void vlapic_set_irq(struct vlapic *vlapi
         vlapic_clear_vector(vec, &vlapic->regs->data[APIC_TMR]);
 
     if ( hvm_funcs.update_eoi_exit_bitmap )
-        hvm_funcs.update_eoi_exit_bitmap(target, vec, trig);
+        alternative_vcall3(hvm_funcs.update_eoi_exit_bitmap, target, vec, trig);
 
     if ( hvm_funcs.deliver_posted_intr )
-        hvm_funcs.deliver_posted_intr(target, vec);
+        alternative_vcall2(hvm_funcs.deliver_posted_intr, target, vec);
     else if ( !vlapic_test_and_set_irr(vec, vlapic) )
         vcpu_kick(target);
 }
@@ -448,7 +453,7 @@ void vlapic_EOI_set(struct vlapic *vlapi
     vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]);
 
     if ( hvm_funcs.handle_eoi )
-        hvm_funcs.handle_eoi(vector);
+        alternative_vcall1(hvm_funcs.handle_eoi, vector);
 
     vlapic_handle_EOI(vlapic, vector);
 
@@ -1457,8 +1462,7 @@ static int lapic_save_regs(struct domain
 
     for_each_vcpu ( d, v )
     {
-        if ( hvm_funcs.sync_pir_to_irr )
-            hvm_funcs.sync_pir_to_irr(v);
+        sync_pir_to_irr(v);
 
         s = vcpu_vlapic(v);
         if ( (rc = hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs)) != 0 )
@@ -1561,7 +1565,8 @@ static int lapic_load_regs(struct domain
         lapic_load_fixup(s);
 
     if ( hvm_funcs.process_isr )
-        hvm_funcs.process_isr(vlapic_find_highest_isr(s), v);
+        alternative_vcall2(hvm_funcs.process_isr,
+                           vlapic_find_highest_isr(s), v);
 
     vlapic_adjust_i8259_target(d);
     lapic_rearm(s);
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2280,12 +2280,6 @@ static struct hvm_function_table __initd
     .nhvm_vcpu_vmexit_event = nvmx_vmexit_event,
     .nhvm_intr_blocked    = nvmx_intr_blocked,
     .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
-    .update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap,
-    .process_isr          = vmx_process_isr,
-    .deliver_posted_intr  = vmx_deliver_posted_intr,
-    .sync_pir_to_irr      = vmx_sync_pir_to_irr,
-    .test_pir             = vmx_test_pir,
-    .handle_eoi           = vmx_handle_eoi,
     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
     .enable_msr_interception = vmx_enable_msr_interception,
     .is_singlestep_supported = vmx_is_singlestep_supported,
@@ -2413,26 +2407,23 @@ const struct hvm_function_table * __init
         setup_ept_dump();
     }
 
-    if ( !cpu_has_vmx_virtual_intr_delivery )
+    if ( cpu_has_vmx_virtual_intr_delivery )
     {
-        vmx_function_table.update_eoi_exit_bitmap = NULL;
-        vmx_function_table.process_isr = NULL;
-        vmx_function_table.handle_eoi = NULL;
-    }
-    else
+        vmx_function_table.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap;
+        vmx_function_table.process_isr = vmx_process_isr;
+        vmx_function_table.handle_eoi = vmx_handle_eoi;
         vmx_function_table.virtual_intr_delivery_enabled = true;
+    }
 
     if ( cpu_has_vmx_posted_intr_processing )
     {
         alloc_direct_apic_vector(&posted_intr_vector, pi_notification_interrupt);
         if ( iommu_intpost )
             alloc_direct_apic_vector(&pi_wakeup_vector, pi_wakeup_interrupt);
-    }
-    else
-    {
-        vmx_function_table.deliver_posted_intr = NULL;
-        vmx_function_table.sync_pir_to_irr = NULL;
-        vmx_function_table.test_pir = NULL;
+
+        vmx_function_table.deliver_posted_intr = vmx_deliver_posted_intr;
+        vmx_function_table.sync_pir_to_irr     = vmx_sync_pir_to_irr;
+        vmx_function_table.test_pir            = vmx_test_pir;
     }
 
     if ( cpu_has_vmx_tsc_scaling )



