[Xen-devel] [PATCH v2 04/12] x86/HVM: patch indirect calls through hvm_funcs to direct ones

Convert the hvm_funcs hooks used on frequently executed paths from
indirect calls to patched direct ones, using the alternative_call() /
alternative_vcall() infrastructure. This intentionally does not touch
hooks used rarely (or not at all) during the lifetime of a VM, like
{domain,vcpu}_initialise or cpu_up, nor the nested, VM event, and
altp2m ones (they can all be converted later, if so desired). Virtual
interrupt delivery hooks will be dealt with in a subsequent patch.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Drop open-coded numbers from macro invocations. Re-base.
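
Illustrative note (below the fold, not part of the commit message): every
call site is converted following one mechanical pattern, assuming the
alternative_call() / alternative_vcall() infrastructure introduced earlier
in this series. Hooks whose return value is used go through
alternative_call(), void hooks through alternative_vcall(), with the
hvm_funcs member as the first argument followed by the hook's own
arguments. A minimal sketch, with v a struct vcpu * and cpl an unsigned
int as in the hunks below:

    /* Before: indirect calls through the hvm_funcs hook table. */
    hvm_funcs.update_guest_efer(v);
    cpl = hvm_funcs.get_cpl(v);

    /* After: call sites patched to direct calls by the alternatives
     * framework at boot time. */
    alternative_vcall(hvm_funcs.update_guest_efer, v);
    cpl = alternative_call(hvm_funcs.get_cpl, v);

The void variant does not yield a value, which is also why the stray
"return" in hvm_set_info_guest() is dropped in the last hunk below.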

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2031,7 +2031,7 @@ static int hvmemul_write_msr(
 static int hvmemul_wbinvd(
     struct x86_emulate_ctxt *ctxt)
 {
-    hvm_funcs.wbinvd_intercept();
+    alternative_vcall(hvm_funcs.wbinvd_intercept);
     return X86EMUL_OKAY;
 }
 
@@ -2049,7 +2049,7 @@ static int hvmemul_get_fpu(
     struct vcpu *curr = current;
 
     if ( !curr->fpu_dirtied )
-        hvm_funcs.fpu_dirty_intercept();
+        alternative_vcall(hvm_funcs.fpu_dirty_intercept);
     else if ( type == X86EMUL_FPU_fpu )
     {
         const typeof(curr->arch.xsave_area->fpu_sse) *fpu_ctxt =
@@ -2166,7 +2166,7 @@ static void hvmemul_put_fpu(
         {
             curr->fpu_dirtied = false;
             stts();
-            hvm_funcs.fpu_leave(curr);
+            alternative_vcall(hvm_funcs.fpu_leave, curr);
         }
     }
 }
@@ -2328,7 +2328,8 @@ static int _hvm_emulate_one(struct hvm_e
     if ( hvmemul_ctxt->intr_shadow != new_intr_shadow )
     {
         hvmemul_ctxt->intr_shadow = new_intr_shadow;
-        hvm_funcs.set_interrupt_shadow(curr, new_intr_shadow);
+        alternative_vcall(hvm_funcs.set_interrupt_shadow,
+                          curr, new_intr_shadow);
     }
 
     if ( hvmemul_ctxt->ctxt.retire.hlt &&
@@ -2465,7 +2466,8 @@ void hvm_emulate_init_once(
 
     memset(hvmemul_ctxt, 0, sizeof(*hvmemul_ctxt));
 
-    hvmemul_ctxt->intr_shadow = hvm_funcs.get_interrupt_shadow(curr);
+    hvmemul_ctxt->intr_shadow =
+        alternative_call(hvm_funcs.get_interrupt_shadow, curr);
     hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
     hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
 
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -272,12 +272,12 @@ void hvm_set_rdtsc_exiting(struct domain
     struct vcpu *v;
 
     for_each_vcpu ( d, v )
-        hvm_funcs.set_rdtsc_exiting(v, enable);
+        alternative_vcall(hvm_funcs.set_rdtsc_exiting, v, enable);
 }
 
 void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat)
 {
-    if ( !hvm_funcs.get_guest_pat(v, guest_pat) )
+    if ( !alternative_call(hvm_funcs.get_guest_pat, v, guest_pat) )
         *guest_pat = v->arch.hvm_vcpu.pat_cr;
 }
 
@@ -302,7 +302,7 @@ int hvm_set_guest_pat(struct vcpu *v, u6
             return 0;
         }
 
-    if ( !hvm_funcs.set_guest_pat(v, guest_pat) )
+    if ( !alternative_call(hvm_funcs.set_guest_pat, v, guest_pat) )
         v->arch.hvm_vcpu.pat_cr = guest_pat;
 
     return 1;
@@ -342,7 +342,7 @@ bool hvm_set_guest_bndcfgs(struct vcpu *
             /* nothing, best effort only */;
     }
 
-    return hvm_funcs.set_guest_bndcfgs(v, val);
+    return alternative_call(hvm_funcs.set_guest_bndcfgs, v, val);
 }
 
 /*
@@ -502,7 +502,8 @@ void hvm_migrate_pirqs(struct vcpu *v)
 static bool hvm_get_pending_event(struct vcpu *v, struct x86_event *info)
 {
     info->cr2 = v->arch.hvm_vcpu.guest_cr[2];
-    return hvm_funcs.get_pending_event(v, info);
+
+    return alternative_call(hvm_funcs.get_pending_event, v, info);
 }
 
 void hvm_do_resume(struct vcpu *v)
@@ -1683,7 +1684,7 @@ void hvm_inject_event(const struct x86_e
         }
     }
 
-    hvm_funcs.inject_event(event);
+    alternative_vcall(hvm_funcs.inject_event, event);
 }
 
 int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
@@ -2270,7 +2271,7 @@ int hvm_set_cr0(unsigned long value, boo
          (!rangeset_is_empty(d->iomem_caps) ||
           !rangeset_is_empty(d->arch.ioport_caps) ||
           has_arch_pdevs(d)) )
-        hvm_funcs.handle_cd(v, value);
+        alternative_vcall(hvm_funcs.handle_cd, v, value);
 
     hvm_update_cr(v, 0, value);
 
@@ -3512,7 +3513,8 @@ int hvm_msr_read_intercept(unsigned int
             goto gp_fault;
         /* If ret == 0 then this is not an MCE MSR, see other MSRs. */
         ret = ((ret == 0)
-               ? hvm_funcs.msr_read_intercept(msr, msr_content)
+               ? alternative_call(hvm_funcs.msr_read_intercept,
+                                  msr, msr_content)
                : X86EMUL_OKAY);
         break;
     }
@@ -3672,7 +3674,8 @@ int hvm_msr_write_intercept(unsigned int
             goto gp_fault;
         /* If ret == 0 then this is not an MCE MSR, see other MSRs. */
         ret = ((ret == 0)
-               ? hvm_funcs.msr_write_intercept(msr, msr_content)
+               ? alternative_call(hvm_funcs.msr_write_intercept,
+                                  msr, msr_content)
                : X86EMUL_OKAY);
         break;
     }
@@ -3864,7 +3867,7 @@ void hvm_hypercall_page_initialise(struc
                                    void *hypercall_page)
 {
     hvm_latch_shinfo_size(d);
-    hvm_funcs.init_hypercall_page(d, hypercall_page);
+    alternative_vcall(hvm_funcs.init_hypercall_page, d, hypercall_page);
 }
 
 void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
@@ -5019,7 +5022,7 @@ void hvm_domain_soft_reset(struct domain
 void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                               struct segment_register *reg)
 {
-    hvm_funcs.get_segment_register(v, seg, reg);
+    alternative_vcall(hvm_funcs.get_segment_register, v, seg, reg);
 
     switch ( seg )
     {
@@ -5165,7 +5168,7 @@ void hvm_set_segment_register(struct vcp
         return;
     }
 
-    hvm_funcs.set_segment_register(v, seg, reg);
+    alternative_vcall(hvm_funcs.set_segment_register, v, seg, reg);
 }
 
 /*
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -314,42 +314,42 @@ static inline int
 hvm_guest_x86_mode(struct vcpu *v)
 {
     ASSERT(v == current);
-    return hvm_funcs.guest_x86_mode(v);
+    return alternative_call(hvm_funcs.guest_x86_mode, v);
 }
 
 static inline void
 hvm_update_host_cr3(struct vcpu *v)
 {
     if ( hvm_funcs.update_host_cr3 )
-        hvm_funcs.update_host_cr3(v);
+        alternative_vcall(hvm_funcs.update_host_cr3, v);
 }
 
 static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
 {
-    hvm_funcs.update_guest_cr(v, cr, 0);
+    alternative_vcall(hvm_funcs.update_guest_cr, v, cr, 0);
 }
 
 static inline void hvm_update_guest_cr3(struct vcpu *v, bool noflush)
 {
     unsigned int flags = noflush ? HVM_UPDATE_GUEST_CR3_NOFLUSH : 0;
 
-    hvm_funcs.update_guest_cr(v, 3, flags);
+    alternative_vcall(hvm_funcs.update_guest_cr, v, 3, flags);
 }
 
 static inline void hvm_update_guest_efer(struct vcpu *v)
 {
-    hvm_funcs.update_guest_efer(v);
+    alternative_vcall(hvm_funcs.update_guest_efer, v);
 }
 
 static inline void hvm_cpuid_policy_changed(struct vcpu *v)
 {
-    hvm_funcs.cpuid_policy_changed(v);
+    alternative_vcall(hvm_funcs.cpuid_policy_changed, v);
 }
 
 static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset,
                                       uint64_t at_tsc)
 {
-    hvm_funcs.set_tsc_offset(v, offset, at_tsc);
+    alternative_vcall(hvm_funcs.set_tsc_offset, v, offset, at_tsc);
 }
 
 /*
@@ -369,7 +369,7 @@ void hvm_hypercall_page_initialise(struc
 static inline unsigned int
 hvm_get_cpl(struct vcpu *v)
 {
-    return hvm_funcs.get_cpl(v);
+    return alternative_call(hvm_funcs.get_cpl, v);
 }
 
 void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
@@ -379,13 +379,13 @@ void hvm_set_segment_register(struct vcp
 
 static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v)
 {
-    return hvm_funcs.get_shadow_gs_base(v);
+    return alternative_call(hvm_funcs.get_shadow_gs_base, v);
 }
 
 static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
 {
     return hvm_funcs.get_guest_bndcfgs &&
-           hvm_funcs.get_guest_bndcfgs(v, val);
+           alternative_call(hvm_funcs.get_guest_bndcfgs, v, val);
 }
 
 bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val);
@@ -451,12 +451,12 @@ static inline void hvm_inject_page_fault
 
 static inline int hvm_event_pending(struct vcpu *v)
 {
-    return hvm_funcs.event_pending(v);
+    return alternative_call(hvm_funcs.event_pending, v);
 }
 
 static inline void hvm_invlpg(struct vcpu *v, unsigned long va)
 {
-    hvm_funcs.invlpg(v, va);
+    alternative_vcall(hvm_funcs.invlpg, v, va);
 }
 
 /* These bits in CR4 are owned by the host. */
@@ -487,7 +487,8 @@ static inline void hvm_cpu_down(void)
 
 static inline unsigned int hvm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
 {
-    return (hvm_funcs.get_insn_bytes ? hvm_funcs.get_insn_bytes(v, buf) : 0);
+    return (hvm_funcs.get_insn_bytes
+            ? alternative_call(hvm_funcs.get_insn_bytes, v, buf) : 0);
 }
 
 enum hvm_task_switch_reason { TSW_jmp, TSW_iret, TSW_call_or_int };
@@ -519,7 +520,7 @@ void hvm_mapped_guest_frames_mark_dirty(
 static inline void hvm_set_info_guest(struct vcpu *v)
 {
     if ( hvm_funcs.set_info_guest )
-        return hvm_funcs.set_info_guest(v);
+        alternative_vcall(hvm_funcs.set_info_guest, v);
 }
 
 int hvm_debug_op(struct vcpu *v, int32_t op);



