
[PATCH v2] x86/HVM: convert remaining hvm_funcs hook invocations to alt-call


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Thu, 13 Jan 2022 18:02:40 +0100
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Paul Durrant <paul@xxxxxxx>, Tamas K Lengyel <tamas@xxxxxxxxxxxxx>, Petre Pircalabu <ppircalabu@xxxxxxxxxxxxxxx>, Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
  • Delivery-date: Thu, 13 Jan 2022 17:03:02 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

The aim is to have as few indirect calls as possible (see [1]); during the
initial conversion performance was the main concern, and hence rarely used
hooks didn't get converted. Apparently one use of get_interrupt_shadow()
was missed at the time.

While doing this, drop NULL checks ahead of CPU management and .nhvm_*()
calls when the hook is always present. Also convert the
.nhvm_vcpu_reset() call to alternative_vcall(), as the return value is
unused and the caller currently has no way of propagating it.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

[1] https://lists.xen.org/archives/html/xen-devel/2021-11/msg01822.html
---
v2: Convert all hook invocations. Re-base.
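
(Illustrative note, not part of the change itself: the conversion is purely
mechanical. Below is a minimal, self-contained C sketch, not Xen's real
macros; the *_example names are hypothetical stand-ins. It only shows the
source-level shape of each hunk: an indirect call through the hook table is
routed through alternative_call() / alternative_vcall(), which the
alternatives framework then patches into a direct call at boot, the _vcall
form being the one whose return value is discarded.)

/*
 * Simplified illustration only.  These are NOT Xen's alternative_call()
 * and alternative_vcall(); the real macros patch the call site at boot.
 * Here they just expand to the plain call, to show the conversion shape.
 */
#define alternative_call_example(hook, ...)  ((hook)(__VA_ARGS__))
#define alternative_vcall_example(hook, ...) ((void)(hook)(__VA_ARGS__))

/* Stand-in for the hvm_funcs hook table. */
struct hvm_hooks_example {
    int  (*cpu_up_prepare)(unsigned int cpu);
    void (*cpu_dead)(unsigned int cpu);
};

static int  dummy_cpu_up_prepare(unsigned int cpu) { (void)cpu; return 0; }
static void dummy_cpu_dead(unsigned int cpu) { (void)cpu; }

static struct hvm_hooks_example hooks_example = {
    .cpu_up_prepare = dummy_cpu_up_prepare, /* always set, so no NULL check */
    .cpu_dead       = dummy_cpu_dead,
};

/* Before: plain indirect calls through the hook table. */
static int cpu_callback_before(unsigned int cpu)
{
    int rc = hooks_example.cpu_up_prepare(cpu);

    hooks_example.cpu_dead(cpu);
    return rc;
}

/* After: the same calls via the (stand-in) wrappers; the value-returning
 * hook uses the _call form, the void one the _vcall form. */
static int cpu_callback_after(unsigned int cpu)
{
    int rc = alternative_call_example(hooks_example.cpu_up_prepare, cpu);

    alternative_vcall_example(hooks_example.cpu_dead, cpu);
    return rc;
}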

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2549,7 +2549,7 @@ static int hvmemul_vmfunc(
 
     if ( !hvm_funcs.altp2m_vcpu_emulate_vmfunc )
         return X86EMUL_UNHANDLEABLE;
-    rc = hvm_funcs.altp2m_vcpu_emulate_vmfunc(ctxt->regs);
+    rc = alternative_call(hvm_funcs.altp2m_vcpu_emulate_vmfunc, ctxt->regs);
     if ( rc == X86EMUL_EXCEPTION )
         x86_emul_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
 
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -126,14 +126,14 @@ static int cpu_callback(
     switch ( action )
     {
     case CPU_UP_PREPARE:
-        rc = hvm_funcs.cpu_up_prepare(cpu);
+        rc = alternative_call(hvm_funcs.cpu_up_prepare, cpu);
         break;
     case CPU_DYING:
         hvm_cpu_down();
         break;
     case CPU_UP_CANCELED:
     case CPU_DEAD:
-        hvm_funcs.cpu_dead(cpu);
+        alternative_vcall(hvm_funcs.cpu_dead, cpu);
         break;
     default:
         break;
@@ -707,7 +707,7 @@ int hvm_domain_initialise(struct domain
     if ( rc )
         goto fail2;
 
-    rc = hvm_funcs.domain_initialise(d);
+    rc = alternative_call(hvm_funcs.domain_initialise, d);
     if ( rc != 0 )
         goto fail2;
 
@@ -740,7 +740,7 @@ void hvm_domain_relinquish_resources(str
         alternative_vcall(hvm_funcs.domain_relinquish_resources, d);
 
     if ( hvm_funcs.nhvm_domain_relinquish_resources )
-        hvm_funcs.nhvm_domain_relinquish_resources(d);
+        alternative_vcall(hvm_funcs.nhvm_domain_relinquish_resources, d);
 
     viridian_domain_deinit(d);
 
@@ -870,7 +870,7 @@ static int hvm_save_cpu_ctxt(struct vcpu
         return 0;
 
     /* Architecture-specific vmcs/vmcb bits */
-    hvm_funcs.save_cpu_ctxt(v, &ctxt);
+    alternative_vcall(hvm_funcs.save_cpu_ctxt, v, &ctxt);
 
     hvm_get_segment_register(v, x86_seg_idtr, &seg);
     ctxt.idtr_limit = seg.limit;
@@ -1093,14 +1093,14 @@ static int hvm_load_cpu_ctxt(struct doma
 #undef UNFOLD_ARBYTES
 
     /* Architecture-specific vmcs/vmcb bits */
-    if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
+    if ( alternative_call(hvm_funcs.load_cpu_ctxt, v, &ctxt) < 0 )
         return -EINVAL;
 
     v->arch.hvm.guest_cr[2] = ctxt.cr2;
     hvm_update_guest_cr(v, 2);
 
     if ( hvm_funcs.tsc_scaling.setup )
-        hvm_funcs.tsc_scaling.setup(v);
+        alternative_vcall(hvm_funcs.tsc_scaling.setup, v);
 
     v->arch.msrs->tsc_aux = ctxt.msr_tsc_aux;
 
@@ -1563,7 +1563,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
     if ( rc != 0 ) /* teardown: vlapic_destroy */
         goto fail2;
 
-    if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 ) /* teardown: hvm_funcs.vcpu_destroy */
+    rc = alternative_call(hvm_funcs.vcpu_initialise, v);
+    if ( rc != 0 ) /* teardown: hvm_funcs.vcpu_destroy */
         goto fail3;
 
     softirq_tasklet_init(&v->arch.hvm.assert_evtchn_irq_tasklet,
@@ -1611,7 +1612,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
     free_compat_arg_xlat(v);
  fail4:
     hvmemul_cache_destroy(v);
-    hvm_funcs.vcpu_destroy(v);
+    alternative_vcall(hvm_funcs.vcpu_destroy, v);
  fail3:
     vlapic_destroy(v);
  fail2:
@@ -1635,7 +1636,7 @@ void hvm_vcpu_destroy(struct vcpu *v)
     free_compat_arg_xlat(v);
 
     tasklet_kill(&v->arch.hvm.assert_evtchn_irq_tasklet);
-    hvm_funcs.vcpu_destroy(v);
+    alternative_vcall(hvm_funcs.vcpu_destroy, v);
 
     vlapic_destroy(v);
 
@@ -3874,7 +3875,7 @@ enum hvm_intblk hvm_interrupt_blocked(st
          !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
         return hvm_intblk_rflags_ie;
 
-    intr_shadow = hvm_funcs.get_interrupt_shadow(v);
+    intr_shadow = alternative_call(hvm_funcs.get_interrupt_shadow, v);
 
     if ( intr_shadow & (HVM_INTR_SHADOW_STI|HVM_INTR_SHADOW_MOV_SS) )
         return hvm_intblk_shadow;
@@ -3990,7 +3991,7 @@ void hvm_vcpu_reset_state(struct vcpu *v
     hvm_set_segment_register(v, x86_seg_idtr, &reg);
 
     if ( hvm_funcs.tsc_scaling.setup )
-        hvm_funcs.tsc_scaling.setup(v);
+        alternative_vcall(hvm_funcs.tsc_scaling.setup, v);
 
     /* Sync AP's TSC with BSP's. */
     v->arch.hvm.cache_tsc_offset =
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -54,8 +54,7 @@ nestedhvm_vcpu_reset(struct vcpu *v)
 
     hvm_asid_flush_vcpu_asid(&nv->nv_n2asid);
 
-    if ( hvm_funcs.nhvm_vcpu_reset )
-        hvm_funcs.nhvm_vcpu_reset(v);
+    alternative_vcall(hvm_funcs.nhvm_vcpu_reset, v);
 
     /* vcpu is in host mode */
     nestedhvm_vcpu_exit_guestmode(v);
@@ -64,14 +63,14 @@ nestedhvm_vcpu_reset(struct vcpu *v)
 int
 nestedhvm_vcpu_initialise(struct vcpu *v)
 {
-    int rc = -EOPNOTSUPP;
+    int rc;
 
     if ( !shadow_io_bitmap[0] )
         return -ENOMEM;
 
-    if ( !hvm_funcs.nhvm_vcpu_initialise ||
-         ((rc = hvm_funcs.nhvm_vcpu_initialise(v)) != 0) )
-         return rc;
+    rc = alternative_call(hvm_funcs.nhvm_vcpu_initialise, v);
+    if ( rc )
+        return rc;
 
     nestedhvm_vcpu_reset(v);
     return 0;
@@ -80,8 +79,7 @@ nestedhvm_vcpu_initialise(struct vcpu *v
 void
 nestedhvm_vcpu_destroy(struct vcpu *v)
 {
-    if ( hvm_funcs.nhvm_vcpu_destroy )
-        hvm_funcs.nhvm_vcpu_destroy(v);
+    alternative_vcall(hvm_funcs.nhvm_vcpu_destroy, v);
 }
 
 static void
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -561,13 +561,12 @@ static inline void hvm_invlpg(struct vcp
 
 static inline int hvm_cpu_up(void)
 {
-    return (hvm_funcs.cpu_up ? hvm_funcs.cpu_up() : 0);
+    return alternative_call(hvm_funcs.cpu_up);
 }
 
 static inline void hvm_cpu_down(void)
 {
-    if ( hvm_funcs.cpu_down )
-        hvm_funcs.cpu_down();
+    alternative_vcall(hvm_funcs.cpu_down);
 }
 
 static inline unsigned int hvm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
@@ -601,7 +600,7 @@ static inline void hvm_invalidate_regs_f
 static inline int nhvm_vcpu_vmexit_event(
     struct vcpu *v, const struct x86_event *event)
 {
-    return hvm_funcs.nhvm_vcpu_vmexit_event(v, event);
+    return alternative_call(hvm_funcs.nhvm_vcpu_vmexit_event, v, event);
 }
 
 /* returns l1 guest's cr3 that points to the page table used to
@@ -609,45 +608,46 @@ static inline int nhvm_vcpu_vmexit_event
  */
 static inline uint64_t nhvm_vcpu_p2m_base(struct vcpu *v)
 {
-    return hvm_funcs.nhvm_vcpu_p2m_base(v);
+    return alternative_call(hvm_funcs.nhvm_vcpu_p2m_base, v);
 }
 
 /* returns true, when l1 guest intercepts the specified trap */
 static inline bool_t nhvm_vmcx_guest_intercepts_event(
     struct vcpu *v, unsigned int vector, int errcode)
 {
-    return hvm_funcs.nhvm_vmcx_guest_intercepts_event(v, vector, errcode);
+    return alternative_call(hvm_funcs.nhvm_vmcx_guest_intercepts_event, v,
+                            vector, errcode);
 }
 
 /* returns true when l1 guest wants to use hap to run l2 guest */
 static inline bool_t nhvm_vmcx_hap_enabled(struct vcpu *v)
 {
-    return hvm_funcs.nhvm_vmcx_hap_enabled(v);
+    return alternative_call(hvm_funcs.nhvm_vmcx_hap_enabled, v);
 }
 
 /* interrupt */
 static inline enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
 {
-    return hvm_funcs.nhvm_intr_blocked(v);
+    return alternative_call(hvm_funcs.nhvm_intr_blocked, v);
 }
 
 static inline int nhvm_hap_walk_L1_p2m(
     struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
     uint8_t *p2m_acc, struct npfec npfec)
 {
-    return hvm_funcs.nhvm_hap_walk_L1_p2m(
+    return alternative_call(hvm_funcs.nhvm_hap_walk_L1_p2m,
         v, L2_gpa, L1_gpa, page_order, p2m_acc, npfec);
 }
 
 static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
 {
-    hvm_funcs.enable_msr_interception(d, msr);
+    alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
 }
 
 static inline bool_t hvm_is_singlestep_supported(void)
 {
     return (hvm_funcs.is_singlestep_supported &&
-            hvm_funcs.is_singlestep_supported());
+            alternative_call(hvm_funcs.is_singlestep_supported));
 }
 
 static inline bool hvm_hap_supported(void)
@@ -665,14 +665,14 @@ static inline bool hvm_altp2m_supported(
 static inline void altp2m_vcpu_update_p2m(struct vcpu *v)
 {
     if ( hvm_funcs.altp2m_vcpu_update_p2m )
-        hvm_funcs.altp2m_vcpu_update_p2m(v);
+        alternative_vcall(hvm_funcs.altp2m_vcpu_update_p2m, v);
 }
 
 /* updates VMCS fields related to VMFUNC and #VE */
 static inline void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
 {
     if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
-        hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v);
+        alternative_vcall(hvm_funcs.altp2m_vcpu_update_vmfunc_ve, v);
 }
 
 /* emulates #VE */
@@ -680,7 +680,7 @@ static inline bool altp2m_vcpu_emulate_v
 {
     if ( hvm_funcs.altp2m_vcpu_emulate_ve )
     {
-        hvm_funcs.altp2m_vcpu_emulate_ve(v);
+        alternative_vcall(hvm_funcs.altp2m_vcpu_emulate_ve, v);
         return true;
     }
     return false;
@@ -689,7 +689,7 @@ static inline bool altp2m_vcpu_emulate_v
 static inline int hvm_vmtrace_control(struct vcpu *v, bool enable, bool reset)
 {
     if ( hvm_funcs.vmtrace_control )
-        return hvm_funcs.vmtrace_control(v, enable, reset);
+        return alternative_call(hvm_funcs.vmtrace_control, v, enable, reset);
 
     return -EOPNOTSUPP;
 }
@@ -698,7 +698,7 @@ static inline int hvm_vmtrace_control(st
 static inline int hvm_vmtrace_output_position(struct vcpu *v, uint64_t *pos)
 {
     if ( hvm_funcs.vmtrace_output_position )
-        return hvm_funcs.vmtrace_output_position(v, pos);
+        return alternative_call(hvm_funcs.vmtrace_output_position, v, pos);
 
     return -EOPNOTSUPP;
 }
@@ -707,7 +707,7 @@ static inline int hvm_vmtrace_set_option
     struct vcpu *v, uint64_t key, uint64_t value)
 {
     if ( hvm_funcs.vmtrace_set_option )
-        return hvm_funcs.vmtrace_set_option(v, key, value);
+        return alternative_call(hvm_funcs.vmtrace_set_option, v, key, value);
 
     return -EOPNOTSUPP;
 }
@@ -716,7 +716,7 @@ static inline int hvm_vmtrace_get_option
     struct vcpu *v, uint64_t key, uint64_t *value)
 {
     if ( hvm_funcs.vmtrace_get_option )
-        return hvm_funcs.vmtrace_get_option(v, key, value);
+        return alternative_call(hvm_funcs.vmtrace_get_option, v, key, value);
 
     return -EOPNOTSUPP;
 }
@@ -724,7 +724,7 @@ static inline int hvm_vmtrace_get_option
 static inline int hvm_vmtrace_reset(struct vcpu *v)
 {
     if ( hvm_funcs.vmtrace_reset )
-        return hvm_funcs.vmtrace_reset(v);
+        return alternative_call(hvm_funcs.vmtrace_reset, v);
 
     return -EOPNOTSUPP;
 }
--- a/xen/arch/x86/monitor.c
+++ b/xen/arch/x86/monitor.c
@@ -270,7 +270,8 @@ int arch_monitor_domctl_event(struct dom
         ad->monitor.descriptor_access_enabled = requested_status;
 
         for_each_vcpu ( d, v )
-            hvm_funcs.set_descriptor_access_exiting(v, requested_status);
+            alternative_vcall(hvm_funcs.set_descriptor_access_exiting, v,
+                              requested_status);
 
         domain_unpause(d);
         break;
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -204,7 +204,7 @@ void vm_event_fill_regs(vm_event_request
     ASSERT(is_hvm_vcpu(curr));
 
     /* Architecture-specific vmcs/vmcb bits */
-    hvm_funcs.save_cpu_ctxt(curr, &ctxt);
+    alternative_vcall(hvm_funcs.save_cpu_ctxt, curr, &ctxt);
 
     req->data.regs.x86.rax = regs->rax;
     req->data.regs.x86.rcx = regs->rcx;




 

