
Re: [Xen-devel] [PATCH v4 02/12] x86/HVM: patch indirect calls through hvm_funcs to direct ones



> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
> Sent: 02 October 2018 11:13
> To: xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Paul Durrant
> <Paul.Durrant@xxxxxxxxxx>; Wei Liu <wei.liu2@xxxxxxxxxx>
> Subject: [PATCH v4 02/12] x86/HVM: patch indirect calls through hvm_funcs
> to direct ones
> 
> This is intentionally not touching hooks used rarely (or not at all)
> during the lifetime of a VM, like {domain,vcpu}_initialise or cpu_up,
> as well as nested, VM event, and altp2m ones (they can all be done
> later, if so desired). Virtual Interrupt delivery ones will be dealt
> with in a subsequent patch.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>

> Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
> ---
> v3: Re-base.
> v2: Drop open-coded numbers from macro invocations. Re-base.
> 
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -2104,7 +2104,7 @@ static int hvmemul_write_msr(
>  static int hvmemul_wbinvd(
>      struct x86_emulate_ctxt *ctxt)
>  {
> -    hvm_funcs.wbinvd_intercept();
> +    alternative_vcall(hvm_funcs.wbinvd_intercept);
>      return X86EMUL_OKAY;
>  }
> 
> @@ -2122,7 +2122,7 @@ static int hvmemul_get_fpu(
>      struct vcpu *curr = current;
> 
>      if ( !curr->fpu_dirtied )
> -        hvm_funcs.fpu_dirty_intercept();
> +        alternative_vcall(hvm_funcs.fpu_dirty_intercept);
>      else if ( type == X86EMUL_FPU_fpu )
>      {
>          const typeof(curr->arch.xsave_area->fpu_sse) *fpu_ctxt =
> @@ -2239,7 +2239,7 @@ static void hvmemul_put_fpu(
>          {
>              curr->fpu_dirtied = false;
>              stts();
> -            hvm_funcs.fpu_leave(curr);
> +            alternative_vcall(hvm_funcs.fpu_leave, curr);
>          }
>      }
>  }
> @@ -2401,7 +2401,8 @@ static int _hvm_emulate_one(struct hvm_e
>      if ( hvmemul_ctxt->intr_shadow != new_intr_shadow )
>      {
>          hvmemul_ctxt->intr_shadow = new_intr_shadow;
> -        hvm_funcs.set_interrupt_shadow(curr, new_intr_shadow);
> +        alternative_vcall(hvm_funcs.set_interrupt_shadow,
> +                          curr, new_intr_shadow);
>      }
> 
>      if ( hvmemul_ctxt->ctxt.retire.hlt &&
> @@ -2538,7 +2539,8 @@ void hvm_emulate_init_once(
> 
>      memset(hvmemul_ctxt, 0, sizeof(*hvmemul_ctxt));
> 
> -    hvmemul_ctxt->intr_shadow = hvm_funcs.get_interrupt_shadow(curr);
> +    hvmemul_ctxt->intr_shadow =
> +        alternative_call(hvm_funcs.get_interrupt_shadow, curr);
>      hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
>      hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
> 
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -272,12 +272,12 @@ void hvm_set_rdtsc_exiting(struct domain
>      struct vcpu *v;
> 
>      for_each_vcpu ( d, v )
> -        hvm_funcs.set_rdtsc_exiting(v, enable);
> +        alternative_vcall(hvm_funcs.set_rdtsc_exiting, v, enable);
>  }
> 
>  void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat)
>  {
> -    if ( !hvm_funcs.get_guest_pat(v, guest_pat) )
> +    if ( !alternative_call(hvm_funcs.get_guest_pat, v, guest_pat) )
>          *guest_pat = v->arch.hvm.pat_cr;
>  }
> 
> @@ -302,7 +302,7 @@ int hvm_set_guest_pat(struct vcpu *v, u6
>              return 0;
>          }
> 
> -    if ( !hvm_funcs.set_guest_pat(v, guest_pat) )
> +    if ( !alternative_call(hvm_funcs.set_guest_pat, v, guest_pat) )
>          v->arch.hvm.pat_cr = guest_pat;
> 
>      return 1;
> @@ -342,7 +342,7 @@ bool hvm_set_guest_bndcfgs(struct vcpu *
>              /* nothing, best effort only */;
>      }
> 
> -    return hvm_funcs.set_guest_bndcfgs(v, val);
> +    return alternative_call(hvm_funcs.set_guest_bndcfgs, v, val);
>  }
> 
>  /*
> @@ -500,7 +500,8 @@ void hvm_migrate_pirqs(struct vcpu *v)
>  static bool hvm_get_pending_event(struct vcpu *v, struct x86_event *info)
>  {
>      info->cr2 = v->arch.hvm.guest_cr[2];
> -    return hvm_funcs.get_pending_event(v, info);
> +
> +    return alternative_call(hvm_funcs.get_pending_event, v, info);
>  }
> 
>  void hvm_do_resume(struct vcpu *v)
> @@ -1651,7 +1652,7 @@ void hvm_inject_event(const struct x86_e
>          }
>      }
> 
> -    hvm_funcs.inject_event(event);
> +    alternative_vcall(hvm_funcs.inject_event, event);
>  }
> 
>  int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
> @@ -2238,7 +2239,7 @@ int hvm_set_cr0(unsigned long value, boo
>           (!rangeset_is_empty(d->iomem_caps) ||
>            !rangeset_is_empty(d->arch.ioport_caps) ||
>            has_arch_pdevs(d)) )
> -        hvm_funcs.handle_cd(v, value);
> +        alternative_vcall(hvm_funcs.handle_cd, v, value);
> 
>      hvm_update_cr(v, 0, value);
> 
> @@ -3477,7 +3478,8 @@ int hvm_msr_read_intercept(unsigned int
>              goto gp_fault;
>          /* If ret == 0 then this is not an MCE MSR, see other MSRs. */
>          ret = ((ret == 0)
> -               ? hvm_funcs.msr_read_intercept(msr, msr_content)
> +               ? alternative_call(hvm_funcs.msr_read_intercept,
> +                                  msr, msr_content)
>                 : X86EMUL_OKAY);
>          break;
>      }
> @@ -3637,7 +3639,8 @@ int hvm_msr_write_intercept(unsigned int
>              goto gp_fault;
>          /* If ret == 0 then this is not an MCE MSR, see other MSRs. */
>          ret = ((ret == 0)
> -               ? hvm_funcs.msr_write_intercept(msr, msr_content)
> +               ? alternative_call(hvm_funcs.msr_write_intercept,
> +                                  msr, msr_content)
>                 : X86EMUL_OKAY);
>          break;
>      }
> @@ -3829,7 +3832,7 @@ void hvm_hypercall_page_initialise(struc
>                                     void *hypercall_page)
>  {
>      hvm_latch_shinfo_size(d);
> -    hvm_funcs.init_hypercall_page(d, hypercall_page);
> +    alternative_vcall(hvm_funcs.init_hypercall_page, d, hypercall_page);
>  }
> 
>  void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
> @@ -5004,7 +5007,7 @@ void hvm_domain_soft_reset(struct domain
>  void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
>                                struct segment_register *reg)
>  {
> -    hvm_funcs.get_segment_register(v, seg, reg);
> +    alternative_vcall(hvm_funcs.get_segment_register, v, seg, reg);
> 
>      switch ( seg )
>      {
> @@ -5150,7 +5153,7 @@ void hvm_set_segment_register(struct vcp
>          return;
>      }
> 
> -    hvm_funcs.set_segment_register(v, seg, reg);
> +    alternative_vcall(hvm_funcs.set_segment_register, v, seg, reg);
>  }
> 
>  /*
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -383,42 +383,42 @@ static inline int
>  hvm_guest_x86_mode(struct vcpu *v)
>  {
>      ASSERT(v == current);
> -    return hvm_funcs.guest_x86_mode(v);
> +    return alternative_call(hvm_funcs.guest_x86_mode, v);
>  }
> 
>  static inline void
>  hvm_update_host_cr3(struct vcpu *v)
>  {
>      if ( hvm_funcs.update_host_cr3 )
> -        hvm_funcs.update_host_cr3(v);
> +        alternative_vcall(hvm_funcs.update_host_cr3, v);
>  }
> 
>  static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
>  {
> -    hvm_funcs.update_guest_cr(v, cr, 0);
> +    alternative_vcall(hvm_funcs.update_guest_cr, v, cr, 0);
>  }
> 
>  static inline void hvm_update_guest_cr3(struct vcpu *v, bool noflush)
>  {
>      unsigned int flags = noflush ? HVM_UPDATE_GUEST_CR3_NOFLUSH : 0;
> 
> -    hvm_funcs.update_guest_cr(v, 3, flags);
> +    alternative_vcall(hvm_funcs.update_guest_cr, v, 3, flags);
>  }
> 
>  static inline void hvm_update_guest_efer(struct vcpu *v)
>  {
> -    hvm_funcs.update_guest_efer(v);
> +    alternative_vcall(hvm_funcs.update_guest_efer, v);
>  }
> 
>  static inline void hvm_cpuid_policy_changed(struct vcpu *v)
>  {
> -    hvm_funcs.cpuid_policy_changed(v);
> +    alternative_vcall(hvm_funcs.cpuid_policy_changed, v);
>  }
> 
>  static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset,
>                                        uint64_t at_tsc)
>  {
> -    hvm_funcs.set_tsc_offset(v, offset, at_tsc);
> +    alternative_vcall(hvm_funcs.set_tsc_offset, v, offset, at_tsc);
>  }
> 
>  /*
> @@ -435,18 +435,18 @@ static inline void hvm_flush_guest_tlbs(
>  static inline unsigned int
>  hvm_get_cpl(struct vcpu *v)
>  {
> -    return hvm_funcs.get_cpl(v);
> +    return alternative_call(hvm_funcs.get_cpl, v);
>  }
> 
>  static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v)
>  {
> -    return hvm_funcs.get_shadow_gs_base(v);
> +    return alternative_call(hvm_funcs.get_shadow_gs_base, v);
>  }
> 
>  static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
>  {
>      return hvm_funcs.get_guest_bndcfgs &&
> -           hvm_funcs.get_guest_bndcfgs(v, val);
> +           alternative_call(hvm_funcs.get_guest_bndcfgs, v, val);
>  }
> 
>  #define has_hvm_params(d) \
> @@ -503,12 +503,12 @@ static inline void hvm_inject_page_fault
> 
>  static inline int hvm_event_pending(struct vcpu *v)
>  {
> -    return hvm_funcs.event_pending(v);
> +    return alternative_call(hvm_funcs.event_pending, v);
>  }
> 
>  static inline void hvm_invlpg(struct vcpu *v, unsigned long linear)
>  {
> -    hvm_funcs.invlpg(v, linear);
> +    alternative_vcall(hvm_funcs.invlpg, v, linear);
>  }
> 
>  /* These bits in CR4 are owned by the host. */
> @@ -533,13 +533,14 @@ static inline void hvm_cpu_down(void)
> 
>  static inline unsigned int hvm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
>  {
> -    return (hvm_funcs.get_insn_bytes ? hvm_funcs.get_insn_bytes(v, buf) : 0);
> +    return (hvm_funcs.get_insn_bytes
> +            ? alternative_call(hvm_funcs.get_insn_bytes, v, buf) : 0);
>  }
> 
>  static inline void hvm_set_info_guest(struct vcpu *v)
>  {
>      if ( hvm_funcs.set_info_guest )
> -        return hvm_funcs.set_info_guest(v);
> +        alternative_vcall(hvm_funcs.set_info_guest, v);
>  }
> 
>  static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
> 
> 
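For anyone reading the archive without the rest of the series to hand: the alternative_call() / alternative_vcall() macros used throughout the diff (presumably introduced earlier in this series) take the function pointer as their first argument, followed by the call arguments, with the _vcall variant used where no return value is consumed. Below is a minimal, self-contained sketch, not Xen code; the struct, function names and main() harness are made up for illustration. It shows the shape of the hvm_funcs pattern whose call sites the patch converts so the alternatives framework can rewrite the indirect calls into direct ones.

/* Standalone sketch, NOT Xen code: all names below are invented.  It shows
 * a table of function pointers that is set up once at start-up and never
 * re-pointed afterwards, which is what makes it safe to patch each
 * indirect call site into a direct call. */
#include <stdio.h>

struct funcs_sketch {
    void (*wbinvd_intercept)(void);
    unsigned int (*get_cpl)(unsigned int vcpu_id);
};

static void fake_wbinvd_intercept(void) { puts("wbinvd intercepted"); }
static unsigned int fake_get_cpl(unsigned int vcpu_id) { return vcpu_id ? 3 : 0; }

/* Filled in once, analogous to the vendor code installing hvm_funcs at boot. */
static const struct funcs_sketch funcs = {
    .wbinvd_intercept = fake_wbinvd_intercept,
    .get_cpl          = fake_get_cpl,
};

int main(void)
{
    /* Plain indirect calls -- the "before" side of the patch.  In Xen these
     * become alternative_vcall(hvm_funcs.wbinvd_intercept) and
     * alternative_call(hvm_funcs.get_cpl, v): same source-level shape, but
     * the emitted indirect branch is rewritten to a direct call at boot. */
    funcs.wbinvd_intercept();
    printf("cpl = %u\n", funcs.get_cpl(1));
    return 0;
}

The property the conversion relies on is that hvm_funcs is effectively constant after boot, so the resolved targets can be baked into the call sites without any risk of them changing later.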

