
Re: [Xen-devel] [PATCH v3 01/16] x86: change name of parameter for various invlpg functions


  • To: Wei Liu <wei.liu2@xxxxxxxxxx>, <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: George Dunlap <george.dunlap@xxxxxxxxxx>
  • Date: Thu, 6 Sep 2018 12:12:41 +0100
  • Autocrypt: addr=george.dunlap@xxxxxxxxxx; prefer-encrypt=mutual
  • Cc: Kevin Tian <kevin.tian@xxxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Tim Deegan <tim@xxxxxxx>, Jun Nakajima <jun.nakajima@xxxxxxxxx>, Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>, Brian Woods <brian.woods@xxxxxxx>, Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
  • Delivery-date: Thu, 06 Sep 2018 11:12:55 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Openpgp: preference=signencrypt

On 09/04/2018 05:15 PM, Wei Liu wrote:
> They all incorrectly named a parameter "virtual address" when it should
> have been "linear address".
> 
> Requested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
> Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
> Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
> Acked-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>

Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
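
A side note for readers less familiar with the terminology the rename is
about: an effective ("virtual") address is relative to a segment, and adding
the segment base yields the linear address, which is what the paging
structures (and hence INVLPG/INVLPGA and paging_invlpg()) actually consume.
A minimal C sketch of that relationship, purely illustrative (to_linear()
and its parameters are not Xen functions):

    #include <stdint.h>

    /*
     * Hypothetical helper: segmentation turns an effective (virtual)
     * address into a linear one.  In 64-bit mode most segment bases are
     * forced to zero (FS and GS excepted), so linear == effective there.
     */
    static inline uint64_t to_linear(uint64_t seg_base, uint64_t effective)
    {
        return seg_base + effective;
    }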

> ---
>  xen/arch/x86/hvm/svm/svm.c         | 14 +++++++-------
>  xen/arch/x86/hvm/vmx/vmx.c         | 12 ++++++------
>  xen/arch/x86/mm.c                  | 10 +++++-----
>  xen/arch/x86/mm/hap/hap.c          |  2 +-
>  xen/arch/x86/mm/shadow/multi.c     | 14 +++++++-------
>  xen/arch/x86/mm/shadow/none.c      |  2 +-
>  xen/include/asm-x86/hvm/hvm.h      |  6 +++---
>  xen/include/asm-x86/hvm/svm/asid.h |  4 ++--
>  xen/include/asm-x86/hvm/svm/svm.h  |  4 ++--
>  xen/include/asm-x86/paging.h       |  3 ++-
>  10 files changed, 36 insertions(+), 35 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 0b06e2ff11..34d55b4938 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -2488,18 +2488,18 @@ static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs)
>  }
>  
>  static void svm_invlpga_intercept(
> -    struct vcpu *v, unsigned long vaddr, uint32_t asid)
> +    struct vcpu *v, unsigned long linear, uint32_t asid)
>  {
> -    svm_invlpga(vaddr,
> +    svm_invlpga(linear,
>                  (asid == 0)
>                  ? v->arch.hvm.n1asid.asid
>                  : vcpu_nestedhvm(v).nv_n2asid.asid);
>  }
>  
> -static void svm_invlpg_intercept(unsigned long vaddr)
> +static void svm_invlpg_intercept(unsigned long linear)
>  {
> -    HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(vaddr));
> -    paging_invlpg(current, vaddr);
> +    HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(linear));
> +    paging_invlpg(current, linear);
>  }
>  
>  static bool is_invlpg(const struct x86_emulate_state *state,
> @@ -2512,9 +2512,9 @@ static bool is_invlpg(const struct x86_emulate_state *state,
>             (ext & 7) == 7;
>  }
>  
> -static void svm_invlpg(struct vcpu *v, unsigned long vaddr)
> +static void svm_invlpg(struct vcpu *v, unsigned long linear)
>  {
> -    svm_asid_g_invlpg(v, vaddr);
> +    svm_asid_g_invlpg(v, linear);
>  }
>  
>  static bool svm_get_pending_event(struct vcpu *v, struct x86_event *info)
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index e926b0b28e..b2e1a28038 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -75,7 +75,7 @@ static void vmx_wbinvd_intercept(void);
>  static void vmx_fpu_dirty_intercept(void);
>  static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
>  static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content);
> -static void vmx_invlpg(struct vcpu *v, unsigned long vaddr);
> +static void vmx_invlpg(struct vcpu *v, unsigned long linear);
>  
>  /* Values for domain's ->arch.hvm_domain.pi_ops.flags. */
>  #define PI_CSW_FROM (1u << 0)
> @@ -2595,16 +2595,16 @@ static void vmx_dr_access(unsigned long exit_qualification,
>      vmx_update_cpu_exec_control(v);
>  }
>  
> -static void vmx_invlpg_intercept(unsigned long vaddr)
> +static void vmx_invlpg_intercept(unsigned long linear)
>  {
> -    HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
> -    paging_invlpg(current, vaddr);
> +    HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(linear));
> +    paging_invlpg(current, linear);
>  }
>  
> -static void vmx_invlpg(struct vcpu *v, unsigned long vaddr)
> +static void vmx_invlpg(struct vcpu *v, unsigned long linear)
>  {
>      if ( cpu_has_vmx_vpid )
> -        vpid_sync_vcpu_gva(v, vaddr);
> +        vpid_sync_vcpu_gva(v, linear);
>  }
>  
>  static int vmx_vmfunc_intercept(struct cpu_user_regs *regs)
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 84979f28d5..409814ce0a 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -5793,19 +5793,19 @@ const unsigned long *__init get_platform_badpages(unsigned int *array_size)
>      return bad_pages;
>  }
>  
> -void paging_invlpg(struct vcpu *v, unsigned long va)
> +void paging_invlpg(struct vcpu *v, unsigned long linear)
>  {
> -    if ( !is_canonical_address(va) )
> +    if ( !is_canonical_address(linear) )
>          return;
>  
>      if ( paging_mode_enabled(v->domain) &&
> -         !paging_get_hostmode(v)->invlpg(v, va) )
> +         !paging_get_hostmode(v)->invlpg(v, linear) )
>          return;
>  
>      if ( is_pv_vcpu(v) )
> -        flush_tlb_one_local(va);
> +        flush_tlb_one_local(linear);
>      else
> -        hvm_invlpg(v, va);
> +        hvm_invlpg(v, linear);
>  }
>  
>  /* Build a 32bit PSE page table using 4MB pages. */
> diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
> index c53d76cf69..3d651b94c3 100644
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -650,7 +650,7 @@ static int hap_page_fault(struct vcpu *v, unsigned long va,
>   * should not be intercepting it.  However, we need to correctly handle
>   * getting here from instruction emulation.
>   */
> -static bool_t hap_invlpg(struct vcpu *v, unsigned long va)
> +static bool_t hap_invlpg(struct vcpu *v, unsigned long linear)
>  {
>      /*
>       * Emulate INVLPGA:
> diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
> index 7bb6f47155..bba573ae87 100644
> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -3549,7 +3549,7 @@ propagate:
>   * instruction should be issued on the hardware, or false if it's safe not
>   * to do so.
>   */
> -static bool sh_invlpg(struct vcpu *v, unsigned long va)
> +static bool sh_invlpg(struct vcpu *v, unsigned long linear)
>  {
>      mfn_t sl1mfn;
>      shadow_l2e_t sl2e;
> @@ -3572,14 +3572,14 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
>      {
>          shadow_l3e_t sl3e;
>          if ( !(shadow_l4e_get_flags(
> -                   sh_linear_l4_table(v)[shadow_l4_linear_offset(va)])
> +                   sh_linear_l4_table(v)[shadow_l4_linear_offset(linear)])
>                 & _PAGE_PRESENT) )
>              return false;
>          /* This must still be a copy-from-user because we don't have the
>           * paging lock, and the higher-level shadows might disappear
>           * under our feet. */
>          if ( __copy_from_user(&sl3e, (sh_linear_l3_table(v)
> -                                      + shadow_l3_linear_offset(va)),
> +                                      + shadow_l3_linear_offset(linear)),
>                                sizeof (sl3e)) != 0 )
>          {
>              perfc_incr(shadow_invlpg_fault);
> @@ -3589,7 +3589,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
>              return false;
>      }
>  #else /* SHADOW_PAGING_LEVELS == 3 */
> -    if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(va)])
> +    if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(linear)])
>             & _PAGE_PRESENT) )
>          // no need to flush anything if there's no SL2...
>          return false;
> @@ -3598,7 +3598,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
>      /* This must still be a copy-from-user because we don't have the shadow
>       * lock, and the higher-level shadows might disappear under our feet. */
>      if ( __copy_from_user(&sl2e,
> -                          sh_linear_l2_table(v) + shadow_l2_linear_offset(va),
> +                          sh_linear_l2_table(v) + shadow_l2_linear_offset(linear),
>                            sizeof (sl2e)) != 0 )
>      {
>          perfc_incr(shadow_invlpg_fault);
> @@ -3642,7 +3642,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
>               * feet. */
>              if ( __copy_from_user(&sl2e,
>                                    sh_linear_l2_table(v)
> -                                  + shadow_l2_linear_offset(va),
> +                                  + shadow_l2_linear_offset(linear),
>                                    sizeof (sl2e)) != 0 )
>              {
>                  perfc_incr(shadow_invlpg_fault);
> @@ -3664,7 +3664,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
>                          && page_is_out_of_sync(pg) ) )
>              {
>                  shadow_l1e_t *sl1;
> -                sl1 = sh_linear_l1_table(v) + shadow_l1_linear_offset(va);
> +                sl1 = sh_linear_l1_table(v) + shadow_l1_linear_offset(linear);
>                  /* Remove the shadow entry that maps this VA */
>                  (void) shadow_set_l1e(d, sl1, shadow_l1e_empty(),
>                                        p2m_invalid, sl1mfn);
> diff --git a/xen/arch/x86/mm/shadow/none.c b/xen/arch/x86/mm/shadow/none.c
> index a8c9604cdf..4de645a433 100644
> --- a/xen/arch/x86/mm/shadow/none.c
> +++ b/xen/arch/x86/mm/shadow/none.c
> @@ -37,7 +37,7 @@ static int _page_fault(struct vcpu *v, unsigned long va,
>      return 0;
>  }
>  
> -static bool _invlpg(struct vcpu *v, unsigned long va)
> +static bool _invlpg(struct vcpu *v, unsigned long linear)
>  {
>      ASSERT_UNREACHABLE();
>      return true;
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 132e62b4f6..6b0e088750 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -160,7 +160,7 @@ struct hvm_function_table {
>  
>      int  (*event_pending)(struct vcpu *v);
>      bool (*get_pending_event)(struct vcpu *v, struct x86_event *info);
> -    void (*invlpg)(struct vcpu *v, unsigned long vaddr);
> +    void (*invlpg)(struct vcpu *v, unsigned long linear);
>  
>      int  (*cpu_up_prepare)(unsigned int cpu);
>      void (*cpu_dead)(unsigned int cpu);
> @@ -454,9 +454,9 @@ static inline int hvm_event_pending(struct vcpu *v)
>      return hvm_funcs.event_pending(v);
>  }
>  
> -static inline void hvm_invlpg(struct vcpu *v, unsigned long va)
> +static inline void hvm_invlpg(struct vcpu *v, unsigned long linear)
>  {
> -    hvm_funcs.invlpg(v, va);
> +    hvm_funcs.invlpg(v, linear);
>  }
>  
>  /* These bits in CR4 are owned by the host. */
> diff --git a/xen/include/asm-x86/hvm/svm/asid.h b/xen/include/asm-x86/hvm/svm/asid.h
> index 60cbb7b881..0e5ec3ab78 100644
> --- a/xen/include/asm-x86/hvm/svm/asid.h
> +++ b/xen/include/asm-x86/hvm/svm/asid.h
> @@ -25,11 +25,11 @@
>  void svm_asid_init(const struct cpuinfo_x86 *c);
>  void svm_asid_handle_vmrun(void);
>  
> -static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
> +static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_linear)
>  {
>  #if 0
>      /* Optimization? */
> -    svm_invlpga(g_vaddr, v->arch.hvm.svm.vmcb->guest_asid);
> +    svm_invlpga(g_linear, v->arch.hvm.svm.vmcb->guest_asid);
>  #endif
>  
>      /* Safe fallback. Take a new ASID. */
> diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h
> index 4e5e142910..8166046a6d 100644
> --- a/xen/include/asm-x86/hvm/svm/svm.h
> +++ b/xen/include/asm-x86/hvm/svm/svm.h
> @@ -40,13 +40,13 @@ static inline void svm_vmsave_pa(paddr_t vmcb)
>          : : "a" (vmcb) : "memory" );
>  }
>  
> -static inline void svm_invlpga(unsigned long vaddr, uint32_t asid)
> +static inline void svm_invlpga(unsigned long linear, uint32_t asid)
>  {
>      asm volatile (
>          ".byte 0x0f,0x01,0xdf"
>          : /* output */
>          : /* input */
> -        "a" (vaddr), "c" (asid));
> +        "a" (linear), "c" (asid));
>  }
>  
>  unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
> diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
> index f440e3e53c..b51e1709d3 100644
> --- a/xen/include/asm-x86/paging.h
> +++ b/xen/include/asm-x86/paging.h
> @@ -110,7 +110,8 @@ struct shadow_paging_mode {
>  struct paging_mode {
>      int           (*page_fault            )(struct vcpu *v, unsigned long va,
>                                              struct cpu_user_regs *regs);
> -    bool          (*invlpg                )(struct vcpu *v, unsigned long va);
> +    bool          (*invlpg                )(struct vcpu *v,
> +                                            unsigned long linear);
>      unsigned long (*gva_to_gfn            )(struct vcpu *v,
>                                              struct p2m_domain *p2m,
>                                              unsigned long va,
> 
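
An aside on the svm_invlpga() hunk above, for anyone puzzled by the raw
bytes: 0x0f,0x01,0xdf is the INVLPGA opcode, which takes the linear address
in rAX and the ASID in ECX; that is why the asm constraints are "a" and "c",
and why "linear" is the more accurate name for the first parameter. A
commented sketch of a standalone equivalent (invlpga_sketch() is
illustrative, not a Xen function):

    #include <stdint.h>

    static inline void invlpga_sketch(unsigned long linear, uint32_t asid)
    {
        /* INVLPGA (opcode 0f 01 df): invalidate the TLB entry for the
         * linear address in rAX within the ASID given in ECX.  Emitted
         * as raw bytes so assemblers lacking the mnemonic still cope. */
        asm volatile ( ".byte 0x0f,0x01,0xdf"
                       : /* no outputs */
                       : "a" (linear), "c" (asid) );
    }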


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

