[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v19 12/14] x86/VPMU: Merge vpmu_rdmsr and vpmu_wrmsr



Am Mittwoch 25 März 2015, 09:29:14 schrieb Dietmar Hahn:
> Am Dienstag 17 März 2015, 10:54:09 schrieb Boris Ostrovsky:
> > The two routines share most of their logic.
> > 
> > Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> > ---
> > Changes in v19:
> > * const-ified arch_vpmu_ops in vpmu_do_wrmsr
> > * non-changes:
> >    - kept 'current' as a non-initializer to avoid unnecessary initialization
> >      in the (common) non-VPMU case
> >    - kept 'nop' label since there are multiple dissimilar cases that can 
> > cause
> >      a non-emulation of VPMU access
> > 
> >  xen/arch/x86/hvm/vpmu.c        | 76 
> > +++++++++++++++++-------------------------
> >  xen/include/asm-x86/hvm/vpmu.h | 14 ++++++--
> >  2 files changed, 42 insertions(+), 48 deletions(-)
> > 
> > diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
> > index c287d8b..beed956 100644
> > --- a/xen/arch/x86/hvm/vpmu.c
> > +++ b/xen/arch/x86/hvm/vpmu.c
> > @@ -103,63 +103,47 @@ void vpmu_lvtpc_update(uint32_t val)
> >          apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
> >  }
> >  
> > -int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content, uint64_t 
> > supported)
> > +int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
> > +                uint64_t supported, bool_t is_write)
> >  {
> > -    struct vcpu *curr = current;
> > +    struct vcpu *curr;
> >      struct vpmu_struct *vpmu;
> > +    const struct arch_vpmu_ops *ops;
> 
> What is this const useful for in this small function?

Sorry for the question and for the noise I already got it!

> 
> Reviewed-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
> 
> Dietmar.
> 
> > +    int ret = 0;
> >  
> >      if ( vpmu_mode == XENPMU_MODE_OFF )
> > -        return 0;
> > +        goto nop;
> >  
> > +    curr = current;
> >      vpmu = vcpu_vpmu(curr);
> > -    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr )
> > -    {
> > -        int ret = vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content, 
> > supported);
> > -
> > -        /*
> > -         * We may have received a PMU interrupt during WRMSR handling
> > -         * and since do_wrmsr may load VPMU context we should save
> > -         * (and unload) it again.
> > -         */
> > -        if ( !is_hvm_vcpu(curr) && vpmu->xenpmu_data &&
> > -             (vpmu->xenpmu_data->pmu.pmu_flags & PMU_CACHED) )
> > -        {
> > -            vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
> > -            vpmu->arch_vpmu_ops->arch_vpmu_save(curr);
> > -            vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
> > -        }
> > -        return ret;
> > -    }
> > -
> > -    return 0;
> > -}
> > -
> > -int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
> > -{
> > -    struct vcpu *curr = current;
> > -    struct vpmu_struct *vpmu;
> > +    ops = vpmu->arch_vpmu_ops;
> > +    if ( !ops )
> > +        goto nop;
> > +
> > +    if ( is_write && ops->do_wrmsr )
> > +        ret = ops->do_wrmsr(msr, *msr_content, supported);
> > +    else if ( !is_write && ops->do_rdmsr )
> > +        ret = ops->do_rdmsr(msr, msr_content);
> > +    else
> > +        goto nop;
> >  
> > -    if ( vpmu_mode == XENPMU_MODE_OFF )
> > +    /*
> > +     * We may have received a PMU interrupt while handling MSR access
> > +     * and since do_wr/rdmsr may load VPMU context we should save
> > +     * (and unload) it again.
> > +     */
> > +    if ( !is_hvm_vcpu(curr) &&
> > +         vpmu->xenpmu_data && (vpmu->xenpmu_data->pmu.pmu_flags & 
> > PMU_CACHED) )
> >      {
> > -        *msr_content = 0;
> > -        return 0;
> > +        vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
> > +        ops->arch_vpmu_save(curr);
> > +        vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
> >      }
> >  
> > -    vpmu = vcpu_vpmu(curr);
> > -    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr )
> > -    {
> > -        int ret = vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content);
> > +    return ret;
> >  
> > -        if ( !is_hvm_vcpu(curr) && vpmu->xenpmu_data &&
> > -             (vpmu->xenpmu_data->pmu.pmu_flags & PMU_CACHED) )
> > -        {
> > -            vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
> > -            vpmu->arch_vpmu_ops->arch_vpmu_save(curr);
> > -            vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
> > -        }
> > -        return ret;
> > -    }
> > -    else
> > + nop:
> > +    if ( !is_write )
> >          *msr_content = 0;
> >  
> >      return 0;
> > diff --git a/xen/include/asm-x86/hvm/vpmu.h b/xen/include/asm-x86/hvm/vpmu.h
> > index 642a4b7..63851a7 100644
> > --- a/xen/include/asm-x86/hvm/vpmu.h
> > +++ b/xen/include/asm-x86/hvm/vpmu.h
> > @@ -99,8 +99,8 @@ static inline bool_t vpmu_are_all_set(const struct 
> > vpmu_struct *vpmu,
> >  }
> >  
> >  void vpmu_lvtpc_update(uint32_t val);
> > -int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content, uint64_t 
> > supported);
> > -int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content);
> > +int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
> > +                uint64_t supported, bool_t is_write);
> >  void vpmu_do_interrupt(struct cpu_user_regs *regs);
> >  void vpmu_do_cpuid(unsigned int input, unsigned int *eax, unsigned int 
> > *ebx,
> >                                         unsigned int *ecx, unsigned int 
> > *edx);
> > @@ -110,6 +110,16 @@ void vpmu_save(struct vcpu *v);
> >  void vpmu_load(struct vcpu *v);
> >  void vpmu_dump(struct vcpu *v);
> >  
> > +static inline int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
> > +                                uint64_t supported)
> > +{
> > +    return vpmu_do_msr(msr, &msr_content, supported, 1);
> > +}
> > +static inline int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
> > +{
> > +    return vpmu_do_msr(msr, msr_content, 0, 0);
> > +}
> > +
> >  extern int acquire_pmu_ownership(int pmu_ownership);
> >  extern void release_pmu_ownership(int pmu_ownership);
> >  
> > 
> 
> 

-- 
Company details: http://ts.fujitsu.com/imprint.html

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.