
Re: [Xen-devel] [PATCH v2] x86: synchronize PCI config space access decoding



On 15/06/15 15:30, Jan Beulich wrote:
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2406,11 +2406,6 @@ void hvm_vcpu_down(struct vcpu *v)
>  struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
>                                                   ioreq_t *p)
>  {
> -#define CF8_BDF(cf8)     (((cf8) & 0x00ffff00) >> 8)
> -#define CF8_ADDR_LO(cf8) ((cf8) & 0x000000fc)
> -#define CF8_ADDR_HI(cf8) (((cf8) & 0x0f000000) >> 16)
> -#define CF8_ENABLED(cf8) (!!((cf8) & 0x80000000))
> -
>      struct hvm_ioreq_server *s;
>      uint32_t cf8;
>      uint8_t type;
> @@ -2439,9 +2434,19 @@ struct hvm_ioreq_server *hvm_select_iore
>  
>          type = IOREQ_TYPE_PCI_CONFIG;
>          addr = ((uint64_t)sbdf << 32) |
> -               CF8_ADDR_HI(cf8) |
>                 CF8_ADDR_LO(cf8) |
>                 (p->addr & 3);
> +        /* AMD extended configuration space access? */
> +        if ( CF8_ADDR_HI(cf8) &&
> +             d->arch.x86_vendor == X86_VENDOR_AMD &&
> +             d->arch.x86 >= 0x10 && d->arch.x86 <= 0x17 )
> +        {
> +            uint64_t msr_val;
> +
> +            if ( !rdmsr_safe(MSR_AMD64_NB_CFG, msr_val) &&

We now have several common paths which read this MSR looking for CF8_EXT.

I think it would make sense to probe this once at boot and have a
cpu_has_amd_cf8_ext flag, rather than repeatedly sampling an off-cpu
MSR, although this would require synchronising updates to it across all
northbridges in emulate_privileged_op().

Alternatively, it might just be better to unconditionally enable it
during startup (as Linux does) and prevent dom0 from playing with it,
which would avoid the need to synchronise updates to it.
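
Something along these lines is what I had in mind (untested sketch
only - the helper name and its placement are made up, and it
deliberately punts on the resynchronisation question above):

static bool_t __read_mostly cpu_has_amd_cf8_ext;

static void __init probe_amd_cf8_ext(void)
{
    uint64_t msr_val;

    /* Sample the northbridge MSR once on the boot CPU and cache it. */
    if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
         boot_cpu_data.x86 >= 0x10 && boot_cpu_data.x86 <= 0x17 &&
         !rdmsr_safe(MSR_AMD64_NB_CFG, msr_val) )
        cpu_has_amd_cf8_ext =
            !!(msr_val & (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT));
}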

> +                 (msr_val & (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT)) )
> +                addr |= CF8_ADDR_HI(cf8);
> +        }
>      }
>      else
>      {
> @@ -2495,11 +2500,6 @@ struct hvm_ioreq_server *hvm_select_iore
>      }
>  
>      return d->arch.hvm_domain.default_ioreq_server;
> -
> -#undef CF8_ADDR_ENABLED
> -#undef CF8_ADDR_HI
> -#undef CF8_ADDR_LO
> -#undef CF8_BDF
>  }
>  
>  int hvm_buffered_io_send(ioreq_t *p)
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -1771,15 +1771,18 @@ static bool_t admin_io_okay(unsigned int
>      return ioports_access_permitted(d, port, port + bytes - 1);
>  }
>  
> -static bool_t pci_cfg_ok(struct domain *currd, bool_t write, unsigned int size)
> +static bool_t pci_cfg_ok(struct domain *currd, bool_t write,
> +                         unsigned int start, unsigned int size)
>  {
>      uint32_t machine_bdf;
> -    unsigned int start;
>  
>      if ( !is_hardware_domain(currd) )
>          return 0;
>  
> -    machine_bdf = (currd->arch.pci_cf8 >> 8) & 0xFFFF;
> +    if ( !CF8_ENABLED(currd->arch.pci_cf8) )
> +        return 1;
> +
> +    machine_bdf = CF8_BDF(currd->arch.pci_cf8);
>      if ( write )
>      {
>          const unsigned long *ro_map = pci_get_ro_map(0);
> @@ -1787,9 +1790,9 @@ static bool_t pci_cfg_ok(struct domain *
>          if ( ro_map && test_bit(machine_bdf, ro_map) )
>              return 0;
>      }
> -    start = currd->arch.pci_cf8 & 0xFF;
> +    start |= CF8_ADDR_LO(currd->arch.pci_cf8);

This, combined with the change to the callers, looks suspect.

The callers are both accesses at cfc, with port & 3 being the byte
offset within the cfc window.  The logical OR here combines that offset
with the register address requested via the setting in cf8.

Is this intentional, and if so, why?
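
To make the question concrete, here is my reading of the combined
decode as a standalone toy (not code from the patch): a one-byte read
at cfc+1 while cf8 selects register 0xe8 ends up with the two OR'd
together.

#include <stdint.h>
#include <stdio.h>

#define CF8_ADDR_LO(cf8) ((cf8) & 0x000000fc)

int main(void)
{
    uint32_t cf8 = 0x8000a1e8;     /* enabled, bus 0 dev 0x14 fn 1, reg 0xe8 */
    unsigned int port = 0xcfd;     /* one-byte access at cfc+1 */

    unsigned int start = port & 3; /* byte offset within the cfc window: 1 */
    start |= CF8_ADDR_LO(cf8);     /* OR in the dword register from cf8: 0xe8 */

    printf("effective config offset: %#x\n", start); /* prints 0xe9 */
    return 0;
}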

> --- a/xen/include/asm-x86/pci.h
> +++ b/xen/include/asm-x86/pci.h
> @@ -1,6 +1,11 @@
>  #ifndef __X86_PCI_H__
>  #define __X86_PCI_H__
>  
> +#define CF8_BDF(cf8)     (((cf8) & 0x00ffff00) >> 8)
> +#define CF8_ADDR_LO(cf8) ((cf8) & 0x000000fc)
> +#define CF8_ADDR_HI(cf8) (((cf8) & 0x0f000000) >> 16)
> +#define CF8_ENABLED(cf8) (!!((cf8) & 0x80000000))

As you are moving these, would you mind vertically aligning the
'(cf8) &' parts, which will make them far easier to read?
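
i.e. something like (whitespace changes only):

#define CF8_BDF(cf8)     (  ((cf8) & 0x00ffff00) >> 8)
#define CF8_ADDR_LO(cf8) (   (cf8) & 0x000000fc)
#define CF8_ADDR_HI(cf8) (  ((cf8) & 0x0f000000) >> 16)
#define CF8_ENABLED(cf8) (!!((cf8) & 0x80000000))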

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel