[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 2/2] qemu-xen: Intel GPU passthrough



On 01/02 02:52, Stefano Stabellini wrote:
> On Tue, 31 Jan 2012, Jean Guyader wrote:
> > 
> > Reset Intel GPU fences when the domain starts (first mapping
> > of Bar0).
> > 
> > Signed-off-by: Jean Guyader <jean.guyader@xxxxxxxxxxxxx>
> > ---
> >  hw/pt-graphics.c |  133 
> > ++++++++++++++++++++++++++++++++++++++++++++++++++++++
> >  1 files changed, 133 insertions(+), 0 deletions(-)
> > 
> inline patches please
> 
> 
> > diff --git a/hw/pt-graphics.c b/hw/pt-graphics.c
> > index 5d5e5da..7403abe 100644
> > --- a/hw/pt-graphics.c
> > +++ b/hw/pt-graphics.c
> > @@ -13,6 +13,31 @@
> >  extern int gfx_passthru;
> >  extern int igd_passthru;
> >  
> > +#define IGFX_CANTIGA            0x10
> > +#define IGFX_IRONLAKE           0x20
> > +#define IGFX_IBEXPEAK           0x30
> > +#define IGFX_COUGARPOINT        0x40
> > +
> > +struct igfx_chip
> > +{
> > +    int chip;
> > +    uint16_t device_id;
> > +};
> > +
> > +struct igfx_chip igfx_chips[] =
> > +{
> > +    {IGFX_IRONLAKE,     0x0042},
> > +    {IGFX_IRONLAKE,     0x0046},
> > +    {IGFX_CANTIGA,      0x20e4},
> > +    {IGFX_CANTIGA,      0x2a42},
> > +    {IGFX_CANTIGA,      0x2e12},
> > +    {IGFX_COUGARPOINT,  0x0152},
> > +    {IGFX_COUGARPOINT,  0x0112},
> > +    {IGFX_COUGARPOINT,  0x0116},
> > +    {IGFX_COUGARPOINT,  0x0126},
> > +    {0, 0}
> > +};
> > +
> >  static int pch_map_irq(PCIDevice *pci_dev, int irq_num)
> >  {
> >      PT_LOG("pch_map_irq called\n");
> > @@ -37,6 +62,98 @@ void intel_pch_init(PCIBus *bus)
> >                          pch_map_irq, "intel_bridge_1f");
> >  }
> >  
> > +static int igd_get_chip(struct pt_dev *p)
> > +{
> > +    int i;
> > +    int chip = 0;
> > +    int devid = p->pci_dev->device_id;
> > +
> > +    for (i = 0; igfx_chips[i].chip; i++)
> > +        if (devid == igfx_chips[i].device_id)
> > +        {
> > +            chip = igfx_chips[i].chip;
> > +            break;
> > +        }
> > +
> > +    if (!chip)
> > +    {
> > +        if (devid & 0x2000)
> > +            chip = IGFX_CANTIGA;
> > +        else if (devid & 0x100)
> > +            chip =  IGFX_COUGARPOINT;
> > +        else
> > +            chip = IGFX_IRONLAKE;
> > +        PT_LOG("GUESS FOR CHIP 0x%04x as type %x", devid, chip);
> > +    }
> > +    return chip;
> > +}
> > +
> > +
> > +static uint32_t igd_mmio_read(struct pt_dev *p, uint32_t addr, uint8_t 
> > size)
> > +{
> > +    uint8_t *map = p->bases[0].map;
> > +    uint32_t ret;
> > +
> > +    switch (size)
> > +    {
> > +        case 1:
> > +            ret = *(volatile uint8_t *)(map + addr);
> > +            break;
> > +        case 2:
> > +            ret = *(volatile uint16_t *)(map + addr);
> > +            break;
> > +        case 4:
> > +            ret = *(volatile uint32_t *)(map + addr);
> > +            break;
> > +        default:
> > +            PT_LOG("igd_do_mmio: Unknown size %d\n", size);
> > +    }
> > +    return ret;
> > +}
> 
> igd_mmio_read is currently unused
> 
> 
> > +static void igd_mmio_write(struct pt_dev *p, uint32_t addr, uint32_t val,
> > +                           uint8_t size)
> > +{
> > +    uint8_t *map = p->bases[0].map;
> > +
> > +    switch (size)
> > +    {
> > +        case 1:
> > +            *(volatile uint8_t *)(map + addr) = (uint8_t)val;
> > +            break;
> > +        case 2:
> > +            *(volatile uint16_t *)(map + addr) = (uint16_t)val;
> > +            break;
> > +        case 4:
> > +            *(volatile uint32_t *)(map + addr) = (uint32_t)val;
> > +            break;
> > +        default:
> > +            PT_LOG("igd_do_mmio: Unknown size %d\n", size);
> > +    }
> > +}
> > +
> > +static void igd_reset_fences(struct pt_dev *pt_dev)
> > +{
> > +    int i = 0;
> > +    uint32_t fence_addr;
> > +
> > +    switch (igd_get_chip(pt_dev))
> > +    {
> > +        case IGFX_CANTIGA:
> > +        case IGFX_IRONLAKE:
> > +        case IGFX_IBEXPEAK:
> > +            fence_addr = 0x3000;
> > +        case IGFX_COUGARPOINT:
> > +            fence_addr = 0x100000;
> > +    }
> > +
> > +    for (i = 0; i < 16; i++)
> > +    {
> > +        igd_mmio_write(pt_dev, fence_addr + (i << 4), 0, 4);
> > +        igd_mmio_write(pt_dev, fence_addr + (i << 4) + 4, 0, 4);
> > +    }
> > +}
> > +
> >  void igd_pci_write(PCIDevice *pci_dev, uint32_t config_addr, uint32_t val, 
> > int len)
> >  {
> >      struct pci_dev *pci_dev_host_bridge = pt_pci_get_dev(0, 0, 0);
> > @@ -98,6 +215,16 @@ uint32_t igd_pci_read(PCIDevice *pci_dev, uint32_t 
> > config_addr, int len)
> >   */
> >  void pt_graphic_bar_remap(struct pt_dev *real_device, int bar, int 
> > first_map, int map)
> >  {
> > +    /*
> > +     * Reset the fence register on the first remap
> > +     * of Bar0 for a Intel GPU
> > +     */
> > +    if (real_device->pci_dev->device_class == 0x0300 &&
> > +            real_device->pci_dev->device_id == PCI_VENDOR_ID_INTEL &&
> > +            bar == 0 && first_map && map == DPCI_ADD_MAPPING)
> > +    {
> > +        igd_reset_fences(real_device);
> > +    }
> >  }
> 
> coding style, see http://git.savannah.gnu.org/cgit/qemu.git/tree/CODING_STYLE
> 
> 
> >  /*
> > @@ -137,6 +264,12 @@ int register_vga_regions(struct pt_dev *real_device)
> >          PT_LOG("register_vga: igd_opregion = %x\n", igd_opregion);
> >      }
> >  
> > +    if (vendor_id == PCI_VENDOR_ID_INTEL)
> > +    {
> > +        if (pt_pci_host_map_bar(real_device, 0) != 0)
> > +            PT_LOG("Can't map Intel Bar 0\n");
> > +    }
> 
> How fatal is this error? Maybe we need to return an error from
> register_vga_regions and propagate it upward?

It's not actually fatal. If you don't reset the fences, the VESA buffer
in the guest will look corrupted, but apart from that it will work fine.
So I think I will just make my code silent if the BAR mapping fails.

Jean

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.