
[Xen-devel] [patch 1/2] linux-2.6.18-xen: mmconfig: Fix x86_64 ioremap base_address



The current mmconfig code has some problems with the remapped range.

a) In the case of broken MCFG tables on Asus etc., we need to remap a
   256M range, but currently only 1M is remapped.

b) The base address always corresponds to bus number 0, but currently
   we assume it corresponds to the start bus number.

This patch fixes the above problems.
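
(For reference, the arithmetic behind both fixes, as a minimal sketch
rather than code taken from the patch; "cfg", "virt", "bus", "devfn"
and "reg" are stand-ins for the variables mmconfig.c actually uses.
Each bus decodes 1M of config space (32 devices * 8 functions * 4K
per function), and the MCFG base_address is where bus 0 starts:

	/* bytes needed to cover buses 0..end_bus_number, 1M per bus */
	u32 size = (cfg->end_bus_number + 1) << 20;

	/* config register for (bus, devfn, reg), relative to bus 0 */
	char __iomem *p = virt + ((bus << 20) | (devfn << 12) | reg);

So a single bus 0-0 entry that really stands for all 256 buses needs
a 256M mapping, and the mapping must be based at the bus 0 address
rather than at start_bus_number.)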

(akpm: Arjan suggests that if the MCFG table is broken we just
shouldn't use it, rather than trying to work around things.)

Back-ported to 2.6.18 by Simon Horman

Signed-off-by: OGAWA Hirofumi <hirofumi@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Andi Kleen <ak@xxxxxxx>
Cc: Arjan van de Ven <arjan@xxxxxxxxxxxxxxx>
Cc: Andi Kleen <ak@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
Signed-off-by: Simon Horman <horms@xxxxxxxxxxxx>

--- 

Without this change mmconfig fails on x86_64, as it always tries to
map the full 256M aperture; the hypervisor does not allow that if it
isn't the actual mmconfig aperture.

It also depends on "PCI x86: always use conf1 to access config space
below 256 bytes", posted separately.
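
As a concrete (hypothetical) example: for an MCFG entry covering
buses 0-63, the old code always requested the maximum aperture,

	ioremap_nocache(cfg->base_address, MMCONFIG_APER_MAX); /* 256M */

which the hypervisor refuses when only 64M is actually decoded there,
while the new mcfg_ioremap() requests exactly what the table
describes:

	size = (cfg->end_bus_number + 1) << 20;	/* (63 + 1) << 20 == 64M */
	ioremap_nocache(cfg->base_address, size);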

Index: linux-2.6.18-xen.hg/arch/x86_64/pci/mmconfig.c
===================================================================
--- linux-2.6.18-xen.hg.orig/arch/x86_64/pci/mmconfig.c 2009-08-13 11:08:33.000000000 +0900
+++ linux-2.6.18-xen.hg/arch/x86_64/pci/mmconfig.c      2009-08-13 11:10:12.000000000 +0900
@@ -30,6 +30,36 @@ struct mmcfg_virt {
 };
 static struct mmcfg_virt *pci_mmcfg_virt;
 
+static inline int mcfg_broken(void)
+{
+       struct acpi_table_mcfg_config *cfg = &pci_mmcfg_config[0];
+
+       /* Handle more broken MCFG tables on Asus etc.
+          They only contain a single entry for bus 0-0. Assume
+          this applies to all busses. */
+       if (pci_mmcfg_config_num == 1 &&
+           cfg->pci_segment_group_number == 0 &&
+           (cfg->start_bus_number | cfg->end_bus_number) == 0)
+               return 1;
+       return 0;
+}
+
+static void __iomem * __init mcfg_ioremap(struct acpi_table_mcfg_config *cfg)
+{
+       void __iomem *addr;
+       u32 size;
+
+       size = (cfg->end_bus_number + 1) << 20;
+       printk(KERN_INFO "%s: end_bus_number=%d\n", __func__,
+              cfg->end_bus_number);
+       addr = ioremap_nocache(cfg->base_address, size);
+       if (addr) {
+               printk(KERN_INFO "PCI: Using MMCONFIG at %Lx - %Lx\n",
+                      cfg->base_address, cfg->base_address + size - 1);
+       }
+       return addr;
+}
+
 static char __iomem *get_virt(unsigned int seg, unsigned bus)
 {
        int cfg_num = -1;
@@ -47,13 +77,7 @@ static char __iomem *get_virt(unsigned i
                        return pci_mmcfg_virt[cfg_num].virt;
        }
 
-       /* Handle more broken MCFG tables on Asus etc.
-          They only contain a single entry for bus 0-0. Assume
-          this applies to all busses. */
-       cfg = &pci_mmcfg_config[0];
-       if (pci_mmcfg_config_num == 1 &&
-               cfg->pci_segment_group_number == 0 &&
-               (cfg->start_bus_number | cfg->end_bus_number) == 0)
+       if (mcfg_broken())
                return pci_mmcfg_virt[0].virt;
 
        /* Fall back to type 0 */
@@ -194,14 +218,12 @@ void __init pci_mmcfg_init(void)
        }
        for (i = 0; i < pci_mmcfg_config_num; ++i) {
                pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
-               pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address,
-                                                        MMCONFIG_APER_MAX);
+               pci_mmcfg_virt[i].virt = mcfg_ioremap(&pci_mmcfg_config[i]);
                if (!pci_mmcfg_virt[i].virt) {
                        printk("PCI: Cannot map mmconfig aperture for segment %d\n",
                               pci_mmcfg_config[i].pci_segment_group_number);
                        return;
                }
-               printk(KERN_INFO "PCI: Using MMCONFIG at %x\n", pci_mmcfg_config[i].base_address);
        }
 
        unreachable_devices();

-- 
