[Xen-devel] [PATCH, v2] x86: allow Dom0 read-only access to IO-APICs



There are BIOSes that want to map the IO-APIC MMIO region from some
ACPI method(s), and there is at least one BIOS flavor that wants to
use this mapping to clear an RTE's mask bit. While we can't allow the
latter, we can permit reads and simply drop write attempts, leveraging
the existing infrastructure originally introduced to deal with AMD
IOMMUs' representation as PCI devices.
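
(For illustration only, not part of the patch: the read-allow/write-drop
handling, in rough outline, works along the lines of the sketch below.
The helper name discard_guest_write() is a placeholder; the real
decode-and-complete logic is the existing emulation code in
xen/arch/x86/mm.c.)

    /* Sketch: handling a write fault against a read-only MMIO page. */
    static int ro_mmio_write_fault(unsigned long addr, unsigned long mfn)
    {
        /* Pages not registered in mmio_ro_ranges are not ours. */
        if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn) )
            return 0;               /* fault not handled here */

        /*
         * Reads proceed through the (read-only) mapping as usual; a
         * write is decoded and completed without touching the device,
         * i.e. its effect is silently dropped.
         */
        discard_guest_write(addr);  /* placeholder helper */
        return 1;                   /* fault handled */
    }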

This fixes an interrupt setup problem on a system where _CRS evaluation
involved the above-described BIOS/ACPI behavior, and is expected to
also address a boot-time crash of pv-ops Linux upon encountering the
same kind of system.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Change placement of rangeset_new() invocation, move declaration
    and definition of mmio_ro_ranges to x86-specific locations.

--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -1114,7 +1114,7 @@ int __init construct_dom0(
     for ( i = 0; i < nr_ioapics; i++ )
     {
         mfn = paddr_to_pfn(mp_ioapics[i].mpc_apicaddr);
-        if ( smp_found_config )
+        if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn) )
             rc |= iomem_deny_access(dom0, mfn, mfn);
     }
 
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -2507,6 +2507,11 @@ void __init init_ioapic_mappings(void)
             reg_01.raw = io_apic_read(i, 1);
             nr_ioapic_entries[i] = reg_01.bits.entries + 1;
             nr_irqs_gsi += nr_ioapic_entries[i];
+
+            if ( rangeset_add_singleton(mmio_ro_ranges,
+                                        ioapic_phys >> PAGE_SHIFT) )
+                printk(KERN_ERR "Failed to mark IO-APIC page %lx read-only\n",
+                       ioapic_phys);
         }
     }
 
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -151,6 +151,8 @@ unsigned long __read_mostly pdx_group_va
 
 bool_t __read_mostly machine_to_phys_mapping_valid = 0;
 
+struct rangeset *__read_mostly mmio_ro_ranges;
+
 #define PAGE_CACHE_ATTRS (_PAGE_PAT|_PAGE_PCD|_PAGE_PWT)
 
 bool_t __read_mostly opt_allow_superpage;
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1216,6 +1216,9 @@ void __init __start_xen(unsigned long mb
     /* Low mappings were only needed for some BIOS table parsing. */
     zap_low_mappings();
 
+    mmio_ro_ranges = rangeset_new(NULL, "r/o mmio ranges",
+                                  RANGESETF_prettyprint_hex);
+
     init_apic_mappings();
 
     normalise_cpu_order();
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -27,8 +27,6 @@
 #include <xen/hvm/irq.h>
 #include <xen/tasklet.h>
 
-struct rangeset *__read_mostly mmio_ro_ranges;
-
 static void hvm_dirq_assist(unsigned long _d);
 
 bool_t pt_irq_need_timer(uint32_t flags)
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -105,8 +105,6 @@ void __init pt_pci_init(void)
     radix_tree_init(&pci_segments);
     if ( !alloc_pseg(0) )
         panic("Could not initialize PCI segment 0\n");
-    mmio_ro_ranges = rangeset_new(NULL, "r/o mmio ranges",
-                                  RANGESETF_prettyprint_hex);
 }
 
 int __init pci_add_segment(u16 seg)
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -500,6 +500,8 @@ extern bool_t machine_to_phys_mapping_va
         _set_gpfn_from_mfn(mfn, pfn);           \
 } while (0)
 
+extern struct rangeset *mmio_ro_ranges;
+
 #define get_gpfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
 
 #define mfn_to_gmfn(_d, mfn)                            \
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -37,8 +37,6 @@ extern bool_t amd_iommu_perdev_intremap;
 /* Does this domain have a P2M table we can use as its IOMMU pagetable? */
 #define iommu_use_hap_pt(d) (hap_enabled(d) && iommu_hap_pt_share)
 
-extern struct rangeset *mmio_ro_ranges;
-
 #define domain_hvm_iommu(d)     (&d->arch.hvm_domain.hvm_iommu)
 
 #define MAX_IOMMUS 32
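
(Reference sketch, not part of the patch: the rangeset calls used above,
declared in xen/include/xen/rangeset.h, combine as follows; mfn and rc
are assumed to be in scope. rangeset_new() returns NULL and
rangeset_add_singleton() returns -ENOMEM on allocation failure.)

    #include <xen/rangeset.h>

    /* Global (domain-less) set; entries pretty-print as hex. */
    struct rangeset *r = rangeset_new(NULL, "r/o mmio ranges",
                                      RANGESETF_prettyprint_hex);

    /* Record one page frame; returns 0 on success. */
    rc = rangeset_add_singleton(r, mfn);

    /* Membership test, as used when building Dom0's I/O permissions. */
    if ( rangeset_contains_singleton(r, mfn) )
        /* mfn is in the read-only set */;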


