# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1263469613 0
# Node ID 4a54c794bfd4901eaafcadaae194152472e9d889
# Parent e406e3451835a6829ce602b0003a6b1584415ebd
x86: Fix and clarify 20803:50bd4235f486
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
xen/arch/x86/setup.c                  |   35 +++++++++++++++++++++++++---------
xen/arch/x86/xen.lds.S                |    1 +
xen/drivers/passthrough/vtd/x86/vtd.c |    4 +--
3 files changed, 29 insertions(+), 11 deletions(-)
diff -r e406e3451835 -r 4a54c794bfd4 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c Thu Jan 14 10:14:17 2010 +0000
+++ b/xen/arch/x86/setup.c Thu Jan 14 11:46:53 2010 +0000
@@ -203,6 +203,8 @@ static void __init percpu_init_areas(voi
 {
     unsigned int i, data_size = __per_cpu_data_end - __per_cpu_start;
 
+    BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
+    BUG_ON((unsigned long)__per_cpu_data_end & ~PAGE_MASK);
     BUG_ON(data_size > PERCPU_SIZE);
 
     /* Initialise per-cpu data area for all possible secondary CPUs. */
@@ -230,7 +232,6 @@ static void __init percpu_free_unused_ar
     /* Free all unused per-cpu data areas. */
     free_xen_data(&__per_cpu_start[first_unused << PERCPU_SHIFT], __bss_start);
 
-    data_size = (data_size + PAGE_SIZE - 1) & PAGE_MASK;
     if ( data_size != PERCPU_SIZE )
         for ( i = 0; i < first_unused; i++ )
             free_xen_data(&__per_cpu_start[(i << PERCPU_SHIFT) + data_size],
@@ -1195,15 +1196,15 @@ void arch_get_xen_caps(xen_capabilities_
     }
 }
 
-int xen_in_range(paddr_t start, paddr_t end)
-{
+int xen_in_range(unsigned long mfn)
+{
+    paddr_t start, end;
     int i;
 
     enum { region_s3, region_text, region_percpu, region_bss, nr_regions };
     static struct {
         paddr_t s, e;
     } xen_regions[nr_regions];
-    static unsigned int percpu_data_size;
 
     /* initialize first time */
     if ( !xen_regions[0].s )
@@ -1218,17 +1219,33 @@ int xen_in_range(paddr_t start, paddr_t
         xen_regions[region_percpu].s = __pa(&__per_cpu_start);
         xen_regions[region_percpu].e = xen_regions[region_percpu].s +
             (((paddr_t)last_cpu(cpu_possible_map) + 1) << PERCPU_SHIFT);
-        percpu_data_size = __per_cpu_data_end - __per_cpu_start;
-        percpu_data_size = (percpu_data_size + PAGE_SIZE - 1) & PAGE_MASK;
         /* bss */
         xen_regions[region_bss].s = __pa(&__bss_start);
         xen_regions[region_bss].e = __pa(&_end);
     }
 
+    start = (paddr_t)mfn << PAGE_SHIFT;
+    end = start + PAGE_SIZE;
     for ( i = 0; i < nr_regions; i++ )
-        if ( (start < xen_regions[i].e) && (end > xen_regions[i].s) )
-            return ((i != region_percpu) ||
-                    ((start & (PERCPU_SIZE - 1)) < percpu_data_size));
+    {
+        if ( (start >= xen_regions[i].e) || (end <= xen_regions[i].s) )
+            continue;
+
+        if ( i == region_percpu )
+        {
+            /*
+             * Check if the given page falls into an unused (and therefore
+             * freed) section of the per-cpu data space. Each CPU's data
+             * area is page-aligned, so the following arithmetic is safe.
+             */
+            unsigned int off = ((start - (unsigned long)__per_cpu_start)
+                                & (PERCPU_SIZE - 1));
+            unsigned int data_sz = __per_cpu_data_end - __per_cpu_start;
+            return off < data_sz;
+        }
+
+        return 1;
+    }
 
     return 0;
 }
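The rewritten loop makes the two cases explicit: a page overlapping any region belongs to Xen, unless it falls in the freed, unused tail of a CPU's per-cpu slot. The overlap test itself is the usual half-open interval check: page [start, end) intersects region [s, e) iff start < e and end > s, which the loop negates so it can continue early. A standalone sketch of that test follows; PAGE_SHIFT and the example region are invented for illustration and are not Xen's real layout:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct region { uint64_t s, e; };            /* [s, e), physical addresses */

/* Does the page at frame number mfn intersect any of the nr regions? */
static int page_in_regions(unsigned long mfn, const struct region *r, int nr)
{
    uint64_t start = (uint64_t)mfn << PAGE_SHIFT;
    uint64_t end   = start + PAGE_SIZE;
    int i;

    for ( i = 0; i < nr; i++ )
        if ( !((start >= r[i].e) || (end <= r[i].s)) )
            return 1;                        /* overlap: page is in a region */
    return 0;
}

int main(void)
{
    struct region text = { 0x100000, 0x300000 };     /* invented layout */

    printf("%d %d\n",
           page_in_regions(0x100, &text, 1),         /* inside    -> 1 */
           page_in_regions(0x300, &text, 1));        /* just past -> 0 */
    return 0;
}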
diff -r e406e3451835 -r 4a54c794bfd4 xen/arch/x86/xen.lds.S
--- a/xen/arch/x86/xen.lds.S Thu Jan 14 10:14:17 2010 +0000
+++ b/xen/arch/x86/xen.lds.S Thu Jan 14 11:46:53 2010 +0000
@@ -104,6 +104,7 @@ SECTIONS
        *(.data.percpu)
        . = ALIGN(SMP_CACHE_BYTES);
        *(.data.percpu.read_mostly)
+       . = ALIGN(PAGE_SIZE);
        __per_cpu_data_end = .;
   } :text
   . = __per_cpu_start + (NR_CPUS << PERCPU_SHIFT);
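Page-aligning __per_cpu_data_end in the linker script is what makes the rest of the patch safe: data_size is now a whole number of pages, so the runtime round-up deleted from percpu_free_unused_areas() becomes a no-op, and the new BUG_ON()s in percpu_init_areas() enforce the invariant. A throwaway check of that identity, with PAGE_SIZE assumed to be 4096 here:

#include <assert.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static unsigned long round_up_page(unsigned long x)
{
    /* The expression this patch deletes from percpu_free_unused_areas(). */
    return (x + PAGE_SIZE - 1) & PAGE_MASK;
}

int main(void)
{
    assert(round_up_page(1) == PAGE_SIZE);                  /* rounds up */
    assert(round_up_page(3 * PAGE_SIZE) == 3 * PAGE_SIZE);  /* no-op when aligned */
    return 0;
}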
diff -r e406e3451835 -r 4a54c794bfd4 xen/drivers/passthrough/vtd/x86/vtd.c
--- a/xen/drivers/passthrough/vtd/x86/vtd.c Thu Jan 14 10:14:17 2010 +0000
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c Thu Jan 14 11:46:53 2010 +0000
@@ -133,7 +133,7 @@ void iommu_set_dom0_mapping(struct domai
 void iommu_set_dom0_mapping(struct domain *d)
 {
     u64 i, j, tmp, max_pfn;
-    extern int xen_in_range(paddr_t start, paddr_t end);
+    extern int xen_in_range(unsigned long mfn);
 
     BUG_ON(d->domain_id != 0);
 
@@ -153,7 +153,7 @@ void iommu_set_dom0_mapping(struct domai
             continue;
 
         /* Exclude Xen bits */
-        if ( xen_in_range(i << PAGE_SHIFT, (i + 1) << PAGE_SHIFT) )
+        if ( xen_in_range(i) )
             continue;
 
         tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
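On the caller side the simplification is mechanical: iommu_set_dom0_mapping() already walks frame numbers, so passing the mfn straight through avoids constructing a one-page byte range at every iteration. A compilable sketch of the pattern, with a stub standing in for the real xen_in_range() and an arbitrary loop bound:

#include <stdio.h>

static int xen_in_range(unsigned long mfn)
{
    /* Stub: pretend frames 0x100-0x1ff back the hypervisor. */
    return (mfn >= 0x100) && (mfn < 0x200);
}

int main(void)
{
    unsigned long i, mapped = 0;

    for ( i = 0; i < 0x300; i++ )
    {
        if ( xen_in_range(i) )   /* exclude Xen bits, as in the patch */
            continue;
        mapped++;                /* here vtd.c would set up the 1:1 mapping */
    }

    printf("mapped %lu of 0x300 frames\n", mapped);   /* prints 512 */
    return 0;
}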