[IA64] allocate percpu area in xen va area.

To guarantee that the percpu area is pinned down, move its virtual
address from the xen identity-mapped area to the xen va area, which is
pinned by DTR[IA64_TR_KERNEL]. This avoids unnecessary TLB miss faults:
the per-cpu area is sometimes accessed from very critical points where
such a TLB miss isn't allowed.

Signed-off-by: Isaku Yamahata

diff -r 9feafce8afc5 xen/arch/ia64/linux-xen/mm_contig.c
--- a/xen/arch/ia64/linux-xen/mm_contig.c	Wed Aug 06 11:55:56 2008 +0900
+++ b/xen/arch/ia64/linux-xen/mm_contig.c	Wed Aug 06 16:08:06 2008 +0900
@@ -175,6 +175,41 @@
 #endif

 #ifdef CONFIG_SMP
+#ifdef XEN
+#include
+
+void *percpu_area __initdata = NULL;
+
+void* __init
+per_cpu_allocate(void *xen_heap_start, unsigned long end_in_pa)
+{
+	int order = get_order(NR_CPUS * PERCPU_PAGE_SIZE);
+	unsigned long size = (1UL << order) * PAGE_SIZE;
+	unsigned long start = ALIGN_UP((unsigned long)xen_heap_start,
+				       PERCPU_PAGE_SIZE);
+	unsigned long end = start + size;
+
+	if (__pa(end) < end_in_pa) {
+		/* it is desirable to free up unused area...
+		   but we have to cope with freeing __init, ... */
+		/* init_xenheap_pages(__pa(xen_heap_start), __pa(start));*/
+		xen_heap_start = (void*)end;
+		percpu_area = (void*)virt_to_xenva(start);
+		printk("allocate percpu area 0x%lx@0x%lx 0x%p\n",
+		       size, start, percpu_area);
+	} else {
+		panic("can't allocate percpu area. size 0x%lx\n", size);
+	}
+	return xen_heap_start;
+}
+
+static void* __init
+get_per_cpu_area(void)
+{
+	return percpu_area;
+}
+#endif
+
 /**
  * per_cpu_init - setup per-cpu variables
  *
@@ -193,13 +228,7 @@
 	 */
 	if (smp_processor_id() == 0) {
 #ifdef XEN
-		struct page_info *page;
-		page = alloc_domheap_pages(NULL,
-					   get_order(NR_CPUS *
-						     PERCPU_PAGE_SIZE), 0);
-		if (page == NULL)
-			panic("can't allocate per cpu area.\n");
-		cpu_data = page_to_virt(page);
+		cpu_data = get_per_cpu_area();
 #else
 		cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
 					   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
diff -r 9feafce8afc5 xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c	Wed Aug 06 11:55:56 2008 +0900
+++ b/xen/arch/ia64/xen/xensetup.c	Wed Aug 06 16:08:06 2008 +0900
@@ -566,6 +566,13 @@
     if (vmx_enabled)
         xen_heap_start = vmx_init_env(xen_heap_start, xenheap_phys_end);

+    /* allocate memory for percpu area
+     * per_cpu_init() called from late_set_arch() is called after
+     * end_boot_allocate(). It's too late to allocate memory in
+     * xenva.
+     */
+    xen_heap_start = per_cpu_allocate(xen_heap_start, xenheap_phys_end);
+
     heap_desc.xen_heap_start = xen_heap_start;
     heap_desc.xenheap_phys_end = xenheap_phys_end;
     heap_desc.kern_md = kern_md;
diff -r 9feafce8afc5 xen/include/asm-ia64/linux-xen/asm/percpu.h
--- a/xen/include/asm-ia64/linux-xen/asm/percpu.h	Wed Aug 06 11:55:56 2008 +0900
+++ b/xen/include/asm-ia64/linux-xen/asm/percpu.h	Wed Aug 06 16:08:06 2008 +0900
@@ -50,12 +50,22 @@
 extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
 extern void setup_per_cpu_areas (void);
 extern void *per_cpu_init(void);
+#ifdef XEN
+extern void *per_cpu_allocate(void *xen_heap_start, unsigned long end_in_pa);
+#endif

 #else /* ! SMP */

 #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
 #define per_cpu_init() (__phys_per_cpu_start)
+#ifdef XEN
+static inline void *per_cpu_allocate(void *xen_heap_start,
+				     unsigned long end_in_pa)
+{
+	return xen_heap_start;
+}
+#endif

 #endif /* SMP */
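
Note for reviewers: the reservation arithmetic in per_cpu_allocate() can be
tried out in isolation. The standalone sketch below only mirrors that
arithmetic; PAGE_SHIFT, PERCPU_PAGE_SIZE, NR_CPUS, the heap-cursor value and
the get_order()/ALIGN_UP() helpers are local stand-ins chosen for
illustration, not the actual Xen/ia64 definitions.

/* Standalone illustration of the size/alignment computation in
 * per_cpu_allocate().  All constants and helpers are stand-ins. */
#include <stdio.h>

#define PAGE_SHIFT        14            /* assumed 16KB pages */
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define PERCPU_PAGE_SIZE  (1UL << 16)   /* assumed 64KB per-cpu chunk */
#define NR_CPUS           64            /* assumed build-time cpu limit */

/* smallest order such that (1UL << order) pages cover 'size' bytes */
static int get_order(unsigned long size)
{
	int order = 0;
	unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	while ((1UL << order) < pages)
		order++;
	return order;
}

#define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long xen_heap_start = 0xf200000004100000UL; /* fake heap cursor */
	int order = get_order(NR_CPUS * PERCPU_PAGE_SIZE);
	unsigned long size  = (1UL << order) * PAGE_SIZE;
	unsigned long start = ALIGN_UP(xen_heap_start, PERCPU_PAGE_SIZE);
	unsigned long end   = start + size;

	printf("reserve 0x%lx bytes at 0x%lx..0x%lx, heap resumes at 0x%lx\n",
	       size, start, end, end);
	return 0;
}

As in the patch itself, the heap cursor (xen_heap_start) is simply advanced
past the reserved range, while the pointer recorded in percpu_area is the
virt_to_xenva() alias of that range, so later per-cpu accesses go through the
translation pinned by DTR[IA64_TR_KERNEL].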