# HG changeset patch
# User tristan.gingold@xxxxxxxx
# Node ID 28af5f005e88f2c433ae3d94b1a087ef02df194a
# Parent  d36e26e32eff9b531c0b38d3526dc9190dd9cc81
vhpt_paddr & vhpt_pend are now per-cpu variables, since VHPT is allocated
per cpu.
This should really improve stability.

Signed-off-by: Tristan Gingold

diff -r d36e26e32eff -r 28af5f005e88 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c	Mon Feb 27 10:07:32 2006
+++ b/xen/arch/ia64/xen/domain.c	Mon Feb 27 10:49:07 2006
@@ -46,6 +46,7 @@
 #include
 #include
 #include
+#include
 #include
 
 #define CONFIG_DOMAIN0_CONTIGUOUS
@@ -399,7 +400,6 @@
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
-extern unsigned long vhpt_paddr, vhpt_pend;
 
 	if (!mm->pgd) {
 		printk("assign_new_domain_page: domain pgd must exist!\n");
@@ -433,9 +433,11 @@
 			printf("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
 			return(p);
 		}
-if (unlikely(page_to_maddr(p) > vhpt_paddr && page_to_maddr(p) < vhpt_pend)) {
-  printf("assign_new_domain_page: reassigned vhpt page %p!!\n",page_to_maddr(p));
-}
+		if (unlikely(page_to_maddr(p) > __get_cpu_var(vhpt_paddr)
+			     && page_to_maddr(p) < __get_cpu_var(vhpt_pend))) {
+			printf("assign_new_domain_page: reassigned vhpt page %p!!\n",
+			       page_to_maddr(p));
+		}
 		set_pte(pte, pfn_pte(page_to_maddr(p) >> PAGE_SHIFT,
 			__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
 	}
diff -r d36e26e32eff -r 28af5f005e88 xen/arch/ia64/xen/regionreg.c
--- a/xen/arch/ia64/xen/regionreg.c	Mon Feb 27 10:07:32 2006
+++ b/xen/arch/ia64/xen/regionreg.c	Mon Feb 27 10:49:07 2006
@@ -211,7 +211,6 @@
 	unsigned long rreg = REGION_NUMBER(rr);
 	ia64_rr rrv, newrrv, memrrv;
 	unsigned long newrid;
-	extern unsigned long vhpt_paddr;
 
 	if (val == -1) return 1;
 
@@ -249,10 +248,12 @@
 	newrrv.rid = newrid;
 	newrrv.ve = 1; // VHPT now enabled for region 7!!
 	newrrv.ps = PAGE_SHIFT;
-	if (rreg == 0) v->arch.metaphysical_saved_rr0 =
-		vmMangleRID(newrrv.rrval);
-	if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
-		v->arch.privregs, vhpt_paddr, pal_vaddr);
+	if (rreg == 0)
+		v->arch.metaphysical_saved_rr0 = vmMangleRID(newrrv.rrval);
+	else if (rreg == 7)
+		ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
+			     v->arch.privregs, __get_cpu_var(vhpt_paddr),
+			     pal_vaddr);
 	else set_rr(rr,newrrv.rrval);
 #endif
 	return 1;
diff -r d36e26e32eff -r 28af5f005e88 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c	Mon Feb 27 10:07:32 2006
+++ b/xen/arch/ia64/xen/vhpt.c	Mon Feb 27 10:49:07 2006
@@ -15,7 +15,8 @@
 #include
 #include
 
-unsigned long vhpt_paddr, vhpt_pend, vhpt_pte;
+DEFINE_PER_CPU (unsigned long, vhpt_paddr);
+DEFINE_PER_CPU (unsigned long, vhpt_pend);
 
 void vhpt_flush(void)
 {
@@ -77,12 +78,12 @@
 }
 #endif
 
-void vhpt_map(void)
+static void vhpt_map(unsigned long pte)
 {
 	unsigned long psr;
 
 	psr = ia64_clear_ic();
-	ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, vhpt_pte, VHPT_SIZE_LOG2);
+	ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, pte, VHPT_SIZE_LOG2);
 	ia64_set_psr(psr);
 	ia64_srlz_i();
 }
@@ -122,6 +123,7 @@
 void vhpt_init(void)
 {
 	unsigned long vhpt_total_size, vhpt_alignment, vhpt_imva;
+	unsigned long paddr, pte;
 #if !VHPT_ENABLED
 	return;
 #endif
@@ -139,11 +141,13 @@
 		printf("vhpt_init: can't allocate VHPT!\n");
 		while(1);
 	}
-	vhpt_paddr = __pa(vhpt_imva);
-	vhpt_pend = vhpt_paddr + vhpt_total_size - 1;
-	printf("vhpt_init: vhpt paddr=%p, end=%p\n",vhpt_paddr,vhpt_pend);
-	vhpt_pte = pte_val(pfn_pte(vhpt_paddr >> PAGE_SHIFT, PAGE_KERNEL));
-	vhpt_map();
+	paddr = __pa(vhpt_imva);
+	__get_cpu_var(vhpt_paddr) = paddr;
+	__get_cpu_var (vhpt_pend) = paddr + vhpt_total_size - 1;
+	printf("vhpt_init: vhpt paddr=%p, end=%p\n",
+	       paddr,__get_cpu_var(vhpt_pend));
+	pte = pte_val(pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL));
+	vhpt_map(pte);
 	ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
 		VHPT_ENABLED);
 	vhpt_flush();
diff -r d36e26e32eff -r 28af5f005e88 xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h	Mon Feb 27 10:07:32 2006
+++ b/xen/include/asm-ia64/vhpt.h	Mon Feb 27 10:49:07 2006
@@ -121,6 +121,11 @@
 extern void vhpt_insert (unsigned long vadr, unsigned long ptr,
			 unsigned logps);
 extern void vhpt_flush(void);
+
+/* Currently the VHPT is allocated per CPU. */
+DECLARE_PER_CPU (unsigned long, vhpt_paddr);
+DECLARE_PER_CPU (unsigned long, vhpt_pend);
+
 #endif /* !__ASSEMBLY */
 
 #if !VHPT_ENABLED
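
For reference, a minimal sketch (not part of the changeset) of the per-cpu
idiom this patch switches to, using the same macros that appear in the diff
(DECLARE_PER_CPU, DEFINE_PER_CPU, __get_cpu_var). The function name below
is illustrative only:

/* In a shared header (cf. xen/include/asm-ia64/vhpt.h): declare the
 * variable so code on any CPU can reference it. */
DECLARE_PER_CPU (unsigned long, vhpt_paddr);

/* In exactly one .c file (cf. xen/arch/ia64/xen/vhpt.c): define it, which
 * reserves one instance of the variable for every CPU. */
DEFINE_PER_CPU (unsigned long, vhpt_paddr);

/* Illustrative accessor: __get_cpu_var() evaluates to the copy belonging
 * to the CPU currently executing, so each CPU records the address of its
 * own VHPT without clobbering the others'. */
static void record_vhpt_paddr (unsigned long paddr)
{
	__get_cpu_var (vhpt_paddr) = paddr;
}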