# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID a220579c2aa8b579ae2019ca2133476c6482a0aa
# Parent a4a4a28a16861e723af49ccd3ef948c52f77e1dc
[IA64] Fix SMP bug in the VHPT code.
vhpt_paddr and vhpt_pend are now per-CPU variables, since the VHPT itself is
allocated per CPU. This should significantly improve stability on SMP.
Signed-off-by: Tristan Gingold <tristan.gingold@xxxxxxxx>
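For context, the per-CPU idiom this patch switches to looks roughly like the
sketch below. It is a minimal illustration only: the DECLARE_PER_CPU /
DEFINE_PER_CPU / __get_cpu_var macros are the ones used in the diff, while the
helper names (record_vhpt_range, maddr_hits_local_vhpt) and their arguments are
hypothetical, not code from the hunks that follow.

	/* Minimal sketch of the per-CPU variable pattern (illustrative only). */

	/* In a shared header (here: asm/vhpt.h) -- declare one copy per CPU. */
	DECLARE_PER_CPU (unsigned long, vhpt_paddr);
	DECLARE_PER_CPU (unsigned long, vhpt_pend);

	/* In exactly one .c file (here: vhpt.c) -- define the storage. */
	DEFINE_PER_CPU (unsigned long, vhpt_paddr);
	DEFINE_PER_CPU (unsigned long, vhpt_pend);

	/* Code running on a given CPU reads and writes its own copy. */
	static void record_vhpt_range(unsigned long paddr, unsigned long size)
	{
		__get_cpu_var(vhpt_paddr) = paddr;
		__get_cpu_var(vhpt_pend)  = paddr + size - 1;
	}

	static int maddr_hits_local_vhpt(unsigned long maddr)
	{
		return maddr > __get_cpu_var(vhpt_paddr) &&
		       maddr < __get_cpu_var(vhpt_pend);
	}

Each CPU fills in its own pair during vhpt_init(), so checks such as the one in
assign_new_domain_page() compare against the VHPT of the CPU actually running
the code instead of a single shared pair of globals.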
diff -r a4a4a28a1686 -r a220579c2aa8 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Mon Feb 27 20:31:10 2006
+++ b/xen/arch/ia64/xen/domain.c Mon Feb 27 21:00:33 2006
@@ -46,6 +46,7 @@
#include <asm/vmx_vcpu.h>
#include <asm/vmx_vpd.h>
#include <asm/pal.h>
+#include <asm/vhpt.h>
#include <public/hvm/ioreq.h>
#define CONFIG_DOMAIN0_CONTIGUOUS
@@ -399,7 +400,6 @@
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
-extern unsigned long vhpt_paddr, vhpt_pend;
if (!mm->pgd) {
printk("assign_new_domain_page: domain pgd must exist!\n");
@@ -433,9 +433,11 @@
printf("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
return(p);
}
-	if (unlikely(page_to_maddr(p) > vhpt_paddr && page_to_maddr(p) < vhpt_pend)) {
-	  printf("assign_new_domain_page: reassigned vhpt page %p!!\n",page_to_maddr(p));
-	}
+ if (unlikely(page_to_maddr(p) > __get_cpu_var(vhpt_paddr)
+ && page_to_maddr(p) < __get_cpu_var(vhpt_pend))) {
+ printf("assign_new_domain_page: reassigned vhpt page %p!!\n",
+ page_to_maddr(p));
+ }
set_pte(pte, pfn_pte(page_to_maddr(p) >> PAGE_SHIFT,
__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
}
diff -r a4a4a28a1686 -r a220579c2aa8 xen/arch/ia64/xen/regionreg.c
--- a/xen/arch/ia64/xen/regionreg.c Mon Feb 27 20:31:10 2006
+++ b/xen/arch/ia64/xen/regionreg.c Mon Feb 27 21:00:33 2006
@@ -211,7 +211,6 @@
unsigned long rreg = REGION_NUMBER(rr);
ia64_rr rrv, newrrv, memrrv;
unsigned long newrid;
- extern unsigned long vhpt_paddr;
if (val == -1) return 1;
@@ -249,10 +248,12 @@
newrrv.rid = newrid;
newrrv.ve = 1; // VHPT now enabled for region 7!!
newrrv.ps = PAGE_SHIFT;
- if (rreg == 0) v->arch.metaphysical_saved_rr0 =
- vmMangleRID(newrrv.rrval);
- if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
- v->arch.privregs, vhpt_paddr, pal_vaddr);
+ if (rreg == 0)
+ v->arch.metaphysical_saved_rr0 = vmMangleRID(newrrv.rrval);
+ else if (rreg == 7)
+ ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
+ v->arch.privregs, __get_cpu_var(vhpt_paddr),
+ pal_vaddr);
else set_rr(rr,newrrv.rrval);
#endif
return 1;
diff -r a4a4a28a1686 -r a220579c2aa8 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c Mon Feb 27 20:31:10 2006
+++ b/xen/arch/ia64/xen/vhpt.c Mon Feb 27 21:00:33 2006
@@ -15,7 +15,8 @@
#include <asm/dma.h>
#include <asm/vhpt.h>
-unsigned long vhpt_paddr, vhpt_pend, vhpt_pte;
+DEFINE_PER_CPU (unsigned long, vhpt_paddr);
+DEFINE_PER_CPU (unsigned long, vhpt_pend);
void vhpt_flush(void)
{
@@ -77,12 +78,12 @@
}
#endif
-void vhpt_map(void)
+static void vhpt_map(unsigned long pte)
{
unsigned long psr;
psr = ia64_clear_ic();
- ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, vhpt_pte, VHPT_SIZE_LOG2);
+ ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, pte, VHPT_SIZE_LOG2);
ia64_set_psr(psr);
ia64_srlz_i();
}
@@ -122,6 +123,7 @@
void vhpt_init(void)
{
unsigned long vhpt_total_size, vhpt_alignment;
+ unsigned long paddr, pte;
struct page_info *page;
#if !VHPT_ENABLED
return;
@@ -141,11 +143,13 @@
printf("vhpt_init: can't allocate VHPT!\n");
while(1);
}
- vhpt_paddr = page_to_maddr(page);
- vhpt_pend = vhpt_paddr + vhpt_total_size - 1;
- printf("vhpt_init: vhpt paddr=%p, end=%p\n",vhpt_paddr,vhpt_pend);
- vhpt_pte = pte_val(pfn_pte(vhpt_paddr >> PAGE_SHIFT, PAGE_KERNEL));
- vhpt_map();
+ paddr = page_to_maddr(page);
+ __get_cpu_var(vhpt_paddr) = paddr;
+ __get_cpu_var(vhpt_pend) = paddr + vhpt_total_size - 1;
+ printf("vhpt_init: vhpt paddr=%p, end=%p\n",
+ paddr, __get_cpu_var(vhpt_pend));
+ pte = pte_val(pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL));
+ vhpt_map(pte);
ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
VHPT_ENABLED);
vhpt_flush();
diff -r a4a4a28a1686 -r a220579c2aa8 xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h Mon Feb 27 20:31:10 2006
+++ b/xen/include/asm-ia64/vhpt.h Mon Feb 27 21:00:33 2006
@@ -121,6 +121,11 @@
extern void vhpt_insert (unsigned long vadr, unsigned long ptr,
unsigned logps);
extern void vhpt_flush(void);
+
+/* Currently the VHPT is allocated per CPU. */
+DECLARE_PER_CPU (unsigned long, vhpt_paddr);
+DECLARE_PER_CPU (unsigned long, vhpt_pend);
+
#endif /* !__ASSEMBLY */
#if !VHPT_ENABLED