A vcpu can be switched out right after set_virtual_rr0(), so the TLB
insertion that follows runs under the wrong rr0 and inserts a wrong
TLB entry.

Close the window by masking interrupts around the rr0 swap in
vcpu_get_domain_bundle(), and by moving the swap into
vcpu_itc_no_srlz(), where the insertion is already performed with
psr.i and psr.ic cleared.
Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>
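For reference, a minimal sketch of the race being closed.  Everything
below (itc_with_race, ia64_itc_stub, the stub bodies) is an
illustrative stand-in, not the real Xen/ia64 code:

/* Pre-patch shape of the rr0 swap around a TLB insert. */
static void set_virtual_rr0(void)      { /* load the guest's rr0 */ }
static void set_metaphysical_rr0(void) { /* load metaphysical rr0 */ }
static void ia64_itc_stub(unsigned long type, unsigned long vaddr,
                          unsigned long pte, unsigned long itir)
{
	/* itc.i when (type & 1), itc.d when (type & 2) */
	(void)type; (void)vaddr; (void)pte; (void)itir;
}

static void itc_with_race(unsigned long vaddr, unsigned long pte,
                          unsigned long itir, int swap_rr0)
{
	if (swap_rr0)
		set_virtual_rr0();
	/*
	 * RACE: interrupts are still enabled here, so this vcpu can
	 * be switched out; the context switch rewrites rr0, and the
	 * insertion below gets tagged with the wrong region id.
	 */
	ia64_itc_stub(2, vaddr, pte, itir);	/* itc.d */
	if (swap_rr0)
		set_metaphysical_rr0();
}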
diff -r 9c0a654157cb xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c Wed May 28 18:10:44 2008 +0900
+++ b/xen/arch/ia64/xen/vcpu.c Thu May 29 10:32:31 2008 +0900
@@ -1383,13 +1383,16 @@ vcpu_get_domain_bundle(VCPU * vcpu, REGS

// try to access gip with guest virtual address
// This may cause tlb miss. see vcpu_translate(). Be careful!
- swap_rr0 = (!region && PSCB(vcpu, metaphysical_mode));
- if (swap_rr0) {
+ if (unlikely(region == 0 && PSCB(vcpu, metaphysical_mode))) {
+ u64 flags;
+
+ local_irq_save(flags);
set_virtual_rr0();
- }
- *bundle = __get_domain_bundle(gip);
- if (swap_rr0) {
+ *bundle = __get_domain_bundle(gip);
set_metaphysical_rr0();
+ local_irq_restore(flags);
+ } else {
+ *bundle = __get_domain_bundle(gip);
}
if (bundle->i64[0] == 0 && bundle->i64[1] == 0) {
dprintk(XENLOG_INFO, "%s gip 0x%lx\n", __func__, gip);
@@ -2084,8 +2087,13 @@ vcpu_rebuild_vhpt(VCPU * vcpu, u64 ps)
#endif
}

+/*
+ * flags & 1: execute itc.i
+ * flags & 2: execute itc.d
+ * flags & 8: check metaphysical mode and swap rr0
+ */
void
-vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, u64 vaddr, u64 pte,
+vcpu_itc_no_srlz(VCPU * vcpu, u64 flags, u64 vaddr, u64 pte,
u64 mp_pte, u64 itir, struct p2m_entry *entry)
{
ia64_itir_t _itir = {.itir = itir};
@@ -2103,7 +2111,14 @@ vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD,
psr = ia64_clear_ic();
pte &= ~(_PAGE_RV2 | _PAGE_RV1); // Mask out the reserved bits.
// FIXME: look for bigger mappings
- ia64_itc(IorD, vaddr, pte, _itir.itir);
+ if ((flags & 8) && (REGION_NUMBER(vaddr) == 0)
+ && PSCB(vcpu, metaphysical_mode)) {
+ set_virtual_rr0();
+ ia64_itc(flags, vaddr, pte, _itir.itir);
+ set_metaphysical_rr0();
+ } else {
+ ia64_itc(flags, vaddr, pte, _itir.itir);
+ }
ia64_set_psr(psr);
// ia64_srlz_i(); // no srls req'd, will rfi later
if (vcpu->domain == dom0 && ((vaddr >> 61) == 7)) {
@@ -2126,7 +2141,6 @@ IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pt
IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
{
unsigned long pteval;
- BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
struct p2m_entry entry;
ia64_itir_t _itir = {.itir = itir};

@@ -2138,11 +2152,7 @@ IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pt
pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
if (!pteval)
return IA64_ILLOP_FAULT;
- if (swap_rr0)
- set_virtual_rr0();
- vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, _itir.itir, &entry);
- if (swap_rr0)
- set_metaphysical_rr0();
+ vcpu_itc_no_srlz(vcpu, 2|8, ifa, pteval, pte, _itir.itir, &entry);
if (p2m_entry_retry(&entry)) {
vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
goto again;
@@ -2154,7 +2164,6 @@ IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pt
IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
{
unsigned long pteval;
- BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
struct p2m_entry entry;
ia64_itir_t _itir = {.itir = itir};

@@ -2166,11 +2175,7 @@ IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pt
pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
if (!pteval)
return IA64_ILLOP_FAULT;
- if (swap_rr0)
- set_virtual_rr0();
- vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, _itir.itir, &entry);
- if (swap_rr0)
- set_metaphysical_rr0();
+ vcpu_itc_no_srlz(vcpu, 1|8, ifa, pteval, pte, _itir.itir, &entry);
if (p2m_entry_retry(&entry)) {
vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
goto again;
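After this change the rr0 swap happens inside vcpu_itc_no_srlz(), after
ia64_clear_ic() has already taken psr.i and psr.ic down, so no
interrupt, and hence no vcpu switch, can land between the swap and the
itc.  A condensed sketch of the resulting shape, reusing the stubs from
the sketch above plus a few more stand-ins (ia64_clear_ic_stub,
region_number, in_metaphysical_mode are hypothetical):

static unsigned long ia64_clear_ic_stub(void)
{
	/* rsm psr.i | psr.ic; return the old psr */
	return 0;
}
static void ia64_set_psr_stub(unsigned long psr) { (void)psr; }
static int region_number(unsigned long vaddr)
{
	return (int)(vaddr >> 61);	/* top 3 bits select the region */
}
static int in_metaphysical_mode(void) { return 1; }

static void itc_no_srlz_sketch(unsigned long flags, unsigned long vaddr,
                               unsigned long pte, unsigned long itir)
{
	/* flags: 1 = itc.i, 2 = itc.d, 8 = check metaphysical mode
	 * and swap rr0 (encoding from the new comment in the patch) */
	unsigned long psr = ia64_clear_ic_stub();

	if ((flags & 8) && region_number(vaddr) == 0
	    && in_metaphysical_mode()) {
		set_virtual_rr0();
		/* The patch passes the whole flags word to ia64_itc(),
		 * so the extra bit 8 must be ignored there; only the
		 * itc.i/itc.d bits matter. */
		ia64_itc_stub(flags, vaddr, pte, itir);
		set_metaphysical_rr0();
	} else {
		ia64_itc_stub(flags, vaddr, pte, itir);
	}
	ia64_set_psr_stub(psr);
}

Folding the check behind bit 8 also means that callers of
vcpu_itc_no_srlz() which never need the swap can simply leave the bit
clear and keep their old behaviour.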