# HG changeset patch
# User dietmar.hahn@xxxxxxxxxxxxxxxxxxx
# Node ID 9982c9674eb745b14caa3a7d94a1f88988905855
# Parent  a82ee4db96cfd89453a525bb762929dcc7809c7b
Changed some interfaces to pass the whole cr.itir value instead of only
the page size (logps) when handling itc_i/itc_d, so the protection key
is carried through to the hardware TLB and the VHPT.

Signed-off-by: Dietmar Hahn
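For readers following along: the ia64_itir_t accessor union and the
IA64_ITIR_PS_KEY() helper used throughout come from the parent changeset
and are not shown in this diff. A minimal sketch of what this patch assumes
they look like; the field offsets follow the architected cr.itir layout,
but the exact in-tree definition may differ:

    /* Sketch only -- the real definitions live in the xen/ia64 headers of
     * the parent changeset.  cr.itir layout per the Itanium architecture:
     * bits 1:0 reserved, 7:2 log2 page size, 31:8 protection key,
     * 63:32 reserved. */
    typedef union {
        u64 itir;
        struct {
            u64 rv3 :  2;   /* bits  1:0  reserved */
            u64 ps  :  6;   /* bits  7:2  log2 page size */
            u64 key : 24;   /* bits 31:8  protection key */
            u64 rv4 : 32;   /* bits 63:32 reserved */
        };
    } ia64_itir_t;

    /* Compose a cr.itir image from a page size and a protection key. */
    #define IA64_ITIR_PS_KEY(ps, key)  (((u64)(ps) << 2) | ((u64)(key) << 8))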
diff -r a82ee4db96cf -r 9982c9674eb7 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c	Tue Jul 24 10:29:43 2007 +0200
+++ b/xen/arch/ia64/vmx/vmmu.c	Tue Jul 24 11:26:54 2007 +0200
@@ -232,10 +232,10 @@ void machine_tlb_insert(struct vcpu *v, 
     psr = ia64_clear_ic();
     if ( cl == ISIDE_TLB ) {
-        ia64_itc(1, mtlb.ifa, mtlb.page_flags, mtlb.ps);
+        ia64_itc(1, mtlb.ifa, mtlb.page_flags, IA64_ITIR_PS_KEY(mtlb.ps, 0));
     }
     else {
-        ia64_itc(2, mtlb.ifa, mtlb.page_flags, mtlb.ps);
+        ia64_itc(2, mtlb.ifa, mtlb.page_flags, IA64_ITIR_PS_KEY(mtlb.ps, 0));
     }
     ia64_set_psr(psr);
     ia64_srlz_i();
diff -r a82ee4db96cf -r 9982c9674eb7 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c	Tue Jul 24 10:29:43 2007 +0200
+++ b/xen/arch/ia64/vmx/vtlb.c	Tue Jul 24 11:26:54 2007 +0200
@@ -199,7 +199,7 @@ void thash_vhpt_insert(VCPU *v, u64 pte,
     } else {
         phy_pte &= ~PAGE_FLAGS_RV_MASK;
         psr = ia64_clear_ic();
-        ia64_itc(type + 1, va, phy_pte, itir_ps(itir));
+        ia64_itc(type + 1, va, phy_pte, itir);
         ia64_set_psr(psr);
         ia64_srlz_i();
     }
@@ -562,7 +562,7 @@ int thash_purge_and_insert(VCPU *v, u64 
     u64 psr;
     phy_pte &= ~PAGE_FLAGS_RV_MASK;
     psr = ia64_clear_ic();
-    ia64_itc(type + 1, ifa, phy_pte, ps);
+    ia64_itc(type + 1, ifa, phy_pte, IA64_ITIR_PS_KEY(ps, 0));
     ia64_set_psr(psr);
     ia64_srlz_i();
     // ps < mrr.ps, this is not supported
diff -r a82ee4db96cf -r 9982c9674eb7 xen/arch/ia64/xen/faults.c
--- a/xen/arch/ia64/xen/faults.c	Tue Jul 24 10:29:43 2007 +0200
+++ b/xen/arch/ia64/xen/faults.c	Tue Jul 24 11:26:54 2007 +0200
@@ -168,7 +168,7 @@ void ia64_do_page_fault(unsigned long ad
     unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
     IA64FAULT fault;
     int is_ptc_l_needed = 0;
-    u64 logps;
+    ia64_itir_t _itir = {.itir = itir};
 
     if ((isr & IA64_ISR_SP)
         || ((isr & IA64_ISR_NA)
@@ -190,14 +190,14 @@ void ia64_do_page_fault(unsigned long ad
         struct p2m_entry entry;
         unsigned long m_pteval;
         m_pteval = translate_domain_pte(pteval, address, itir,
-                                        &logps, &entry);
+                                        &(_itir.itir), &entry);
         vcpu_itc_no_srlz(current, is_data ? 2 : 1, address,
-                         m_pteval, pteval, logps, &entry);
+                         m_pteval, pteval, _itir.itir, &entry);
         if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
             p2m_entry_retry(&entry)) {
             /* dtlb has been purged in-between.  This dtlb was
                matching.  Undo the work.  */
-            vcpu_flush_tlb_vhpt_range(address, logps);
+            vcpu_flush_tlb_vhpt_range(address, _itir.ps);
 
             // the stale entry which we inserted above
             // may remains in tlb cache.
@@ -209,7 +209,7 @@ void ia64_do_page_fault(unsigned long ad
     }
 
     if (is_ptc_l_needed)
-        vcpu_ptc_l(current, address, logps);
+        vcpu_ptc_l(current, address, _itir.ps);
     if (!guest_mode(regs)) {
         /* The fault occurs inside Xen.  */
         if (!ia64_done_with_exception(regs)) {
diff -r a82ee4db96cf -r 9982c9674eb7 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c	Tue Jul 24 10:29:43 2007 +0200
+++ b/xen/arch/ia64/xen/mm.c	Tue Jul 24 11:26:54 2007 +0200
@@ -448,11 +448,11 @@ gmfn_to_mfn_foreign(struct domain *d, un
 // address, convert the pte for a physical address for (possibly different)
 // Xen PAGE_SIZE and return modified pte.  (NOTE: TLB insert should use
 //   PAGE_SIZE!)
-u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps,
+u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* itir,
                          struct p2m_entry* entry)
 {
     struct domain *d = current->domain;
-    ia64_itir_t itir = {.itir = itir__};
+    ia64_itir_t _itir = {.itir = itir__};
     u64 mask, mpaddr, pteval2;
     u64 arflags;
     u64 arflags2;
@@ -461,13 +461,14 @@ u64 translate_domain_pte(u64 pteval, u64
     pteval &= ((1UL << 53) - 1);// ignore [63:53] bits
 
     // FIXME address had better be pre-validated on insert
-    mask = ~itir_mask(itir.itir);
+    mask = ~itir_mask(_itir.itir);
     mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
 
-    if (itir.ps > PAGE_SHIFT)
-        itir.ps = PAGE_SHIFT;
-
-    *logps = itir.ps;
+    if (_itir.ps > PAGE_SHIFT)
+        _itir.ps = PAGE_SHIFT;
+
+    ((ia64_itir_t*)itir)->itir = _itir.itir; /* Copy the whole register. */
+    ((ia64_itir_t*)itir)->ps = _itir.ps;     /* Maybe ps changed! */
 
     pteval2 = lookup_domain_mpa(d, mpaddr, entry);
@@ -478,7 +479,7 @@ u64 translate_domain_pte(u64 pteval, u64
 #if 0
     dprintk(XENLOG_WARNING,
             "%s:%d "
-            "pteval 0x%lx arflag 0x%lx address 0x%lx itir 0x%lx "
+            "pteval 0x%lx arflag 0x%lx address 0x%lx _itir 0x%lx "
             "pteval2 0x%lx arflags2 0x%lx mpaddr 0x%lx\n",
             __func__, __LINE__,
             pteval, arflags, address, itir__,
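With the mm.c change above, translate_domain_pte() hands the complete
(possibly ps-clamped) cr.itir image back through its out parameter instead
of just the page size. A hypothetical caller under the new contract,
mirroring what faults.c and vcpu.c now do:

    /* Hypothetical caller sketch: the out parameter now carries the whole
     * itir image, so the protection key survives the translation step. */
    ia64_itir_t _itir = {.itir = itir};
    u64 m_pteval = translate_domain_pte(pteval, address, itir,
                                        &_itir.itir, &entry);
    /* _itir.ps may have been clamped to PAGE_SHIFT; _itir.key is
     * passed through unchanged. */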
-//printk("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L< PAGE_SHIFT) - vhpt_multiple_insert(vaddr, pte, logps); +//printk("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<_itir.ps); + if (_itir.ps > PAGE_SHIFT) + vhpt_multiple_insert(vaddr, pte, _itir.itir); else - vhpt_insert(vaddr, pte, logps << 2); + vhpt_insert(vaddr, pte, _itir.itir); } // even if domain pagesize is larger than PAGE_SIZE, just put // PAGE_SIZE mapping in the vhpt for now, else purging is complicated - else - vhpt_insert(vaddr, pte, PAGE_SHIFT << 2); + else { + _itir.ps = PAGE_SHIFT; + vhpt_insert(vaddr, pte, _itir.itir); + } } IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa) { - unsigned long pteval, logps = itir_ps(itir); + unsigned long pteval; BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode)); struct p2m_entry entry; - - if (logps < PAGE_SHIFT) + ia64_itir_t _itir = {.itir = itir}; + + if (_itir.ps < PAGE_SHIFT) panic_domain(NULL, "vcpu_itc_d: domain trying to use " "smaller page size!\n"); again: //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize - pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry); + pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry); if (!pteval) return IA64_ILLOP_FAULT; if (swap_rr0) set_one_rr(0x0, PSCB(vcpu, rrs[0])); - vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, logps, &entry); + vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, _itir.itir, &entry); if (swap_rr0) set_metaphysical_rr0(); if (p2m_entry_retry(&entry)) { - vcpu_flush_tlb_vhpt_range(ifa, logps); + vcpu_flush_tlb_vhpt_range(ifa, _itir.ps); goto again; } - vcpu_set_tr_entry(&PSCBX(vcpu, dtlb), pte, itir, ifa); + vcpu_set_tr_entry(&PSCBX(vcpu, dtlb), pte, _itir.itir, ifa); return IA64_NO_FAULT; } IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa) { - unsigned long pteval, logps = itir_ps(itir); + unsigned long pteval; BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode)); struct p2m_entry entry; - - if (logps < PAGE_SHIFT) + ia64_itir_t _itir = {.itir = itir}; + + if (_itir.ps < PAGE_SHIFT) panic_domain(NULL, "vcpu_itc_i: domain trying to use " "smaller page size!\n"); again: //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize - pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry); + pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry); if (!pteval) return IA64_ILLOP_FAULT; if (swap_rr0) set_one_rr(0x0, PSCB(vcpu, rrs[0])); - vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, logps, &entry); + vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, _itir.itir, &entry); if (swap_rr0) set_metaphysical_rr0(); if (p2m_entry_retry(&entry)) { - vcpu_flush_tlb_vhpt_range(ifa, logps); + vcpu_flush_tlb_vhpt_range(ifa, _itir.ps); goto again; } - vcpu_set_tr_entry(&PSCBX(vcpu, itlb), pte, itir, ifa); + vcpu_set_tr_entry(&PSCBX(vcpu, itlb), pte, _itir.itir, ifa); return IA64_NO_FAULT; } diff -r a82ee4db96cf -r 9982c9674eb7 xen/arch/ia64/xen/vhpt.c --- a/xen/arch/ia64/xen/vhpt.c Tue Jul 24 10:29:43 2007 +0200 +++ b/xen/arch/ia64/xen/vhpt.c Tue Jul 24 11:26:54 2007 +0200 @@ -71,7 +71,7 @@ vhpt_erase(unsigned long vhpt_maddr) // initialize cache too??? } -void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long logps) +void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long itir) { struct vhpt_lf_entry *vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr); unsigned long tag = ia64_ttag (vadr); @@ -80,21 +80,23 @@ void vhpt_insert (unsigned long vadr, un * because the processor may support speculative VHPT walk. 
diff -r a82ee4db96cf -r 9982c9674eb7 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c	Tue Jul 24 10:29:43 2007 +0200
+++ b/xen/arch/ia64/xen/vhpt.c	Tue Jul 24 11:26:54 2007 +0200
@@ -71,7 +71,7 @@ vhpt_erase(unsigned long vhpt_maddr)
     // initialize cache too???
 }
 
-void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long logps)
+void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long itir)
 {
     struct vhpt_lf_entry *vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
     unsigned long tag = ia64_ttag (vadr);
@@ -80,21 +80,23 @@ void vhpt_insert (unsigned long vadr, un
     /* Even though VHPT is per VCPU, still need to first disable the entry,
      * because the processor may support speculative VHPT walk.  */
     vlfe->ti_tag = INVALID_TI_TAG;
     wmb();
-    vlfe->itir = logps;
+    vlfe->itir = itir;
     vlfe->page_flags = pte | _PAGE_P;
     *(volatile unsigned long*)&vlfe->ti_tag = tag;
 }
 
-void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte, unsigned long logps)
-{
-    unsigned long mask = (1L << logps) - 1;
+void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
+                          unsigned long itir)
+{
+    ia64_itir_t _itir = {.itir = itir};
+    unsigned long mask = (1L << _itir.ps) - 1;
     int i;
 
-    if (logps-PAGE_SHIFT > 10 && !running_on_sim) {
+    if (_itir.ps-PAGE_SHIFT > 10 && !running_on_sim) {
         // if this happens, we may want to revisit this algorithm
         panic("vhpt_multiple_insert:logps-PAGE_SHIFT>10,spinning..\n");
     }
-    if (logps-PAGE_SHIFT > 2) {
+    if (_itir.ps-PAGE_SHIFT > 2) {
         // FIXME: Should add counter here to see how often this
         // happens (e.g. for 16MB pages!) and determine if it
         // is a performance problem.  On a quick look, it takes
@@ -102,15 +104,15 @@ void vhpt_multiple_insert(unsigned long 
         // only a few times/second, so OK for now.
         // An alternate solution would be to just insert the one
         // 16KB in the vhpt (but with the full mapping)?
-        //printk("vhpt_multiple_insert: logps-PAGE_SHIFT==%d,"
+        //printk("vhpt_multiple_insert: _itir.ps-PAGE_SHIFT==%d,"
         //"va=%p, pa=%p, pa-masked=%p\n",
-        //logps-PAGE_SHIFT,vaddr,pte&_PFN_MASK,
+        //_itir.ps-PAGE_SHIFT,vaddr,pte&_PFN_MASK,
         //(pte&_PFN_MASK)&~mask);
     }
     vaddr &= ~mask;
     pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);
-    for (i = 1L << (logps-PAGE_SHIFT); i > 0; i--) {
-        vhpt_insert(vaddr,pte,logps<<2);
+    for (i = 1L << (_itir.ps-PAGE_SHIFT); i > 0; i--) {
+        vhpt_insert(vaddr,pte,_itir.itir);
         vaddr += PAGE_SIZE;
     }
 }
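As a sanity check on the rewritten loop bounds in vhpt_multiple_insert():
the entry count is 1 << (ps - PAGE_SHIFT), so with 16KB Xen pages a 64KB
guest mapping produces four VHPT entries, and the ps - PAGE_SHIFT > 10
panic caps the loop at 1024 iterations. A standalone sketch of the
arithmetic; PAGE_SHIFT == 14 is an assumed build-time value here:

    #include <stdio.h>

    /* Standalone check of the vhpt_multiple_insert() loop count.
     * PAGE_SHIFT == 14 (16KB pages) is an assumption for this sketch. */
    #define PAGE_SHIFT 14UL

    int main(void)
    {
        unsigned long ps = 16;                      /* 64KB guest page */
        unsigned long mask = (1UL << ps) - 1;       /* alignment mask */
        unsigned long count = 1UL << (ps - PAGE_SHIFT);

        printf("align mask 0x%lx, entries inserted %lu\n", mask, count);
        /* prints: align mask 0xffff, entries inserted 4 */
        return 0;
    }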
diff -r a82ee4db96cf -r 9982c9674eb7 xen/include/asm-ia64/linux-xen/asm/processor.h
--- a/xen/include/asm-ia64/linux-xen/asm/processor.h	Tue Jul 24 10:29:43 2007 +0200
+++ b/xen/include/asm-ia64/linux-xen/asm/processor.h	Tue Jul 24 11:26:54 2007 +0200
@@ -533,6 +533,20 @@ ia64_itr (__u64 target_mask, __u64 tr_nu
  * Insert a translation into the instruction and/or data translation
  * cache.
  */
+#ifdef XEN
+static inline void
+ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte, __u64 itir)
+{
+    ia64_setreg(_IA64_REG_CR_ITIR, itir);
+    ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+    ia64_stop();
+    /* as per EAS2.6, itc must be the last instruction in an instruction group */
+    if (target_mask & 0x1)
+        ia64_itci(pte);
+    if (target_mask & 0x2)
+        ia64_itcd(pte);
+}
+#else
 static inline void
 ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
           __u64 log_page_size)
@@ -546,6 +560,7 @@ ia64_itc (__u64 target_mask, __u64 vmadd
     if (target_mask & 0x2)
         ia64_itcd(pte);
 }
+#endif
 
 /*
  * Purge a range of addresses from instruction and/or data translation
diff -r a82ee4db96cf -r 9982c9674eb7 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h	Tue Jul 24 10:29:43 2007 +0200
+++ b/xen/include/asm-ia64/mm.h	Tue Jul 24 11:26:54 2007 +0200
@@ -447,7 +447,7 @@ extern unsigned long dom0vp_expose_p2m(s
 extern volatile unsigned long *mpt_table;
 extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
-extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps, struct p2m_entry* entry);
+extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* itir, struct p2m_entry* entry);
 
 #define machine_to_phys_mapping	mpt_table
 
 #define INVALID_M2P_ENTRY        (~0UL)
diff -r a82ee4db96cf -r 9982c9674eb7 xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h	Tue Jul 24 10:29:43 2007 +0200
+++ b/xen/include/asm-ia64/vhpt.h	Tue Jul 24 11:26:54 2007 +0200
@@ -38,9 +38,9 @@ extern void vhpt_init (void);
 extern void vhpt_init (void);
 extern void gather_vhpt_stats(void);
 extern void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
-                                 unsigned long logps);
+                                 unsigned long itir);
 extern void vhpt_insert (unsigned long vadr, unsigned long pte,
-                         unsigned long logps);
+                         unsigned long itir);
 void local_vhpt_flush(void);
 extern void vcpu_vhpt_flush(struct vcpu* v);
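For contrast with the Linux variant kept in the #else branch, a
hypothetical call site of the new Xen ia64_itc(), matching the pattern
used in vmmu.c and vtlb.c above:

    /* Sketch: insert a data translation-cache entry for one Xen page with
     * protection key 0, with interrupt collection disabled around the
     * insert, exactly as the call sites in this patch do. */
    u64 psr = ia64_clear_ic();
    ia64_itc(2, vaddr, pte, IA64_ITIR_PS_KEY(PAGE_SHIFT, 0)); /* 2 == D-TC */
    ia64_set_psr(psr);
    ia64_srlz_i();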