# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 166073f830a3ca3ba4495f33e1beed99f09a0e99
# Parent 18b087bafac6197716b7b2290ddb1c7e656916fe
[IA64] VTI: Optimize thash vtlb algorithm
Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
---
xen/arch/ia64/vmx/vmmu.c | 32 +
xen/arch/ia64/vmx/vmx_interrupt.c | 11
xen/arch/ia64/vmx/vmx_ivt.S | 222 +++++-------
xen/arch/ia64/vmx/vmx_process.c | 57 +--
xen/arch/ia64/vmx/vtlb.c | 679 ++++++++++++++------------------------
xen/include/asm-ia64/vmmu.h | 111 +-----
xen/include/asm-ia64/vmx_vcpu.h | 1
7 files changed, 428 insertions(+), 685 deletions(-)
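
Before the diffs, a note on the data structure they all touch: this patch collapses the old split between TLB-format and VHPT-format hash entries into a single long-format entry shared by the per-vcpu software VTLB and the machine VHPT, with each hash bucket carrying a singly linked collision chain. The sketch below is an illustrative C rendering of that layout, not the literal header; field names follow thash_data_t/thash_cb_t in the vmmu.h hunks further down, and in the real layout the chain-length count is a bitfield overlapping bits 56..59 of the head's page_flags word (which is why the IVT code below extracts it with extr.u ...,56,4).

    typedef unsigned long u64;

    typedef struct thash_data {
        u64 page_flags;          /* long-format pte: p, ma, pl, ppn, plus
                                    VTLB_PTE_IO and, on a bucket head, the
                                    collision-chain length in bits 56..59 */
        u64 itir;                /* page size (ps << 2) and rid/key       */
        u64 etag;                /* ia64_ttag(va); bit 63 is ti (invalid) */
        struct thash_data *next; /* collision chain, NULL-terminated      */
    } thash_data_t;

    typedef struct thash_cb {
        thash_data_t *hash;          /* hash table: one head per bucket   */
        u64 hash_sz;                 /* size of the table in bytes        */
        void *cch_buf;               /* pool of collision-chain nodes     */
        u64 cch_sz;                  /* size of the pool in bytes         */
        thash_data_t *cch_freelist;  /* free chain nodes, singly linked   */
        thash_data_t *cch_rec_head;  /* chain-recycle cursor (new field)  */
        u64 pta;                     /* base/size in PTA register format  */
    } thash_cb_t;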
diff -r 18b087bafac6 -r 166073f830a3 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c Tue May 30 08:46:21 2006 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c Tue May 30 10:28:59 2006 -0600
@@ -338,6 +338,7 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
+#ifdef VTLB_DEBUG
int slot;
u64 ps, va;
ps = itir_ps(itir);
@@ -348,14 +349,16 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
return IA64_FAULT;
}
+#endif //VTLB_DEBUG
thash_purge_and_insert(vcpu, pte, itir, ifa);
return IA64_NO_FAULT;
}
IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
+#ifdef VTLB_DEBUG
int slot;
- u64 ps, va, gpfn;
+ u64 ps, va;
ps = itir_ps(itir);
va = PAGEALIGN(ifa, ps);
slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
@@ -364,9 +367,7 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
return IA64_FAULT;
}
- gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
- if(VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain,gpfn))
- pte |= VTLB_PTE_IO;
+#endif //VTLB_DEBUG
thash_purge_and_insert(vcpu, pte, itir, ifa);
return IA64_NO_FAULT;
@@ -377,11 +378,14 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
+#ifdef VTLB_DEBUG
int index;
+#endif
u64 ps, va, rid;
-
+ thash_data_t * p_itr;
ps = itir_ps(itir);
va = PAGEALIGN(ifa, ps);
+#ifdef VTLB_DEBUG
index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
if (index >=0) {
// generate MCA.
@@ -389,9 +393,11 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64
return IA64_FAULT;
}
thash_purge_entries(vcpu, va, ps);
+#endif
vcpu_get_rr(vcpu, va, &rid);
rid = rid& RR_RID_MASK;
- vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.itrs[slot], pte, itir, va, rid);
+ p_itr = (thash_data_t *)&vcpu->arch.itrs[slot];
+ vmx_vcpu_set_tr(p_itr, pte, itir, va, rid);
vcpu_quick_region_set(PSCBX(vcpu,itr_regions),va);
return IA64_NO_FAULT;
}
@@ -399,11 +405,15 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64
IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
+#ifdef VTLB_DEBUG
int index;
- u64 ps, va, gpfn, rid;
-
+ u64 gpfn;
+#endif
+ u64 ps, va, rid;
+ thash_data_t * p_dtr;
ps = itir_ps(itir);
va = PAGEALIGN(ifa, ps);
+#ifdef VTLB_DEBUG
index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
if (index>=0) {
// generate MCA.
@@ -412,10 +422,12 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
}
thash_purge_entries(vcpu, va, ps);
gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
- if(__gpfn_is_io(vcpu->domain,gpfn))
+ if(VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain,gpfn))
pte |= VTLB_PTE_IO;
+#endif
vcpu_get_rr(vcpu, va, &rid);
rid = rid& RR_RID_MASK;
+ p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.dtrs[slot], pte, itir, va, rid);
vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),va);
return IA64_NO_FAULT;
@@ -432,7 +444,6 @@ IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT
index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
if (index>=0) {
vcpu->arch.dtrs[index].pte.p=0;
- index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
}
thash_purge_entries(vcpu, va, ps);
return IA64_NO_FAULT;
@@ -447,7 +458,6 @@ IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT
index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
if (index>=0) {
vcpu->arch.itrs[index].pte.p=0;
- index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
}
thash_purge_entries(vcpu, va, ps);
return IA64_NO_FAULT;
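
Worth noting about the vmmu.c hunks above: the TR-overlap panics are now compiled only under VTLB_DEBUG, and the __gpfn_is_io()/VTLB_PTE_IO tagging that vmx_vcpu_itc_d() used to do inline moves into thash_purge_and_insert(), by way of translate_phy_pte() in the vtlb.c changes below. A hedged sketch of that contract (the helper names are from this patch; the fragment itself is illustrative, not the literal code):

    /* translate_phy_pte() returns the machine pte for the faulting
     * page, or -1 for an IO page, in which case it also ORs
     * VTLB_PTE_IO into the guest pte it was handed. */
    u64 phy_pte = translate_phy_pte(v, &pte, itir, ifa);
    if (pte & VTLB_PTE_IO) {
        /* IO pages never enter the machine VHPT; keep them in the
         * software VTLB so vmx_hpw_miss() can route the access to
         * emulate_io_inst(). */
        vtlb_insert(&v->arch.vtlb, pte, itir, ifa);
        vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
    } else {
        vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
    }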
diff -r 18b087bafac6 -r 166073f830a3 xen/arch/ia64/vmx/vmx_interrupt.c
--- a/xen/arch/ia64/vmx/vmx_interrupt.c Tue May 30 08:46:21 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_interrupt.c Tue May 30 10:28:59 2006 -0600
@@ -390,3 +390,14 @@ page_not_present(VCPU *vcpu, u64 vadr)
inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
}
+/* Deal with
+ * Data access rights vector
+ */
+void
+data_access_rights(VCPU *vcpu, u64 vadr)
+{
+ /* If vPSR.ic, IFA, ITIR */
+ set_ifa_itir_iha (vcpu, vadr, 1, 1, 0);
+ inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
+}
+
diff -r 18b087bafac6 -r 166073f830a3 xen/arch/ia64/vmx/vmx_ivt.S
--- a/xen/arch/ia64/vmx/vmx_ivt.S Tue May 30 08:46:21 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_ivt.S Tue May 30 10:28:59 2006 -0600
@@ -141,9 +141,13 @@ ENTRY(vmx_itlb_miss)
mov r16 = cr.ifa
;;
thash r17 = r16
- ;;
ttag r20 = r16
- mov r18 = r17
+ ;;
+ mov r18 = r17
+ adds r28 = VLE_TITAG_OFFSET,r17
+ adds r19 = VLE_CCHAIN_OFFSET, r17
+ ;;
+ ld8 r17 = [r19]
;;
vmx_itlb_loop:
cmp.eq p6,p0 = r0, r17
@@ -161,43 +165,21 @@ vmx_itlb_loop:
(p7)mov r17 = r23;
(p7)br.sptk vmx_itlb_loop
;;
- adds r23 = VLE_PGFLAGS_OFFSET, r17
- adds r24 = VLE_ITIR_OFFSET, r17
- ;;
- ld8 r25 = [r23]
- ld8 r26 = [r24]
- ;;
- cmp.eq p6,p7=r18,r17
-(p6) br vmx_itlb_loop1
- ;;
+ ld8 r25 = [r17]
ld8 r27 = [r18]
- ;;
- extr.u r19 = r27, 56, 8
- extr.u r20 = r25, 56, 8
- ;;
- dep r27 = r20, r27, 56, 8
- dep r25 = r19, r25, 56, 8
- ;;
- st8 [r18] = r25,8
- st8 [r23] = r27
- ;;
- ld8 r28 = [r18]
- ;;
- st8 [r18] = r26,8
- st8 [r24] = r28
- ;;
- ld8 r30 = [r18]
- ;;
- st8 [r18] = r22
- st8 [r16] = r30
- ;;
-vmx_itlb_loop1:
- mov cr.itir = r26
+ ld8 r29 = [r28]
+ ;;
+ st8 [r16] = r29
+ st8 [r28] = r22
+ extr.u r19 = r27, 56, 4
+ ;;
+ dep r27 = r0, r27, 56, 4
+ dep r25 = r19, r25, 56, 4
+ ;;
+ st8 [r18] = r25
+ st8 [r17] = r27
;;
itc.i r25
- ;;
- srlz.i
- ;;
mov r17=cr.isr
mov r23=r31
mov r22=b0
@@ -219,7 +201,7 @@ vmx_itlb_out:
VMX_FAULT(1);
END(vmx_itlb_miss)
- .org vmx_ia64_ivt+0x0800
+ .org vmx_ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(vmx_dtlb_miss)
@@ -232,9 +214,13 @@ ENTRY(vmx_dtlb_miss)
mov r16 = cr.ifa
;;
thash r17 = r16
- ;;
ttag r20 = r16
- mov r18 = r17
+ ;;
+ mov r18 = r17
+ adds r28 = VLE_TITAG_OFFSET,r17
+ adds r19 = VLE_CCHAIN_OFFSET, r17
+ ;;
+ ld8 r17 = [r19]
;;
vmx_dtlb_loop:
cmp.eq p6,p0 = r0, r17
@@ -252,43 +238,21 @@ vmx_dtlb_loop:
(p7)mov r17 = r23;
(p7)br.sptk vmx_dtlb_loop
;;
- adds r23 = VLE_PGFLAGS_OFFSET, r17
- adds r24 = VLE_ITIR_OFFSET, r17
- ;;
- ld8 r25 = [r23]
- ld8 r26 = [r24]
- ;;
- cmp.eq p6,p7=r18,r17
-(p6) br vmx_dtlb_loop1
- ;;
+ ld8 r25 = [r17]
ld8 r27 = [r18]
- ;;
- extr.u r19 = r27, 56, 8
- extr.u r20 = r25, 56, 8
- ;;
- dep r27 = r20, r27, 56, 8
- dep r25 = r19, r25, 56, 8
- ;;
- st8 [r18] = r25,8
- st8 [r23] = r27
- ;;
- ld8 r28 = [r18]
- ;;
- st8 [r18] = r26,8
- st8 [r24] = r28
- ;;
- ld8 r30 = [r18]
- ;;
- st8 [r18] = r22
- st8 [r16] = r30
- ;;
-vmx_dtlb_loop1:
- mov cr.itir = r26
- ;;
+ ld8 r29 = [r28]
+ ;;
+ st8 [r16] = r29
+ st8 [r28] = r22
+ extr.u r19 = r27, 56, 4
+ ;;
+ dep r27 = r0, r27, 56, 4
+ dep r25 = r19, r25, 56, 4
+ ;;
+ st8 [r18] = r25
+ st8 [r17] = r27
+ ;;
itc.d r25
- ;;
- srlz.d;
- ;;
mov r17=cr.isr
mov r23=r31
mov r22=b0
@@ -310,7 +274,7 @@ vmx_dtlb_out:
VMX_FAULT(2);
END(vmx_dtlb_miss)
- .org vmx_ia64_ivt+0x0c00
+ .org vmx_ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(vmx_alt_itlb_miss)
@@ -321,88 +285,84 @@ ENTRY(vmx_alt_itlb_miss)
tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
(p7)br.spnt vmx_fault_3
vmx_alt_itlb_miss_1:
- mov r16=cr.ifa // get address that caused the TLB miss
+ mov r16=cr.ifa // get address that caused the TLB miss
;;
tbit.z p6,p7=r16,63
(p6)br.spnt vmx_fault_3
;;
- movl r17=PAGE_KERNEL
- mov r24=cr.ipsr
- movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
- ;;
- and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
- shr.u r18=r16,55 // move address bit 59 to bit 4
- ;;
- and r18=0x10,r18 // bit 4=address-bit(61)
- or r19=r17,r19 // insert PTE control bits into r19
- ;;
- movl r20=IA64_GRANULE_SHIFT<<2
- or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
- ;;
- mov cr.itir=r20
- ;;
- srlz.i
- ;;
- itc.i r19 // insert the TLB entry
- mov pr=r31,-1
- rfi
+ movl r17=PAGE_KERNEL
+ mov r24=cr.ipsr
+ movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+ ;;
+ and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
+ shr.u r18=r16,55 // move address bit 59 to bit 4
+ ;;
+ and r18=0x10,r18 // bit 4=address-bit(61)
+ or r19=r17,r19 // insert PTE control bits into r19
+ ;;
+ movl r20=IA64_GRANULE_SHIFT<<2
+ or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
+ ;;
+ mov cr.itir=r20
+ ;;
+ itc.i r19 // insert the TLB entry
+ mov pr=r31,-1
+ rfi
VMX_FAULT(3);
END(vmx_alt_itlb_miss)
- .org vmx_ia64_ivt+0x1000
+ .org vmx_ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(vmx_alt_dtlb_miss)
VMX_DBG_FAULT(4)
- mov r31=pr
+ mov r31=pr
mov r29=cr.ipsr;
;;
tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
(p7)br.spnt vmx_fault_4
vmx_alt_dtlb_miss_1:
- mov r16=cr.ifa // get address that caused the TLB miss
+ mov r16=cr.ifa // get address that caused the TLB miss
;;
#ifdef CONFIG_VIRTUAL_FRAME_TABLE
- // Test for the address of virtual frame_table
- shr r22=r16,56;;
- cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
-(p8) br.cond.sptk frametable_miss ;;
+ // Test for the address of virtual frame_table
+ shr r22=r16,56;;
+ cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
+(p8)br.cond.sptk frametable_miss ;;
#endif
tbit.z p6,p7=r16,63
(p6)br.spnt vmx_fault_4
;;
- movl r17=PAGE_KERNEL
- mov r20=cr.isr
- movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
- mov r24=cr.ipsr
- ;;
- and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
- tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
- shr.u r18=r16,55 // move address bit 59 to bit 4
- and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
- tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
- ;;
- and r18=0x10,r18 // bit 4=address-bit(61)
-(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
- dep r24=-1,r24,IA64_PSR_ED_BIT,1
- or r19=r19,r17 // insert PTE control bits into r19
- ;;
- or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
-(p6) mov cr.ipsr=r24
- movl r20=IA64_GRANULE_SHIFT<<2
- ;;
- mov cr.itir=r20
- ;;
- srlz.i
- ;;
-(p7) itc.d r19 // insert the TLB entry
- mov pr=r31,-1
- rfi
+ movl r17=PAGE_KERNEL
+ mov r20=cr.isr
+ movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+ mov r24=cr.ipsr
+ ;;
+ and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
+ tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
+ shr.u r18=r16,55 // move address bit 59 to bit 4
+ and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
+ tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
+ ;;
+ and r18=0x10,r18 // bit 4=address-bit(61)
+(p9)cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
+ dep r24=-1,r24,IA64_PSR_ED_BIT,1
+ or r19=r19,r17 // insert PTE control bits into r19
+ ;;
+ or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
+(p6)mov cr.ipsr=r24
+ movl r20=IA64_GRANULE_SHIFT<<2
+ ;;
+ mov cr.itir=r20
+ ;;
+(p7)itc.d r19 // insert the TLB entry
+ mov pr=r31,-1
+ rfi
VMX_FAULT(4);
END(vmx_alt_dtlb_miss)
- .org vmx_ia64_ivt+0x1400
+ .org vmx_ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(vmx_nested_dtlb_miss)
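
The rewritten miss handlers are easier to follow in C. Below is an approximate rendering of the new vmx_itlb_miss/vmx_dtlb_miss fast path (a sketch only: the real code is the assembly above, which keeps everything in registers and uses the thash/ttag/itc hardware operations directly, and the hw_* helpers here are invented for illustration). Note also that the explicit srlz.i/srlz.d after itc are gone; the rfi that exits the handler already performs the required serialization.

    thash_data_t *head = (thash_data_t *)hw_thash(ifa); /* thash r17=r16 */
    u64 tag = hw_ttag(ifa);                             /* ttag  r20=r16 */
    thash_data_t *cch;

    for (cch = head->next; cch != NULL; cch = cch->next)
        if (cch->etag == tag)           /* tag-compare loop              */
            break;
    if (cch == NULL)
        goto vmx_itlb_out;              /* slow path: the C miss handler */

    /* Hit: swap the entry with its bucket head so the hardware VHPT
     * walker (and the next software walk) finds it without chasing the
     * chain.  The chain length lives in bits 56..59 of the head's pte
     * word -- bits the hardware ignores -- so it stays with the head. */
    u64 head_pte = head->page_flags, hit_pte = cch->page_flags;
    u64 len = (head_pte >> 56) & 0xf;   /* extr.u r19 = r27, 56, 4       */
    cch->etag        = head->etag;
    head->etag       = tag;
    cch->page_flags  = head_pte & ~(0xfUL << 56);
    head->page_flags = (hit_pte & ~(0xfUL << 56)) | (len << 56);
    hw_itc_i(head->page_flags);         /* itc.i (itc.d on the dtlb side)*/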
diff -r 18b087bafac6 -r 166073f830a3 xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c Tue May 30 08:46:21 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c Tue May 30 10:28:59 2006 -0600
@@ -302,7 +302,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
{
IA64_PSR vpsr;
int type=ISIDE_TLB;
- u64 vhpt_adr, gppa;
+ u64 vhpt_adr, gppa, pteval, rr, itir;
ISR misr;
// REGS *regs;
thash_data_t *data;
@@ -314,18 +314,6 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
vpsr.val = vmx_vcpu_get_psr(v);
misr.val=VMX(v,cr_isr);
-/* TODO
- if(v->domain->id && vec == 2 &&
- vpsr.dt == 0 && is_gpa_io(MASK_PMA(vaddr))){
- emulate_ins(&v);
- return;
- }
-*/
-/* if(vadr == 0x1ea18c00 ){
- ia64_clear_ic();
- while(1);
- }
- */
if(is_physical_mode(v)&&(!(vadr<<1>>62))){
if(vec==2){
if(v->domain!=dom0&&__gpfn_is_io(v->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
@@ -338,31 +326,24 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
}
if(vec == 1) type = ISIDE_TLB;
else if(vec == 2) type = DSIDE_TLB;
- else panic_domain(regs,"wrong vec:%0xlx\n",vec);
+ else panic_domain(regs,"wrong vec:%lx\n",vec);
// prepare_if_physical_mode(v);
if((data=vtlb_lookup(v, vadr,type))!=0){
-// gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
-// if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
+// gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
+// if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
if(v->domain!=dom0 && data->io && type==DSIDE_TLB ){
- gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
- emulate_io_inst(v, gppa, data->ma);
+ if(data->pl >= ((regs->cr_ipsr>>IA64_PSR_CPL0_BIT)&3)){
+ gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
+ emulate_io_inst(v, gppa, data->ma);
+ }else{
+ data_access_rights(v, vadr);
+ }
return IA64_FAULT;
}
-// if ( data->ps != vrr.ps ) {
-// machine_tlb_insert(v, data);
-// }
-// else {
-/* if ( data->contiguous&&(!data->tc)){
- machine_tlb_insert(v, data);
- }
- else{
- */
- thash_vhpt_insert(&v->arch.vhpt,data->page_flags, data->itir ,vadr);
-// }
-// }
+ thash_vhpt_insert(v,data->page_flags, data->itir ,vadr);
}else if(type == DSIDE_TLB){
if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
if(vpsr.ic){
@@ -381,7 +362,13 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
}
} else{
vmx_vcpu_thash(v, vadr, &vhpt_adr);
- if(vhpt_lookup(vhpt_adr) || vtlb_lookup(v, vhpt_adr, DSIDE_TLB)){
+ if(!guest_vhpt_lookup(vhpt_adr, &pteval)){
+ if (pteval & _PAGE_P){
+ vcpu_get_rr(v, vadr, &rr);
+ itir = rr&(RR_RID_MASK | RR_PS_MASK);
+ thash_purge_and_insert(v, pteval, itir , vadr);
+ return IA64_NO_FAULT;
+ }
if(vpsr.ic){
vcpu_set_isr(v, misr.val);
dtlb_fault(v, vadr);
@@ -423,7 +410,13 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
return IA64_FAULT;
} else{
vmx_vcpu_thash(v, vadr, &vhpt_adr);
- if(vhpt_lookup(vhpt_adr) || vtlb_lookup(v, vhpt_adr, DSIDE_TLB)){
+ if(!guest_vhpt_lookup(vhpt_adr, &pteval)){
+ if (pteval & _PAGE_P){
+ vcpu_get_rr(v, vadr, &rr);
+ itir = rr&(RR_RID_MASK | RR_PS_MASK);
+ thash_purge_and_insert(v, pteval, itir , vadr);
+ return IA64_NO_FAULT;
+ }
if(!vpsr.ic){
misr.ni=1;
}
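
Taken together, the hunks above replace the old test -- "is the guest's VHPT page mapped at all?" -- with an actual read of the guest's VHPT entry, so a miss the guest's own handler would have satisfied anyway is now resolved by Xen in one step. Roughly, for the data side (the instruction side mirrors it; error paths abbreviated, variables as declared in vmx_hpw_miss):

    vmx_vcpu_thash(v, vadr, &vhpt_adr);           /* guest VHPT entry VA */
    if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {  /* entry was readable  */
        if (pteval & _PAGE_P) {
            vcpu_get_rr(v, vadr, &rr);
            itir = rr & (RR_RID_MASK | RR_PS_MASK); /* rid + preferred ps */
            thash_purge_and_insert(v, pteval, itir, vadr);
            return IA64_NO_FAULT;     /* resolved without a guest fault  */
        }
        /* present bit clear: reflect the TLB fault to the guest */
    } else {
        /* guest VHPT entry itself unmapped: reflect a VHPT fault */
    }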
diff -r 18b087bafac6 -r 166073f830a3 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c Tue May 30 08:46:21 2006 -0600
+++ b/xen/arch/ia64/vmx/vtlb.c Tue May 30 10:28:59 2006 -0600
@@ -36,27 +36,26 @@ thash_data_t *__alloc_chain(thash_cb_t *
static void cch_mem_init(thash_cb_t *hcb)
{
- thash_data_t *p, *q;
+ int num;
+ thash_data_t *p;
hcb->cch_freelist = p = hcb->cch_buf;
-
- for ( q=p+1; (u64)(q + 1) <= (u64)hcb->cch_buf + hcb->cch_sz;
- p++, q++ ) {
- p->next = q;
- }
+ num = (hcb->cch_sz/sizeof(thash_data_t))-1;
+ do{
+ p->next =p+1;
+ p++;
+ num--;
+ }while(num);
p->next = NULL;
}
static thash_data_t *cch_alloc(thash_cb_t *hcb)
{
thash_data_t *p;
-
if ( (p = hcb->cch_freelist) != NULL ) {
hcb->cch_freelist = p->next;
- return p;
- }else{
- return NULL;
- }
+ }
+ return p;
}
static void cch_free(thash_cb_t *hcb, thash_data_t *cch)
@@ -101,17 +100,13 @@ static void __rem_hash_head(thash_cb_t *
static void __rem_hash_head(thash_cb_t *hcb, thash_data_t *hash)
{
thash_data_t *next=hash->next;
-
-/* if ( hcb->remove_notifier ) {
- (hcb->remove_notifier)(hcb,hash);
- } */
- if ( next != NULL ) {
+ if ( next) {
next->len=hash->len-1;
*hash = *next;
cch_free (hcb, next);
}
else {
- INVALIDATE_HASH_HEADER(hcb, hash);
+ hash->ti=1;
}
}
@@ -145,125 +140,109 @@ thash_data_t *__vtr_lookup(VCPU *vcpu, u
}
-/*
- * Get the machine format of VHPT entry.
- * PARAS:
- * 1: tlb: means the tlb format hash entry converting to VHPT.
- * 2: va means the guest virtual address that must be coverd by
- * the translated machine VHPT.
- * 3: vhpt: means the machine format VHPT converting from tlb.
- * NOTES:
- * 1: In case of the machine address is discontiguous,
- * "tlb" needs to be covered by several machine VHPT. va
- * is used to choice one of them.
- * 2: Foreign map is supported in this API.
- * RETURN:
- * 0/1: means successful or fail.
- *
- */
-int __tlb_to_vhpt(thash_cb_t *hcb, thash_data_t *vhpt, u64 va)
-{
- u64 padr,pte;
- ASSERT ( hcb->ht == THASH_VHPT );
- padr = vhpt->ppn >>(vhpt->ps-ARCH_PAGE_SHIFT)<<vhpt->ps;
- padr += va&((1UL<<vhpt->ps)-1);
- pte=lookup_domain_mpa(current->domain,padr);
- if((pte>>56))
- return 0;
- vhpt->etag = ia64_ttag(va);
- vhpt->ps = PAGE_SHIFT;
- vhpt->ppn = (pte&((1UL<<IA64_MAX_PHYS_BITS)-(1UL<<PAGE_SHIFT)))>>ARCH_PAGE_SHIFT;
- vhpt->next = 0;
- return 1;
-}
-
-static void thash_remove_cch(thash_cb_t *hcb, thash_data_t *hash)
+static void thash_recycle_cch(thash_cb_t *hcb, thash_data_t *hash)
{
thash_data_t *p;
- if(hash->next){
- p=hash->next;
- while(p->next)
- p=p->next;
- p->next=hcb->cch_freelist;
- hcb->cch_freelist=hash->next;
- hash->next=0;
+ int i=0;
+
+ p=hash;
+ for(i=0; i < MAX_CCN_DEPTH; i++){
+ p=p->next;
+ }
+ p->next=hcb->cch_freelist;
+ hcb->cch_freelist=hash->next;
+ hash->len=0;
+ hash->next=0;
+}
+
+
+
+
+static void vmx_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
+{
+ u64 tag;
+ thash_data_t *head, *cch;
+ pte = pte & ~PAGE_FLAGS_RV_MASK;
+
+ head = (thash_data_t *)ia64_thash(ifa);
+ tag = ia64_ttag(ifa);
+ if( INVALID_VHPT(head) ) {
+ head->page_flags = pte;
+ head->etag = tag;
+ return;
+ }
+
+ if(head->len>=MAX_CCN_DEPTH){
+ thash_recycle_cch(hcb, head);
+ cch = cch_alloc(hcb);
+ }
+ else{
+ cch = __alloc_chain(hcb);
+ }
+ cch->page_flags=head->page_flags;
+ cch->etag=head->etag;
+ cch->next=head->next;
+ head->page_flags=pte;
+ head->etag=tag;
+ head->next = cch;
+ head->len = cch->len+1;
+ cch->len = 0;
+ return;
+}
+
+void thash_vhpt_insert(VCPU *v, u64 pte, u64 itir, u64 va)
+{
+ u64 phy_pte;
+ phy_pte=translate_phy_pte(v, &pte, itir, va);
+ vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
+}
+/*
+ * vhpt lookup
+ */
+
+thash_data_t * vhpt_lookup(u64 va)
+{
+ thash_data_t *hash, *head;
+ u64 tag, pte;
+ head = (thash_data_t *)ia64_thash(va);
+ hash=head;
+ tag = ia64_ttag(va);
+ do{
+ if(hash->etag == tag)
+ break;
+ hash=hash->next;
+ }while(hash);
+ if(hash && hash!=head){
+ pte = hash->page_flags;
+ hash->page_flags = head->page_flags;
+ head->page_flags = pte;
+ tag = hash->etag;
+ hash->etag = head->etag;
+ head->etag = tag;
+ head->len = hash->len;
hash->len=0;
- }
-}
-
-/* vhpt only has entries with PAGE_SIZE page size */
-
-void thash_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
-{
- thash_data_t vhpt_entry, *hash_table, *cch;
- vhpt_entry.page_flags = pte & ~PAGE_FLAGS_RV_MASK;
- vhpt_entry.itir=itir;
-
- if ( !__tlb_to_vhpt(hcb, &vhpt_entry, ifa) ) {
- return;
- //panic("Can't convert to machine VHPT entry\n");
- }
-
- hash_table = (thash_data_t *)ia64_thash(ifa);
- if( INVALID_VHPT(hash_table) ) {
- *hash_table = vhpt_entry;
- hash_table->next = 0;
- return;
- }
-
- cch = hash_table;
- while(cch){
- if(cch->etag == vhpt_entry.etag){
- if(cch->ppn == vhpt_entry.ppn)
- return;
- else
- while(1);
- }
- cch = cch->next;
- }
-
- if(hash_table->len>=MAX_CCN_DEPTH){
- thash_remove_cch(hcb, hash_table);
- cch = cch_alloc(hcb);
- *cch = *hash_table;
- *hash_table = vhpt_entry;
- hash_table->len = 1;
- hash_table->next = cch;
- return;
- }
-
- // TODO: Add collision chain length limitation.
- cch = __alloc_chain(hcb);
- if(cch == NULL){
- *hash_table = vhpt_entry;
- hash_table->next = 0;
- }else{
- *cch = *hash_table;
- *hash_table = vhpt_entry;
- hash_table->next = cch;
- hash_table->len = cch->len + 1;
- cch->len = 0;
-
- }
- return /*hash_table*/;
-}
-
-/*
- * vhpt lookup
- */
-
-thash_data_t * vhpt_lookup(u64 va)
-{
- thash_data_t *hash;
- u64 tag;
- hash = (thash_data_t *)ia64_thash(va);
- tag = ia64_ttag(va);
- while(hash){
- if(hash->etag == tag)
- return hash;
- hash=hash->next;
- }
- return NULL;
+ return head;
+ }
+ return hash;
+}
+
+u64 guest_vhpt_lookup(u64 iha, u64 *pte)
+{
+ u64 ret;
+ vhpt_lookup(iha);
+ asm volatile ("rsm psr.ic|psr.i;;"
+ "srlz.d;;"
+ "ld8.s r9=[%1];;"
+ "tnat.nz p6,p7=r9;;"
+ "(p6) mov %0=1;"
+ "(p6) mov r9=r0;"
+ "(p7) mov %0=r0;"
+ "(p7) st8 [%2]=r9;;"
+ "ssm psr.ic;;"
+ "srlz.d;;"
+ "ssm psr.i;;"
+ : "=r"(ret) : "r"(iha), "r"(pte):"memory");
+ return ret;
}
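
The inline assembly in guest_vhpt_lookup() above is what makes this walk safe: with interruption collection and interrupts masked, a speculative ld8.s of the guest VHPT entry either yields the pte or sets the target register's NaT bit instead of raising a nested TLB fault, and tnat.nz converts "mapped or not" into a predicate. The vhpt_lookup(iha) call beforehand warms Xen's own VHPT so the common case really is a single load. In rough C terms (a sketch -- speculative loads have no C equivalent, and the helpers are invented):

    u64 guest_vhpt_lookup_sketch(u64 iha, u64 *pte)
    {
        u64 val;
        int nat;
        vhpt_lookup(iha);           /* prime Xen's VHPT for iha          */
        disable_ic_and_i();         /* rsm psr.ic|psr.i ;; srlz.d        */
        nat = ld8_speculative(iha, &val);   /* ld8.s ;; tnat.nz          */
        if (!nat)
            *pte = val;             /* entry readable: hand back the pte */
        restore_ic_and_i();         /* ssm psr.ic ;; srlz.d ;; ssm psr.i */
        return nat;                 /* nonzero == guest VHPT unmapped    */
    }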
@@ -310,7 +289,6 @@ static void vtlb_purge(thash_cb_t *hcb,
/*
* purge VHPT and machine TLB
*/
-
static void vhpt_purge(thash_cb_t *hcb, u64 va, u64 ps)
{
thash_data_t *hash_table, *prev, *next;
@@ -332,7 +310,7 @@ static void vhpt_purge(thash_cb_t *hcb,
prev->next=next->next;
cch_free(hcb,next);
hash_table->len--;
- break;
+ break;
}
prev=next;
next=next->next;
@@ -347,16 +325,21 @@ static void vhpt_purge(thash_cb_t *hcb,
* Recycle all collisions chain in VTLB or VHPT.
*
*/
-
-void thash_recycle_cch(thash_cb_t *hcb)
-{
- thash_data_t *hash_table;
-
- hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
- for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
- thash_remove_cch(hcb,hash_table);
- }
-}
+void thash_recycle_cch_all(thash_cb_t *hcb)
+{
+ int num;
+ thash_data_t *head;
+ head=hcb->hash;
+ num = (hcb->hash_sz/sizeof(thash_data_t));
+ do{
+ head->len = 0;
+ head->next = 0;
+ head++;
+ num--;
+ }while(num);
+ cch_mem_init(hcb);
+}
+
thash_data_t *__alloc_chain(thash_cb_t *hcb)
{
@@ -364,7 +347,7 @@ thash_data_t *__alloc_chain(thash_cb_t *
cch = cch_alloc(hcb);
if(cch == NULL){
- thash_recycle_cch(hcb);
+ thash_recycle_cch_all(hcb);
cch = cch_alloc(hcb);
}
return cch;
@@ -385,51 +368,38 @@ void vtlb_insert(thash_cb_t *hcb, u64 pt
/* int flag; */
ia64_rr vrr;
/* u64 gppn, ppns, ppne; */
- u64 tag, ps;
- ps = itir_ps(itir);
+ u64 tag;
vcpu_get_rr(current, va, &vrr.rrval);
- if (vrr.ps != ps) {
+#ifdef VTLB_DEBUG
+ if (vrr.ps != itir_ps(itir)) {
// machine_tlb_insert(hcb->vcpu, entry);
panic_domain(NULL, "not preferred ps with va: 0x%lx vrr.ps=%d ps=%ld\n",
- va, vrr.ps, ps);
+ va, vrr.ps, itir_ps(itir));
return;
}
+#endif
hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
if( INVALID_TLB(hash_table) ) {
hash_table->page_flags = pte;
hash_table->itir=itir;
hash_table->etag=tag;
hash_table->next = 0;
- }
- else if (hash_table->len>=MAX_CCN_DEPTH){
- thash_remove_cch(hcb, hash_table);
+ return;
+ }
+ if (hash_table->len>=MAX_CCN_DEPTH){
+ thash_recycle_cch(hcb, hash_table);
cch = cch_alloc(hcb);
- *cch = *hash_table;
- hash_table->page_flags = pte;
- hash_table->itir=itir;
- hash_table->etag=tag;
- hash_table->len = 1;
- hash_table->next = cch;
- }
-
+ }
else {
- // TODO: Add collision chain length limitation.
cch = __alloc_chain(hcb);
- if(cch == NULL){
- hash_table->page_flags = pte;
- hash_table->itir=itir;
- hash_table->etag=tag;
- hash_table->next = 0;
- }else{
- *cch = *hash_table;
- hash_table->page_flags = pte;
- hash_table->itir=itir;
- hash_table->etag=tag;
- hash_table->next = cch;
- hash_table->len = cch->len + 1;
- cch->len = 0;
- }
- }
+ }
+ *cch = *hash_table;
+ hash_table->page_flags = pte;
+ hash_table->itir=itir;
+ hash_table->etag=tag;
+ hash_table->next = cch;
+ hash_table->len = cch->len + 1;
+ cch->len = 0;
return ;
}
@@ -473,6 +443,23 @@ void thash_purge_entries(VCPU *v, u64 va
vhpt_purge(&v->arch.vhpt, va, ps);
}
+u64 translate_phy_pte(VCPU *v, u64 *pte, u64 itir, u64 va)
+{
+ u64 ps, addr;
+ union pte_flags phy_pte;
+ ps = itir_ps(itir);
+ phy_pte.val = *pte;
+ addr = *pte;
+ addr = ((addr & _PAGE_PPN_MASK)>>ps<<ps)|(va&((1UL<<ps)-1));
+ addr = lookup_domain_mpa(v->domain, addr);
+ if(addr & GPFN_IO_MASK){
+ *pte |= VTLB_PTE_IO;
+ return -1;
+ }
+ phy_pte.ppn = addr >> ARCH_PAGE_SHIFT;
+ return phy_pte.val;
+}
+
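
translate_phy_pte() is where guest page size meets machine page size: the guest pte may map a large page, but lookup_domain_mpa() translates at PAGE_SIZE granularity, so the routine first reconstitutes the exact guest-physical address of the faulting access before translating it. A worked example with assumed numbers:

    /* Assume the guest pte maps a 16MB page (ps = 24) based at
     * guest-physical 0x3000000, and the access faults at an offset of
     * 0x123456 within it:
     *
     *   addr  = ((pte & _PAGE_PPN_MASK) >> 24 << 24)   = 0x3000000
     *   addr |= (va & ((1UL << 24) - 1))               = 0x3123456
     *
     * lookup_domain_mpa() then translates 0x3123456 itself, and the
     * returned machine frame replaces phy_pte.ppn at ARCH_PAGE_SHIFT
     * granularity -- one machine pte per faulting page, whatever the
     * guest's page size was. */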
/*
* Purge overlap TCs and then insert the new entry to emulate itc ops.
@@ -480,59 +467,79 @@ void thash_purge_entries(VCPU *v, u64 va
*/
void thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa)
{
- u64 ps, va;
+ u64 ps;//, va;
+ u64 phy_pte;
ps = itir_ps(itir);
- va = PAGEALIGN(ifa,ps);
- if(vcpu_quick_region_check(v->arch.tc_regions,va))
- vtlb_purge(&v->arch.vtlb, va, ps);
- vhpt_purge(&v->arch.vhpt, va, ps);
- if((ps!=PAGE_SHIFT)||(pte&VTLB_PTE_IO)){
- vtlb_insert(&v->arch.vtlb, pte, itir, va);
- vcpu_quick_region_set(PSCBX(v,tc_regions),va);
- }
- if(!(pte&VTLB_PTE_IO)){
- va = PAGEALIGN(ifa,PAGE_SHIFT);
- thash_vhpt_insert(&v->arch.vhpt, pte, itir, va);
- }
-}
-
-
+
+ if(VMX_DOMAIN(v)){
+ phy_pte = translate_phy_pte(v, &pte, itir, ifa);
+ if(ps==PAGE_SHIFT){
+ if(!(pte&VTLB_PTE_IO)){
+ vhpt_purge(&v->arch.vhpt, ifa, ps);
+ vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
+ }
+ else{
+ vhpt_purge(&v->arch.vhpt, ifa, ps);
+ vtlb_insert(&v->arch.vtlb, pte, itir, ifa);
+ vcpu_quick_region_set(PSCBX(v,tc_regions),ifa);
+ }
+ }
+ else{
+ vhpt_purge(&v->arch.vhpt, ifa, ps);
+ vtlb_insert(&v->arch.vtlb, pte, itir, ifa);
+ vcpu_quick_region_set(PSCBX(v,tc_regions),ifa);
+ if(!(pte&VTLB_PTE_IO)){
+ vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
+ }
+ }
+ }
+ else{
+ phy_pte = translate_phy_pte(v, &pte, itir, ifa);
+ if(ps!=PAGE_SHIFT){
+ vtlb_insert(&v->arch.vtlb, pte, itir, ifa);
+ vcpu_quick_region_set(PSCBX(v,tc_regions),ifa);
+ }
+ machine_tlb_purge(ifa, ps);
+ vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
+ }
+}
/*
* Purge all TCs or VHPT entries including those in Hash table.
*
*/
-// TODO: add sections.
+//TODO: add sections.
void thash_purge_all(VCPU *v)
{
- thash_data_t *hash_table;
- /* thash_data_t *entry; */
- thash_cb_t *hcb,*vhpt;
- /* u64 i, start, end; */
- hcb =&v->arch.vtlb;
+ int num;
+ thash_data_t *head;
+ thash_cb_t *vtlb,*vhpt;
+ vtlb =&v->arch.vtlb;
vhpt =&v->arch.vhpt;
-#ifdef VTLB_DEBUG
- extern u64 sanity_check;
- static u64 statistics_before_purge_all=0;
- if ( statistics_before_purge_all ) {
- sanity_check = 1;
- check_vtlb_sanity(hcb);
- }
-#endif
- ASSERT ( hcb->ht == THASH_TLB );
-
- hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
- for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
- INVALIDATE_TLB_HEADER(hash_table);
- }
- cch_mem_init (hcb);
-
- hash_table = (thash_data_t*)((u64)vhpt->hash + vhpt->hash_sz);
- for (--hash_table;(u64)hash_table >= (u64)vhpt->hash;hash_table--) {
- INVALIDATE_VHPT_HEADER(hash_table);
- }
- cch_mem_init (vhpt);
+
+ head=vtlb->hash;
+ num = (vtlb->hash_sz/sizeof(thash_data_t));
+ do{
+ head->page_flags = 0;
+ head->etag = 1UL<<63;
+ head->next = 0;
+ head++;
+ num--;
+ }while(num);
+ cch_mem_init(vtlb);
+
+ head=vhpt->hash;
+ num = (vhpt->hash_sz/sizeof(thash_data_t));
+ do{
+ head->page_flags = 0;
+ head->etag = 1UL<<63;
+ head->next = 0;
+ head++;
+ num--;
+ }while(num);
+ cch_mem_init(vhpt);
+
local_flush_tlb_all();
}
@@ -547,7 +554,7 @@ void thash_purge_all(VCPU *v)
thash_data_t *vtlb_lookup(VCPU *v, u64 va,int is_data)
{
- thash_data_t *hash_table, *cch;
+ thash_data_t *cch;
u64 tag;
ia64_rr vrr;
thash_cb_t * hcb= &v->arch.vtlb;
@@ -559,18 +566,14 @@ thash_data_t *vtlb_lookup(VCPU *v, u64 v
if(vcpu_quick_region_check(v->arch.tc_regions,va)==0)
return NULL;
-
vcpu_get_rr(v,va,&vrr.rrval);
- hash_table = vsa_thash( hcb->pta, va, vrr.rrval, &tag);
-
- if ( INVALID_ENTRY(hcb, hash_table ) )
- return NULL;
-
-
- for (cch=hash_table; cch; cch = cch->next) {
+ cch = vsa_thash( hcb->pta, va, vrr.rrval, &tag);
+
+ do{
if(cch->etag == tag)
return cch;
- }
+ cch = cch->next;
+ }while(cch);
return NULL;
}
@@ -580,198 +583,32 @@ thash_data_t *vtlb_lookup(VCPU *v, u64 v
*/
void thash_init(thash_cb_t *hcb, u64 sz)
{
- thash_data_t *hash_table;
-
- cch_mem_init (hcb);
+ int num;
+ thash_data_t *head, *p;
+
hcb->pta.val = (unsigned long)hcb->hash;
hcb->pta.vf = 1;
hcb->pta.ve = 1;
hcb->pta.size = sz;
-// hcb->get_rr_fn = vmmu_get_rr;
- ASSERT ( hcb->hash_sz % sizeof(thash_data_t) == 0 );
- hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
-
- for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
- INVALIDATE_HASH_HEADER(hcb,hash_table);
- }
-}
-
-#ifdef VTLB_DEBUG
-/*
-static u64 cch_length_statistics[MAX_CCH_LENGTH+1];
-u64 sanity_check=0;
-u64 vtlb_chain_sanity(thash_cb_t *vtlb, thash_cb_t *vhpt, thash_data_t *hash)
-{
- thash_data_t *cch;
- thash_data_t *ovl;
- search_section_t s_sect;
- u64 num=0;
-
- s_sect.v = 0;
- for (cch=hash; cch; cch=cch->next) {
- ovl = thash_find_overlap(vhpt, cch, s_sect);
- while ( ovl != NULL ) {
- ovl->checked = 1;
- ovl = (vhpt->next_overlap)(vhpt);
- };
- num ++;
- }
- if ( num >= MAX_CCH_LENGTH ) {
- cch_length_statistics[MAX_CCH_LENGTH] ++;
- }
- else {
- cch_length_statistics[num] ++;
- }
- return num;
-}
-
-void check_vtlb_sanity(thash_cb_t *vtlb)
-{
-// struct page_info *page;
- u64 hash_num, i, psr;
- static u64 check_ok_num, check_fail_num,check_invalid;
-// void *vb1, *vb2;
- thash_data_t *hash, *cch;
- thash_data_t *ovl;
- search_section_t s_sect;
- thash_cb_t *vhpt = vtlb->vhpt;
- u64 invalid_ratio;
-
- if ( sanity_check == 0 ) return;
- sanity_check --;
- s_sect.v = 0;
-// page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0);
-// if ( page == NULL ) {
-// panic("No enough contiguous memory for init_domain_mm\n");
-// };
-// vb1 = page_to_virt(page);
-// printf("Allocated page=%lp vbase=%lp\n", page, vb1);
-// vb2 = vb1 + vtlb->hash_sz;
- hash_num = vhpt->hash_sz / sizeof(thash_data_t);
-// printf("vb2=%lp, size=%lx hash_num=%lx\n", vb2, vhpt->hash_sz, hash_num);
- printf("vtlb=%p, hash=%p size=0x%lx; vhpt=%p, hash=%p size=0x%lx\n",
- vtlb, vtlb->hash,vtlb->hash_sz,
- vhpt, vhpt->hash, vhpt->hash_sz);
- //memcpy(vb1, vtlb->hash, vtlb->hash_sz);
- //memcpy(vb2, vhpt->hash, vhpt->hash_sz);
- for ( i=0; i < sizeof(cch_length_statistics)/sizeof(cch_length_statistics[0]); i++ ) {
- cch_length_statistics[i] = 0;
- }
-
- local_irq_save(psr);
-
- hash = vhpt->hash;
- for (i=0; i < hash_num; i++) {
- if ( !INVALID_ENTRY(vhpt, hash) ) {
- for ( cch= hash; cch; cch=cch->next) {
- cch->checked = 0;
- }
- }
- hash ++;
- }
- printf("Done vhpt clear checked flag, hash_num=0x%lx\n", hash_num);
- check_invalid = 0;
- check_ok_num=0;
- hash = vtlb->hash;
- for ( i=0; i< hash_num; i++ ) {
- if ( !INVALID_ENTRY(vtlb, hash) ) {
- check_ok_num += vtlb_chain_sanity(vtlb, vhpt, hash);
- }
- else {
- check_invalid++;
- }
- hash ++;
- }
- printf("Done vtlb entry check, hash=%p\n", hash);
- printf("check_ok_num = 0x%lx check_invalid=0x%lx\n",
check_ok_num,check_invalid);
- invalid_ratio = 1000*check_invalid / hash_num;
- printf("%02ld.%01ld%% entries are invalid\n",
- invalid_ratio/10, invalid_ratio % 10 );
- for (i=0; i<NDTRS; i++) {
- ovl = thash_find_overlap(vhpt, &vtlb->ts->dtr[i], s_sect);
- while ( ovl != NULL ) {
- ovl->checked = 1;
- ovl = (vhpt->next_overlap)(vhpt);
- };
- }
- printf("Done dTR\n");
- for (i=0; i<NITRS; i++) {
- ovl = thash_find_overlap(vhpt, &vtlb->ts->itr[i], s_sect);
- while ( ovl != NULL ) {
- ovl->checked = 1;
- ovl = (vhpt->next_overlap)(vhpt);
- };
- }
- printf("Done iTR\n");
- check_fail_num = 0;
- check_invalid = 0;
- check_ok_num=0;
- hash = vhpt->hash;
- for (i=0; i < hash_num; i++) {
- if ( !INVALID_ENTRY(vhpt, hash) ) {
- for ( cch= hash; cch; cch=cch->next) {
- if ( !cch->checked ) {
- printf ("!!!Hash=%p cch=%p not within vtlb\n", hash, cch);
- check_fail_num ++;
- }
- else {
- check_ok_num++;
- }
- }
- }
- else {
- check_invalid ++;
- }
- hash ++;
- }
- local_irq_restore(psr);
- printf("check_ok_num=0x%lx check_fail_num=0x%lx check_invalid=0x%lx\n",
- check_ok_num, check_fail_num, check_invalid);
- //memcpy(vtlb->hash, vb1, vtlb->hash_sz);
- //memcpy(vhpt->hash, vb2, vhpt->hash_sz);
- printf("The statistics of collision chain length is listed\n");
- for ( i=0; i < sizeof(cch_length_statistics)/sizeof(cch_length_statistics[0]); i++ ) {
- printf("CCH length=%02ld, chain number=%ld\n", i, cch_length_statistics[i]);
- }
-// free_domheap_pages(page, VCPU_TLB_ORDER);
- printf("Done check_vtlb\n");
-}
-
-void dump_vtlb(thash_cb_t *vtlb)
-{
- static u64 dump_vtlb=0;
- thash_data_t *hash, *cch, *tr;
- u64 hash_num,i;
-
- if ( dump_vtlb == 0 ) return;
- dump_vtlb --;
- hash_num = vtlb->hash_sz / sizeof(thash_data_t);
- hash = vtlb->hash;
-
- printf("Dump vTC\n");
- for ( i = 0; i < hash_num; i++ ) {
- if ( !INVALID_ENTRY(vtlb, hash) ) {
- printf("VTLB at hash=%p\n", hash);
- for (cch=hash; cch; cch=cch->next) {
- printf("Entry %p va=%lx ps=%d rid=%d\n",
- cch, cch->vadr, cch->ps, cch->rid);
- }
- }
- hash ++;
- }
- printf("Dump vDTR\n");
- for (i=0; i<NDTRS; i++) {
- tr = &DTR(vtlb,i);
- printf("Entry %p va=%lx ps=%d rid=%d\n",
- tr, tr->vadr, tr->ps, tr->rid);
- }
- printf("Dump vITR\n");
- for (i=0; i<NITRS; i++) {
- tr = &ITR(vtlb,i);
- printf("Entry %p va=%lx ps=%d rid=%d\n",
- tr, tr->vadr, tr->ps, tr->rid);
- }
- printf("End of vTLB dump\n");
-}
-*/
-#endif
+ hcb->cch_rec_head = hcb->hash;
+
+ head=hcb->hash;
+ num = (hcb->hash_sz/sizeof(thash_data_t));
+ do{
+ head->itir = PAGE_SHIFT<<2;
+ head->next = 0;
+ head++;
+ num--;
+ }while(num);
+
+ hcb->cch_freelist = p = hcb->cch_buf;
+ num = (hcb->cch_sz/sizeof(thash_data_t))-1;
+ do{
+ p->itir = PAGE_SHIFT<<2;
+ p->next =p+1;
+ p++;
+ num--;
+ }while(num);
+ p->itir = PAGE_SHIFT<<2;
+ p->next = NULL;
+}
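
The allocation policy behind the new vtlb.c is deliberately coarse: chain nodes come from the fixed cch_buf pool, every chain is capped at MAX_CCN_DEPTH, and reclamation happens in bulk, since any dropped translation can simply be refaulted. A sketch of the policy the insert paths above implement (names are from this patch; the wrapper function is illustrative):

    thash_data_t *get_chain_node(thash_cb_t *hcb, thash_data_t *head)
    {
        if (head->len >= MAX_CCN_DEPTH) {
            /* Chain too long: give the whole chain back to the
             * freelist (thash_recycle_cch) and reuse one node. */
            thash_recycle_cch(hcb, head);
            return cch_alloc(hcb);
        }
        /* Otherwise pop the freelist; on exhaustion, __alloc_chain
         * drops every chain and rebuilds the freelist via
         * thash_recycle_cch_all(), then retries. */
        return __alloc_chain(hcb);
    }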
diff -r 18b087bafac6 -r 166073f830a3 xen/include/asm-ia64/vmmu.h
--- a/xen/include/asm-ia64/vmmu.h Tue May 30 08:46:21 2006 -0600
+++ b/xen/include/asm-ia64/vmmu.h Tue May 30 10:28:59 2006 -0600
@@ -137,21 +137,19 @@ typedef struct thash_data {
} thash_data_t;
#define INVALIDATE_VHPT_HEADER(hdata) \
-{ ((hdata)->page_flags)=0; \
- ((hdata)->ti)=1; \
- ((hdata)->next)=0; }
-
-#define INVALIDATE_TLB_HEADER(hdata) \
-{ ((hdata)->page_flags)=0; \
- ((hdata)->ti)=1; \
- ((hdata)->next)=0; }
+{ ((hdata)->page_flags)=0; \
+ ((hdata)->itir)=PAGE_SHIFT<<2; \
+ ((hdata)->etag)=1UL<<63; \
+ ((hdata)->next)=0;}
+
+#define INVALIDATE_TLB_HEADER(hash) INVALIDATE_VHPT_HEADER(hash)
+
+#define INVALIDATE_HASH_HEADER(hcb,hash) INVALIDATE_VHPT_HEADER(hash)
#define INVALID_VHPT(hdata) ((hdata)->ti)
#define INVALID_TLB(hdata) ((hdata)->ti)
#define INVALID_TR(hdata) (!(hdata)->p)
#define INVALID_ENTRY(hcb, hdata) INVALID_VHPT(hdata)
-
-/* ((hcb)->ht==THASH_TLB ? INVALID_TLB(hdata) : INVALID_VHPT(hdata)) */
/*
@@ -189,80 +187,17 @@ typedef void (REM_THASH_FN)(struct thash
typedef void (REM_THASH_FN)(struct thash_cb *hcb, thash_data_t *entry);
typedef void (INS_THASH_FN)(struct thash_cb *hcb, thash_data_t *entry, u64 va);
-//typedef struct tlb_special {
-// thash_data_t itr[NITRS];
-// thash_data_t dtr[NDTRS];
-// struct thash_cb *vhpt;
-//} tlb_special_t;
-
-//typedef struct vhpt_cb {
- //u64 pta; // pta value.
-// GET_MFN_FN *get_mfn;
-// TTAG_FN *tag_func;
-//} vhpt_special;
-/*
-typedef struct thash_internal {
- thash_data_t *hash_base;
- thash_data_t *cur_cch; // head of overlap search
- int rid;
- int ps;
- union {
- u64 tag; // for VHPT
- struct { // for TLB
- char _tr_idx; // -1: means done of TR search
- char cl;
- search_section_t s_sect; // search section combinations
- };
- };
- u64 _curva; // current address to search
- u64 _eva;
-} thash_internal_t;
- */
-//#define THASH_CB_MAGIC 0x55aa00aa55aa55aaUL
+
typedef struct thash_cb {
- /* THASH base information */
-// THASH_TYPE ht; // For TLB or VHPT
-// u64 magic;
- thash_data_t *hash; // hash table pointer, aligned at thash_sz.
- u64 hash_sz; // size of above data.
- void *cch_buf; // base address of collision chain.
- u64 cch_sz; // size of above data.
-// THASH_FN *hash_func;
-// GET_RR_FN *get_rr_fn;
-// RECYCLE_FN *recycle_notifier;
- thash_data_t *cch_freelist;
-// struct vcpu *vcpu;
- PTA pta;
-// struct thash_cb *vhpt;
- /* VTLB/VHPT common information */
-// FIND_OVERLAP_FN *find_overlap;
-// FIND_NEXT_OVL_FN *next_overlap;
-// REM_THASH_FN *rem_hash; // remove hash entry.
-// INS_THASH_FN *ins_hash; // insert hash entry.
-// REM_NOTIFIER_FN *remove_notifier;
- /* private information */
-// thash_internal_t priv;
-// union {
-// tlb_special_t *ts;
-// vhpt_special *vs;
-// };
- // Internal positon information, buffer and storage etc. TBD
+ /* THASH base information */
+ thash_data_t *hash; // hash table pointer, aligned at thash_sz.
+ u64 hash_sz; // size of above data.
+ void *cch_buf; // base address of collision chain.
+ u64 cch_sz; // size of above data.
+ thash_data_t *cch_freelist;
+ thash_data_t *cch_rec_head; // cch recycle header
+ PTA pta;
} thash_cb_t;
-
-//#define ITR(hcb,id) ((hcb)->ts->itr[id])
-//#define DTR(hcb,id) ((hcb)->ts->dtr[id])
-#define INVALIDATE_HASH_HEADER(hcb,hash) INVALIDATE_TLB_HEADER(hash)
-/* \
-{ if ((hcb)->ht==THASH_TLB){ \
- INVALIDATE_TLB_HEADER(hash); \
- }else{ \
- INVALIDATE_VHPT_HEADER(hash); \
- } \
-}
- */
-#define PURGABLE_ENTRY(hcb,en) 1
-// ((hcb)->ht == THASH_VHPT || ( (en)->tc && !(en->locked)) )
-
/*
* Initialize internal control data before service.
@@ -281,7 +216,6 @@ extern void thash_init(thash_cb_t *hcb,
* 4: Return the entry in hash table or collision chain.
*
*/
-extern void thash_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa);
//extern void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
//extern void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx);
extern int vtr_find_overlap(struct vcpu *vcpu, u64 va, u64 ps, int is_data);
@@ -368,6 +302,10 @@ extern int fetch_code(struct vcpu *vcpu,
extern int fetch_code(struct vcpu *vcpu, u64 gip, u64 *code1, u64 *code2);
extern void emulate_io_inst(struct vcpu *vcpu, u64 padr, u64 ma);
extern int vhpt_enabled(struct vcpu *vcpu, uint64_t vadr, vhpt_ref_t ref);
+extern void vtlb_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 va);
+extern u64 translate_phy_pte(struct vcpu *v, u64 *pte, u64 itir, u64 va);
+extern void thash_vhpt_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa);
+extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
static inline void vmx_vcpu_set_tr (thash_data_t *trp, u64 pte, u64 itir, u64 va, u64 rid)
{
@@ -377,13 +315,6 @@ static inline void vmx_vcpu_set_tr (thas
trp->rid = rid;
}
-
-//#define VTLB_DEBUG
-#ifdef VTLB_DEBUG
-extern void check_vtlb_sanity(thash_cb_t *vtlb);
-extern void dump_vtlb(thash_cb_t *vtlb);
-#endif
-
#endif /* __ASSEMBLY__ */
#endif /* XEN_TLBthash_H */
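
One detail that makes the simplified invalidation macros above safe: in the ia64 long-format VHPT, bit 63 of the tag word is the architected ti (tag invalid) bit, and ttag of an implemented address always produces a tag with that bit clear. Storing etag = 1UL<<63 therefore both marks the entry invalid and guarantees no lookup tag can ever match it. An illustrative check (bitfield layout abridged from thash_data_t):

    union vhpt_tag {
        struct { u64 tag : 63, ti : 1; };  /* ti == bit 63              */
        u64 etag;
    } e;

    e.etag = 1UL << 63;        /* INVALIDATE_VHPT_HEADER                */
    assert(e.ti == 1);         /* INVALID_VHPT() is now true            */
    assert(e.etag != ia64_ttag(va));  /* can never match a real tag     */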
diff -r 18b087bafac6 -r 166073f830a3 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h Tue May 30 08:46:21 2006 -0600
+++ b/xen/include/asm-ia64/vmx_vcpu.h Tue May 30 10:28:59 2006 -0600
@@ -121,6 +121,7 @@ extern void dvhpt_fault (VCPU *vcpu, u64
extern void dvhpt_fault (VCPU *vcpu, u64 vadr);
extern void dnat_page_consumption (VCPU *vcpu, uint64_t vadr);
extern void page_not_present(VCPU *vcpu, u64 vadr);
+extern void data_access_rights(VCPU *vcpu, u64 vadr);
/**************************************************************************
VCPU control register access routines