# HG changeset patch
# User Tristan Gingold
# Date 1203315796 -3600
# Node ID 82a165516da6b696da444bf8dffd87b22816153a
# Parent  f05d78cd9ed47a99e95694742c4036de8ddf5a4e
Handle long vhpt format for ttag and thash.
Improve tak emulation.

Signed-off-by: Tristan Gingold

diff -r f05d78cd9ed4 -r 82a165516da6 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c	Mon Feb 18 07:09:06 2008 +0100
+++ b/xen/arch/ia64/vmx/vmmu.c	Mon Feb 18 07:23:16 2008 +0100
@@ -467,31 +467,37 @@ u64 vmx_vcpu_thash(VCPU *vcpu, u64 vadr)
     ia64_rr vrr;
     u64 pval;
     u64 vhpt_offset;
+    u64 mask;
+
+    vpta.val = vmx_vcpu_get_pta(vcpu);
+    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
+    mask = (1UL << vpta.size) - 1;
+    if (vpta.vf) {
+        vadr = (vadr & 0x1fffffffffffffffUL) >> vrr.ps;
+        vhpt_offset = vadr ^ vrr.rid;
+        pval = (vpta.val & ~0x7fffUL) + ((vhpt_offset << 5) & mask);
+    } else {
+        vhpt_offset = ((vadr >> vrr.ps) << 3) & mask;
+        pval = (vadr & VRN_MASK) |
+            (vpta.val<<3>>(vpta.size+3)<<(vpta.size))|
+            vhpt_offset;
+    }
+    return pval;
+}
+
+
+u64 vmx_vcpu_ttag(VCPU *vcpu, u64 vadr)
+{
+    ia64_rr vrr;
+    PTA vpta;
+    u64 pval;
+    u64 rid;
     vpta.val = vmx_vcpu_get_pta(vcpu);
     vcpu_get_rr(vcpu, vadr, &vrr.rrval);
     if(vpta.vf){
-        pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.rrval,
-                             vpta.val, 0, 0, 0, 0);
-        pval = vpta.val & ~0xffff;
-    }else{
-        vhpt_offset=((vadr>>vrr.ps)<<3)&((1UL<<(vpta.size))-1);
-        pval = (vadr & VRN_MASK) |
-            (vpta.val<<3>>(vpta.size+3)<<(vpta.size))|
-            vhpt_offset;
-    }
-    return pval;
-}
-
-
-u64 vmx_vcpu_ttag(VCPU *vcpu, u64 vadr)
-{
-    ia64_rr vrr;
-    PTA vpta;
-    u64 pval;
-    vpta.val = vmx_vcpu_get_pta(vcpu);
-    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
-    if(vpta.vf){
-        pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.rrval, 0, 0, 0, 0, 0);
+        vadr = (vadr & 0x1fffffffffffffffUL) >> vrr.ps;
+        rid = vrr.rid;
+        pval = vadr ^ (rid << 39);
     }else{
         pval = 1;
     }
@@ -507,83 +513,89 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 v
     REGS *regs;
     u64 vhpt_adr, madr;
     IA64_PSR vpsr;
-    regs=vcpu_regs(vcpu);
-    pt_isr.val=VMX(vcpu,cr_isr);
-    visr.val=0;
-    visr.ei=pt_isr.ei;
-    visr.ir=pt_isr.ir;
+
+    regs = vcpu_regs(vcpu);
+    pt_isr.val = VMX(vcpu, cr_isr);
+    visr.val = 0;
+    visr.ei = pt_isr.ei;
+    visr.ir = pt_isr.ir;
     vpsr.val = VCPU(vcpu, vpsr);
-    visr.na=1;
+    visr.na = 1;
+
+    /* First look in VTLB. */
     data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
-    if(data){
-        if(data->p==0){
+    if (data) {
+        if (data->p == 0) {
             vcpu_set_isr(vcpu,visr.val);
             data_page_not_present(vcpu, vadr);
             return IA64_FAULT;
-        }else if(data->ma == VA_MATTR_NATPAGE){
+        } else if (data->ma == VA_MATTR_NATPAGE) {
             vcpu_set_isr(vcpu, visr.val);
             dnat_page_consumption(vcpu, vadr);
             return IA64_FAULT;
-        }else{
+        } else {
             *padr = ((data->ppn >> (data->ps - 12)) << data->ps) |
                     (vadr & (PSIZE(data->ps) - 1));
             return IA64_NO_FAULT;
         }
     }
+
+    /* Look in mVHPT. */
     data = vhpt_lookup(vadr);
-    if(data){
-        if(data->p==0){
+    if (data) {
+        if (data->p == 0) {
             vcpu_set_isr(vcpu,visr.val);
             data_page_not_present(vcpu, vadr);
             return IA64_FAULT;
-        }else if(data->ma == VA_MATTR_NATPAGE){
+        } else if (data->ma == VA_MATTR_NATPAGE) {
            vcpu_set_isr(vcpu, visr.val);
            dnat_page_consumption(vcpu, vadr);
            return IA64_FAULT;
-        }else{
+        } else {
             madr = (data->ppn >> (data->ps - 12) << data->ps) |
                    (vadr & (PSIZE(data->ps) - 1));
             *padr = __mpa_to_gpa(madr);
             return IA64_NO_FAULT;
         }
     }
-    else{
-        if(!vhpt_enabled(vcpu, vadr, NA_REF)){
-            if(vpsr.ic){
-                vcpu_set_isr(vcpu, visr.val);
-                alt_dtlb(vcpu, vadr);
-                return IA64_FAULT;
-            }
-            else{
-                nested_dtlb(vcpu);
-                return IA64_FAULT;
-            }
-        }
-        else{
-            vhpt_adr = vmx_vcpu_thash(vcpu, vadr);
-            data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
-            if(data){
-                if(vpsr.ic){
-                    vcpu_set_isr(vcpu, visr.val);
-                    dtlb_fault(vcpu, vadr);
-                    return IA64_FAULT;
-                }
-                else{
-                    nested_dtlb(vcpu);
-                    return IA64_FAULT;
-                }
-            }
-            else{
-                if(vpsr.ic){
-                    vcpu_set_isr(vcpu, visr.val);
-                    dvhpt_fault(vcpu, vadr);
-                    return IA64_FAULT;
-                }
-                else{
-                    nested_dtlb(vcpu);
-                    return IA64_FAULT;
-                }
-            }
+
+    /* If VHPT is not enabled, inject fault. */
+    if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
+        if (vpsr.ic) {
+            vcpu_set_isr(vcpu, visr.val);
+            alt_dtlb(vcpu, vadr);
+            return IA64_FAULT;
+        }
+        else {
+            nested_dtlb(vcpu);
+            return IA64_FAULT;
+        }
+    }
+
+    /* Get gVHPT entry. */
+    vhpt_adr = vmx_vcpu_thash(vcpu, vadr);
+    data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
+    if (data) {
+        /* FIXME: we should read gadr from the entry! */
+        if (vpsr.ic) {
+            vcpu_set_isr(vcpu, visr.val);
+            dtlb_fault(vcpu, vadr);
+            return IA64_FAULT;
+        }
+        else {
+            nested_dtlb(vcpu);
+            return IA64_FAULT;
+        }
+    }
+    else {
+        if (vpsr.ic) {
+            vcpu_set_isr(vcpu, visr.val);
+            dvhpt_fault(vcpu, vadr);
+            return IA64_FAULT;
+        }
+        else {
+            nested_dtlb(vcpu);
+            return IA64_FAULT;
         }
     }
 }
@@ -598,12 +610,25 @@ u64 vmx_vcpu_tak(VCPU *vcpu, u64 vadr)
         return key;
     }
 
-    /* FIXME: if psr.dt is set, look in the guest VHPT. */
     data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
-    if (!data || !data->p)
-        key = 1;
-    else
-        key = data->key << 8;
-
-    return key;
-}
+    if (data) {
+        if (data->p)
+            return data->key << 8;
+        else
+            return 1;
+    }
+
+    data = vhpt_lookup(vadr);
+    if (data) {
+        if (data->p)
+            return data->key << 8; /* FIXME: possible mangling/masking. */
+        else
+            return 1;
+    }
+
+    if (!vhpt_enabled(vcpu, vadr, NA_REF))
+        return 1;
+
+    /* FIXME: look in the guest VHPT. */
+    return 1;
+}
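For reference, the long-format hash and tag arithmetic introduced above for vmx_vcpu_thash()/vmx_vcpu_ttag() can be exercised on its own. The sketch below is a stand-alone user-space rendering of the same formulas, not code from the changeset: the PTA base, PTA size, rid and page-size constants are made-up sample values, whereas the patched functions read them from the guest's PTA and the region register covering the faulting address.

/* Stand-alone sketch of the long-format VHPT hash/tag arithmetic used by
 * vmx_vcpu_thash()/vmx_vcpu_ttag() when vpta.vf is set.  All SAMPLE_*
 * constants are arbitrary illustrative values, not Xen definitions. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define SAMPLE_PTA_BASE  0x0000004000000000UL  /* hypothetical VHPT base (vpta) */
#define SAMPLE_PTA_SIZE  20                    /* hypothetical vpta.size: 1MB table */
#define SAMPLE_RID       0x1234UL              /* hypothetical region id */
#define SAMPLE_PS        14                    /* hypothetical page size: 16KB pages */

/* Hash: strip the VRN bits, shift out the page offset, xor with the rid,
 * scale by the 32-byte long-format entry size and wrap into the table. */
static uint64_t lf_thash(uint64_t vadr)
{
    uint64_t mask = (1UL << SAMPLE_PTA_SIZE) - 1;
    uint64_t vpn = (vadr & 0x1fffffffffffffffUL) >> SAMPLE_PS;
    uint64_t off = vpn ^ SAMPLE_RID;

    return (SAMPLE_PTA_BASE & ~0x7fffUL) + ((off << 5) & mask);
}

/* Tag: the same shifted address xored with the rid placed at bit 39. */
static uint64_t lf_ttag(uint64_t vadr)
{
    uint64_t vpn = (vadr & 0x1fffffffffffffffUL) >> SAMPLE_PS;

    return vpn ^ ((uint64_t)SAMPLE_RID << 39);
}

int main(void)
{
    uint64_t vadr = 0x2000000000012000UL;      /* sample guest virtual address */

    printf("thash = 0x%016" PRIx64 "\n", lf_thash(vadr));
    printf("ttag  = 0x%016" PRIx64 "\n", lf_ttag(vadr));
    return 0;
}

Substituting the guest's real PTA, rid and page size gives the values the patched functions return when vpta.vf is set; the short (per-region) format path is unchanged apart from reusing the precomputed mask.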