On HVM, behavior with an unimplemented physical address is unpredictable.
An Unimplemented Data Address fault or an Unimplemented Instruction
Address trap should be raised instead.
Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>
diff -r 716a637722e4 xen/arch/ia64/vmx/vmx_fault.c
--- a/xen/arch/ia64/vmx/vmx_fault.c Mon Mar 10 11:55:40 2008 -0600
+++ b/xen/arch/ia64/vmx/vmx_fault.c Thu Mar 13 13:54:14 2008 +0900
@@ -328,6 +328,11 @@ static int vmx_handle_lds(REGS* regs)
return IA64_FAULT;
}
+static inline int unimpl_phys_addr (u64 paddr)
+{
+ return (pa_clear_uc(paddr) >> MAX_PHYS_ADDR_BITS) != 0;
+}
+
/* We came here because the H/W VHPT walker failed to find an entry */
IA64FAULT
vmx_hpw_miss(u64 vadr, u64 vec, REGS* regs)
@@ -351,20 +356,26 @@ vmx_hpw_miss(u64 vadr, u64 vec, REGS* re
else
panic_domain(regs, "wrong vec:%lx\n", vec);
- /* Physical mode and region is 0 or 4. */
mmu_mode = VMX_MMU_MODE(v);
if ((mmu_mode == VMX_MMU_PHY_DT
- || (mmu_mode == VMX_MMU_PHY_D && type == DSIDE_TLB))
- && (REGION_NUMBER(vadr) & 3) == 0) {
+ || (mmu_mode == VMX_MMU_PHY_D && type == DSIDE_TLB))) {
if (type == DSIDE_TLB) {
u64 pte;
/* DTLB miss. */
if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
return vmx_handle_lds(regs);
+ if (unlikely(unimpl_phys_addr(vadr))) {
+ unimpl_daddr(v);
+ return IA64_FAULT;
+ }
pte = lookup_domain_mpa(v->domain, pa_clear_uc(vadr), NULL);
- /* Clear UC bit in vadr with the shifts. */
if (v->domain != dom0 && (pte & GPFN_IO_MASK)) {
emulate_io_inst(v, pa_clear_uc(vadr), 4, pte);
+ return IA64_FAULT;
+ }
+ } else {
+ if (unlikely(unimpl_phys_addr(vadr))) {
+ unimpl_iaddr_trap(v, vadr);
return IA64_FAULT;
}
}
diff -r 716a637722e4 xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c Mon Mar 10 11:55:40 2008 -0600
+++ b/xen/arch/ia64/vmx/vmx_virt.c Thu Mar 13 13:42:58 2008 +0900
@@ -277,9 +277,6 @@ static IA64FAULT vmx_emul_ptc_l(VCPU *vc
}
#ifdef VMAL_NO_FAULT_CHECK
if (unimplemented_gva(vcpu,r3) ) {
- isr.val = set_isr_ei_ni(vcpu);
- isr.code = IA64_RESERVED_REG_FAULT;
- vcpu_set_isr(vcpu, isr.val);
unimpl_daddr(vcpu);
return IA64_FAULT;
}
@@ -338,9 +335,6 @@ static IA64FAULT vmx_emul_ptc_g(VCPU *vc
}
#ifdef VMAL_NO_FAULT_CHECK
if (unimplemented_gva(vcpu,r3) ) {
- isr.val = set_isr_ei_ni(vcpu);
- isr.code = IA64_RESERVED_REG_FAULT;
- vcpu_set_isr(vcpu, isr.val);
unimpl_daddr(vcpu);
return IA64_FAULT;
}
@@ -374,9 +368,6 @@ static IA64FAULT vmx_emul_ptc_ga(VCPU *v
}
#ifdef VMAL_NO_FAULT_CHECK
if (unimplemented_gva(vcpu,r3) ) {
- isr.val = set_isr_ei_ni(vcpu);
- isr.code = IA64_RESERVED_REG_FAULT;
- vcpu_set_isr(vcpu, isr.val);
unimpl_daddr(vcpu);
return IA64_FAULT;
}
@@ -411,9 +402,6 @@ static IA64FAULT ptr_fault_check(VCPU *v
return IA64_FAULT;
}
if (unimplemented_gva(vcpu,r3) ) {
- isr.val = set_isr_ei_ni(vcpu);
- isr.code = IA64_RESERVED_REG_FAULT;
- vcpu_set_isr(vcpu, isr.val);
unimpl_daddr(vcpu);
return IA64_FAULT;
}
@@ -635,9 +623,6 @@ static IA64FAULT vmx_emul_itr_d(VCPU *vc
return IA64_FAULT;
}
if (unimplemented_gva(vcpu, ifa)) {
- isr.val = set_isr_ei_ni(vcpu);
- isr.code = IA64_RESERVED_REG_FAULT;
- vcpu_set_isr(vcpu, isr.val);
unimpl_daddr(vcpu);
return IA64_FAULT;
}
@@ -703,9 +688,6 @@ static IA64FAULT vmx_emul_itr_i(VCPU *vc
return IA64_FAULT;
}
if (unimplemented_gva(vcpu, ifa)) {
- isr.val = set_isr_ei_ni(vcpu);
- isr.code = IA64_RESERVED_REG_FAULT;
- vcpu_set_isr(vcpu, isr.val);
unimpl_daddr(vcpu);
return IA64_FAULT;
}
@@ -764,9 +746,6 @@ static IA64FAULT itc_fault_check(VCPU *v
}
#ifdef VMAL_NO_FAULT_CHECK
if (unimplemented_gva(vcpu,ifa) ) {
- isr.val = set_isr_ei_ni(vcpu);
- isr.code = IA64_RESERVED_REG_FAULT;
- vcpu_set_isr(vcpu, isr.val);
unimpl_daddr(vcpu);
return IA64_FAULT;
}
diff -r 716a637722e4 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h Mon Mar 10 11:55:40 2008 -0600
+++ b/xen/include/asm-ia64/vmx_vcpu.h Thu Mar 13 13:55:46 2008 +0900
@@ -582,6 +582,11 @@ static inline void
static inline void
unimpl_daddr (VCPU *vcpu)
{
+ ISR isr;
+
+ isr.val = set_isr_ei_ni(vcpu);
+ isr.code = IA64_UNIMPL_DADDR_FAULT;
+ vcpu_set_isr(vcpu, isr.val);
_general_exception(vcpu);
}
@@ -695,4 +700,21 @@ data_access_rights(VCPU *vcpu, u64 vadr)
set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
}
+
+/*
+ * Unimplemented Instruction Address Trap
+ * @ Lower-Privilege Transfer Trap Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+static inline void
+unimpl_iaddr_trap (VCPU *vcpu, u64 vadr)
+{
+ ISR isr;
+
+ isr.val = set_isr_ei_ni(vcpu);
+ isr.code = IA64_UNIMPL_IADDR_TRAP;
+ vcpu_set_isr(vcpu, isr.val);
+ vcpu_set_ifa(vcpu, vadr);
+ inject_guest_interruption(vcpu, IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR);
+}
#endif
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel
|