xen-ia64-devel

[Xen-ia64-devel] [PATCH] raise a fault with unimplemented physical address

An unimplemented data address fault or an unimplemented instruction address
trap should be raised when the guest presents an unimplemented physical
address.
Also some cleanups.

Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>

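For reference, below is a minimal standalone sketch of what the new
unimpl_phys_addr() check does. The UC_BIT and MAX_PHYS_ADDR_BITS values are
illustrative assumptions for this sketch, not necessarily the values Xen
defines for pa_clear_uc() and MAX_PHYS_ADDR_BITS.

#include <stdint.h>
#include <stdio.h>

#define UC_BIT              63  /* uncacheable attribute bit (assumed position) */
#define MAX_PHYS_ADDR_BITS  50  /* implemented physical address width (assumed) */

/* Drop the UC attribute so only the address bits are examined. */
static inline uint64_t pa_clear_uc(uint64_t paddr)
{
    return paddr & ~((uint64_t)1 << UC_BIT);
}

/* Non-zero if any bit above the implemented width is set. */
static inline int unimpl_phys_addr(uint64_t paddr)
{
    return (pa_clear_uc(paddr) >> MAX_PHYS_ADDR_BITS) != 0;
}

int main(void)
{
    printf("%d\n", unimpl_phys_addr(0x0000000010000000ULL)); /* 0: implemented address */
    printf("%d\n", unimpl_phys_addr(0x8000000010000000ULL)); /* 0: only the UC bit is extra */
    printf("%d\n", unimpl_phys_addr(0x0004000000000000ULL)); /* 1: bit 50 is above the width */
    return 0;
}
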
diff -r 716a637722e4 xen/arch/ia64/vmx/vmx_fault.c
--- a/xen/arch/ia64/vmx/vmx_fault.c     Mon Mar 10 11:55:40 2008 -0600
+++ b/xen/arch/ia64/vmx/vmx_fault.c     Fri Mar 14 11:03:35 2008 +0900
@@ -328,6 +328,11 @@ static int vmx_handle_lds(REGS* regs)
     return IA64_FAULT;
 }
 
+static inline int unimpl_phys_addr (u64 paddr)
+{
+    return (pa_clear_uc(paddr) >> MAX_PHYS_ADDR_BITS) != 0;
+}
+
 /* We came here because the H/W VHPT walker failed to find an entry */
 IA64FAULT
 vmx_hpw_miss(u64 vadr, u64 vec, REGS* regs)
@@ -361,10 +366,18 @@ vmx_hpw_miss(u64 vadr, u64 vec, REGS* re
             /* DTLB miss.  */
             if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
                 return vmx_handle_lds(regs);
+            if (unlikely(unimpl_phys_addr(vadr))) {
+                unimpl_daddr(v);
+                return IA64_FAULT;
+            }
             pte = lookup_domain_mpa(v->domain, pa_clear_uc(vadr), NULL);
-            /* Clear UC bit in vadr with the shifts.  */
             if (v->domain != dom0 && (pte & GPFN_IO_MASK)) {
                 emulate_io_inst(v, pa_clear_uc(vadr), 4, pte);
+                return IA64_FAULT;
+            }
+        } else {
+            if (unlikely(unimpl_phys_addr(vadr))) {
+                unimpl_iaddr_trap(v, vadr);
                 return IA64_FAULT;
             }
         }
diff -r 716a637722e4 xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c      Mon Mar 10 11:55:40 2008 -0600
+++ b/xen/arch/ia64/vmx/vmx_virt.c      Fri Mar 14 11:02:24 2008 +0900
@@ -277,9 +277,6 @@ static IA64FAULT vmx_emul_ptc_l(VCPU *vc
     }
 #ifdef  VMAL_NO_FAULT_CHECK
     if (unimplemented_gva(vcpu,r3) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
@@ -338,9 +335,6 @@ static IA64FAULT vmx_emul_ptc_g(VCPU *vc
     }
 #ifdef  VMAL_NO_FAULT_CHECK
     if (unimplemented_gva(vcpu,r3) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
@@ -374,9 +368,6 @@ static IA64FAULT vmx_emul_ptc_ga(VCPU *v
     }
 #ifdef  VMAL_NO_FAULT_CHECK
     if (unimplemented_gva(vcpu,r3) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
@@ -411,9 +402,6 @@ static IA64FAULT ptr_fault_check(VCPU *v
         return IA64_FAULT;
     }
     if (unimplemented_gva(vcpu,r3) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
    }
@@ -635,9 +623,6 @@ static IA64FAULT vmx_emul_itr_d(VCPU *vc
        return IA64_FAULT;
     }
     if (unimplemented_gva(vcpu, ifa)) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
    }
@@ -703,9 +688,6 @@ static IA64FAULT vmx_emul_itr_i(VCPU *vc
        return IA64_FAULT;
     }
     if (unimplemented_gva(vcpu, ifa)) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
@@ -764,9 +746,6 @@ static IA64FAULT itc_fault_check(VCPU *v
     }
 #ifdef  VMAL_NO_FAULT_CHECK
     if (unimplemented_gva(vcpu,ifa) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
diff -r 716a637722e4 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h   Mon Mar 10 11:55:40 2008 -0600
+++ b/xen/include/asm-ia64/vmx_vcpu.h   Fri Mar 14 11:02:24 2008 +0900
@@ -582,6 +582,11 @@ static inline void
 static inline void
 unimpl_daddr (VCPU *vcpu)
 {
+       ISR isr;
+
+       isr.val = set_isr_ei_ni(vcpu);
+       isr.code = IA64_UNIMPL_DADDR_FAULT;
+       vcpu_set_isr(vcpu, isr.val);
        _general_exception(vcpu);
 }
 
@@ -695,4 +700,21 @@ data_access_rights(VCPU *vcpu, u64 vadr)
        set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
        inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
 }
+
+/*
+ * Unimplemented Instruction Address Trap
+ *  @ Lower-Privilege Transfer Trap Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+static inline void
+unimpl_iaddr_trap (VCPU *vcpu, u64 vadr)
+{
+       ISR isr;
+
+       isr.val = set_isr_ei_ni(vcpu);
+       isr.code = IA64_UNIMPL_IADDR_TRAP;
+       vcpu_set_isr(vcpu, isr.val);
+       vcpu_set_ifa(vcpu, vadr);
+       inject_guest_interruption(vcpu, IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR);
+}
 #endif
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel