[Xen-ia64-devel] [PATCH] Simplify thash_purge_and_insert()

To: xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-ia64-devel] [PATCH] Simplify thash_purge_and_insert()
From: Kouya Shimura <kouya@xxxxxxxxxxxxxx>
Date: Wed, 7 Nov 2007 13:24:14 +0900
Delivery-date: Tue, 06 Nov 2007 20:24:52 -0800
List-help: <mailto:xen-ia64-devel-request@lists.xensource.com?subject=help>
List-id: Discussion of the ia64 port of Xen <xen-ia64-devel.lists.xensource.com>
List-post: <mailto:xen-ia64-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ia64-devel>, <mailto:xen-ia64-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ia64-devel>, <mailto:xen-ia64-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-ia64-devel-bounces@xxxxxxxxxxxxxxxxxxx

Hi,

This patch simplifies thash_purge_and_insert() for readability.
PV domains never use this function, so the VMX_DOMAIN(v) check and its
non-VMX branch can be dropped.
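
The patch also moves the vcpu_quick_region_set() bookkeeping into
vtlb_insert() itself (which becomes static), so the call sites no longer
repeat it, and the now-unused extern declaration is removed from vmmu.h.
In condensed form, the resulting flow of thash_purge_and_insert() reads
as follows (a paraphrase of the hunk below, not the verbatim file
contents):

    int thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa, int type)
    {
        u64 ps, phy_pte, psr;
        ia64_rr mrr;

        ps = itir_ps(itir);
        mrr.rrval = ia64_get_rr(ifa);
        phy_pte = translate_phy_pte(v, &pte, itir, ifa);

        /* Drop any stale translation for ifa before inserting. */
        vtlb_purge(v, ifa, ps);
        vhpt_purge(v, ifa, ps);

        if (pte & VTLB_PTE_IO) {
            /* I/O pages go into the VTLB only; tell the caller. */
            vtlb_insert(v, pte, itir, ifa);
            return 1;
        }

        if (ps != mrr.ps)
            /* Guest page size differs from the RR size: track it in the VTLB. */
            vtlb_insert(v, pte, itir, ifa);

        if (ps >= mrr.ps) {
            vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
        } else {
            /* Subpaging: insert directly with interruption collection off. */
            phy_pte &= ~PAGE_FLAGS_RV_MASK;
            psr = ia64_clear_ic();
            ia64_itc(type + 1, ifa, phy_pte, IA64_ITIR_PS_KEY(ps, 0));
            ia64_set_psr(psr);
            ia64_srlz_i();
        }
        return 0;
    }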

Thanks,
Kouya

Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>

diff -r a071725bda88 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c  Tue Nov 06 14:20:05 2007 -0700
+++ b/xen/arch/ia64/vmx/vtlb.c  Wed Nov 07 11:09:47 2007 +0900
@@ -413,7 +413,7 @@ static thash_data_t *__alloc_chain(thash
  *  3: The caller need to make sure the new entry will not overlap
  *     with any existed entry.
  */
-void vtlb_insert(VCPU *v, u64 pte, u64 itir, u64 va)
+static void vtlb_insert(VCPU *v, u64 pte, u64 itir, u64 va)
 {
     thash_data_t *hash_table, *cch;
     /* int flag; */
@@ -421,6 +421,8 @@ void vtlb_insert(VCPU *v, u64 pte, u64 i
     /* u64 gppn, ppns, ppne; */
     u64 tag, len;
     thash_cb_t *hcb = &v->arch.vtlb;
+
+    vcpu_quick_region_set(PSCBX(v, tc_regions), va);
 
     vcpu_get_rr(v, va, &vrr.rrval);
     vrr.ps = itir_ps(itir);
@@ -545,60 +547,35 @@ u64 translate_phy_pte(VCPU *v, u64 *pte,
  */
 int thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa, int type)
 {
-    u64 ps;//, va;
-    u64 phy_pte;
+    u64 ps, phy_pte, psr;
     ia64_rr mrr;
-    int ret = 0;
 
     ps = itir_ps(itir);
     mrr.rrval = ia64_get_rr(ifa);
-    if (VMX_DOMAIN(v)) {
-        phy_pte = translate_phy_pte(v, &pte, itir, ifa);
-
-        if (pte & VTLB_PTE_IO)
-            ret = 1;
-        vtlb_purge(v, ifa, ps);
-        vhpt_purge(v, ifa, ps);
-        if (ps == mrr.ps) {
-            if (!(pte & VTLB_PTE_IO)) {
-                vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
-            }
-            else{
-                vtlb_insert(v, pte, itir, ifa);
-                vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
-            }
-        }
-        else if (ps > mrr.ps) {
-            vtlb_insert(v, pte, itir, ifa);
-            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
-            if (!(pte & VTLB_PTE_IO)) {
-                vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
-            }
-        }
-        else {
-            u64 psr;
-
-            vtlb_insert(v, pte, itir, ifa);
-            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
-            if (!(pte & VTLB_PTE_IO)) {
-                phy_pte  &= ~PAGE_FLAGS_RV_MASK;
-                psr = ia64_clear_ic();
-                ia64_itc(type + 1, ifa, phy_pte, IA64_ITIR_PS_KEY(ps, 0));
-                ia64_set_psr(psr);
-                ia64_srlz_i();
-            }
-        }
-    }
-    else{
-        phy_pte = translate_phy_pte(v, &pte, itir, ifa);
-        if (ps != PAGE_SHIFT) {
-            vtlb_insert(v, pte, itir, ifa);
-            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
-        }
-        machine_tlb_purge(ifa, ps);
+
+    phy_pte = translate_phy_pte(v, &pte, itir, ifa);
+
+    vtlb_purge(v, ifa, ps);
+    vhpt_purge(v, ifa, ps);
+
+    if (pte & VTLB_PTE_IO) {
+        vtlb_insert(v, pte, itir, ifa);
+        return 1;
+    }
+
+    if (ps != mrr.ps)
+        vtlb_insert(v, pte, itir, ifa);
+
+    if (ps >= mrr.ps) {
         vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
-    }
-    return ret;
+    } else { /* Subpaging */
+        phy_pte &= ~PAGE_FLAGS_RV_MASK;
+        psr = ia64_clear_ic();
+        ia64_itc(type + 1, ifa, phy_pte, IA64_ITIR_PS_KEY(ps, 0));
+        ia64_set_psr(psr);
+        ia64_srlz_i();
+    }
+    return 0;
 }
 
 /*
diff -r a071725bda88 xen/include/asm-ia64/vmmu.h
--- a/xen/include/asm-ia64/vmmu.h       Tue Nov 06 14:20:05 2007 -0700
+++ b/xen/include/asm-ia64/vmmu.h       Wed Nov 07 11:08:30 2007 +0900
@@ -216,7 +216,6 @@ extern unsigned long fetch_code(struct v
 extern unsigned long fetch_code(struct vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);
 extern void emulate_io_inst(struct vcpu *vcpu, u64 padr, u64 ma);
 extern int vhpt_enabled(struct vcpu *vcpu, uint64_t vadr, vhpt_ref_t ref);
-extern void vtlb_insert(struct vcpu *vcpu, u64 pte, u64 itir, u64 va);
 extern u64 translate_phy_pte(struct vcpu *v, u64 *pte, u64 itir, u64 va);
 extern void thash_vhpt_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa,
                               int type);
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel