WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
xen-changelog

[Xen-changelog] [IA64] ptc.ga for SMP-g

# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID b09e8f46c9f6c6daf59ba97542af05e4981d0579
# Parent  bfc00c83f08318097446b3818c9de6f2ce286fb6
[IA64] ptc.ga for SMP-g

Implementation of the ptc.ga (global TLB purge) instruction for SMP guests (SMP-g).

Signed-off-by: Tristan Gingold <tristan.gingold@xxxxxxxx>
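The heart of the change is that the pte word of a TR/TLB entry becomes a standalone union pte_flags, so a remote CPU can purge an entry with a single 64-bit store and vcpu_set_tr_entry() can publish a fully built entry with one final store. Below is a rough stand-alone sketch of that idea in plain C; the bit layout mirrors the patch, but the helper names (tr_entry, tr_purge, tr_set) and the demo in main() are illustrative only, not Xen code.

/*
 * Stand-alone sketch (not Xen code): the whole pte word fits in one
 * 64-bit value, so a remote purge is a single atomic store of 0 and an
 * insert builds the new value privately and publishes it with one final
 * store.  Helper names are illustrative.
 */
#include <stdio.h>

union pte_flags {
    struct {
        unsigned long p    :  1;   /* present */
        unsigned long      :  1;
        unsigned long ma   :  3;   /* memory attribute */
        unsigned long a    :  1;   /* accessed */
        unsigned long d    :  1;   /* dirty */
        unsigned long pl   :  2;   /* privilege level */
        unsigned long ar   :  3;   /* access rights */
        unsigned long ppn  : 38;   /* physical page number */
        unsigned long      :  2;
        unsigned long ed   :  1;
    };
    unsigned long val;
};

struct tr_entry {
    volatile union pte_flags pte;  /* cleared/published with single stores */
    unsigned long itir, vadr, rid;
};

/* Remote purge: one aligned 64-bit store, which is atomic on ia64. */
static void tr_purge(struct tr_entry *trp)
{
    trp->pte.val = 0;              /* clears p together with everything else */
}

/* Insert: assemble the new pte locally, then publish it last so a
 * concurrent reader never sees a half-built entry. */
static void tr_set(struct tr_entry *trp, unsigned long pte,
                   unsigned long itir, unsigned long ifa)
{
    union pte_flags new_pte;

    trp->itir = itir;
    trp->vadr = ifa & ~0xfffUL;
    new_pte.val = pte;
    if (new_pte.pl < 2)
        new_pte.pl = 2;            /* never hand out a ring-0/1 mapping */
    trp->pte.val = new_pte.val;    /* single atomic publish */
}

int main(void)
{
    struct tr_entry e;

    tr_purge(&e);                                   /* start empty: p == 0 */
    tr_set(&e, 0x1000 | 1, 14 << 2, 0x20000000UL);  /* ppn=1, p=1 */
    printf("present=%d ppn=%lu pl=%lu\n",
           (int)e.pte.p, (unsigned long)e.pte.ppn, (unsigned long)e.pte.pl);
    tr_purge(&e);
    printf("after purge: present=%d\n", (int)e.pte.p);
    return 0;
}

Compared with the old code, which set trp->p and then trp->page_flags as separate stores, the single final store means a reader on another CPU either sees the complete new entry or none at all.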

diff -r bfc00c83f083 -r b09e8f46c9f6 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Fri Apr 07 14:06:44 2006 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c  Mon Apr 10 14:51:38 2006 -0600
@@ -459,7 +459,7 @@ IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT
     va = PAGEALIGN(ifa, ps);
     index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
     if (index>=0) {
-        vcpu->arch.dtrs[index].p=0;
+        vcpu->arch.dtrs[index].pte.p=0;
         index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
     }
     hcb = vmx_vcpu_get_vtlb(vcpu);
@@ -476,7 +476,7 @@ IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT
     va = PAGEALIGN(ifa, ps);
     index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
     if (index>=0) {
-        vcpu->arch.itrs[index].p=0;
+        vcpu->arch.itrs[index].pte.p=0;
         index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
     }
     hcb = vmx_vcpu_get_vtlb(vcpu);
diff -r bfc00c83f083 -r b09e8f46c9f6 xen/arch/ia64/xen/process.c
--- a/xen/arch/ia64/xen/process.c       Fri Apr 07 14:06:44 2006 -0600
+++ b/xen/arch/ia64/xen/process.c       Mon Apr 10 14:51:38 2006 -0600
@@ -287,12 +287,24 @@ void ia64_do_page_fault (unsigned long a
                return;
        }
 
+ again:
        fault = vcpu_translate(current,address,is_data,0,&pteval,&itir,&iha);
-       if (fault == IA64_NO_FAULT) {
+       if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
                pteval = translate_domain_pte(pteval,address,itir);
                vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,(itir>>2)&0x3f);
+               if (fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) {
+                       /* dtlb has been purged in-between.  This dtlb was
+                          matching.  Undo the work.  */
+#ifdef VHPT_GLOBAL
+                       vhpt_flush_address (address, 1);
+#endif
+                       ia64_ptcl(address, 1<<2);
+                       ia64_srlz_i();
+                       goto again;
+               }
                return;
        }
+
        if (!user_mode (regs)) {
                /* The fault occurs inside Xen.  */
                if (!ia64_done_with_exception(regs)) {
diff -r bfc00c83f083 -r b09e8f46c9f6 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Fri Apr 07 14:06:44 2006 -0600
+++ b/xen/arch/ia64/xen/vcpu.c  Mon Apr 10 14:51:38 2006 -0600
@@ -1253,17 +1253,23 @@ int warn_region0_address = 0; // FIXME l
 int warn_region0_address = 0; // FIXME later: tie to a boot parameter?
 
 // FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
-static inline int vcpu_match_tr_entry(TR_ENTRY *trp, UINT64 ifa, UINT64 rid)
-{
-       return trp->p && trp->rid == rid 
+static inline int vcpu_match_tr_entry_no_p(TR_ENTRY *trp, UINT64 ifa, UINT64 rid)
+{
+       return trp->rid == rid 
                && ifa >= trp->vadr
                && ifa <= (trp->vadr + (1L << trp->ps) - 1);
 }
 
+static inline int vcpu_match_tr_entry(TR_ENTRY *trp, UINT64 ifa, UINT64 rid)
+{
+       return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
+}
+
 IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, BOOLEAN in_tpa, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
 {
        unsigned long region = address >> 61;
-       unsigned long pta, pte, rid, rr;
+       unsigned long pta, rid, rr;
+       union pte_flags pte;
        int i;
        TR_ENTRY *trp;
 
@@ -1283,6 +1289,7 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
                         */           
                        printk("vcpu_translate: bad physical address: 0x%lx\n",
                               address);
+
                } else {
                        *pteval = (address & _PAGE_PPN_MASK) | __DIRTY_BITS |
                                  _PAGE_PL_2 | _PAGE_AR_RWX;
@@ -1307,7 +1314,7 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
                if (vcpu_quick_region_check(vcpu->arch.dtr_regions,address)) {
                        for (trp = vcpu->arch.dtrs, i = NDTRS; i; i--, trp++) {
                                if (vcpu_match_tr_entry(trp,address,rid)) {
-                                       *pteval = trp->page_flags;
+                                       *pteval = trp->pte.val;
                                        *itir = trp->itir;
                                        tr_translate_count++;
                                        return IA64_NO_FAULT;
@@ -1320,7 +1327,7 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
                if (vcpu_quick_region_check(vcpu->arch.itr_regions,address)) {
                        for (trp = vcpu->arch.itrs, i = NITRS; i; i--, trp++) {
                                if (vcpu_match_tr_entry(trp,address,rid)) {
-                                       *pteval = trp->page_flags;
+                                       *pteval = trp->pte.val;
                                        *itir = trp->itir;
                                        tr_translate_count++;
                                        return IA64_NO_FAULT;
@@ -1332,12 +1339,14 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
        /* check 1-entry TLB */
        // FIXME?: check dtlb for inst accesses too, else bad things happen?
        trp = &vcpu->arch.dtlb;
-       if (/* is_data && */ vcpu_match_tr_entry(trp,address,rid)) {
-               if (vcpu->domain==dom0 && !in_tpa) *pteval = trp->page_flags;
+       pte = trp->pte;
+       if (/* is_data && */ pte.p
+           && vcpu_match_tr_entry_no_p(trp,address,rid)) {
+               if (vcpu->domain==dom0 && !in_tpa) *pteval = pte.val;
                else *pteval = vcpu->arch.dtlb_pte;
                *itir = trp->itir;
                dtlb_translate_count++;
-               return IA64_NO_FAULT;
+               return IA64_USE_TLB;
        }
 
        /* check guest VHPT */
@@ -1358,7 +1367,8 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
        if (((address ^ pta) & ((itir_mask(pta) << 3) >> 3)) == 0)
                return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
 
-       if (__copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0)
+       if (!__access_ok (*iha)
+           || __copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0)
                // virtual VHPT walker "missed" in TLB
                return IA64_VHPT_FAULT;
 
@@ -1367,12 +1377,12 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
        * instead of inserting a not-present translation, this allows
        * vectoring directly to the miss handler.
        */
-       if (!(pte & _PAGE_P))
+       if (!pte.p)
                return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
 
        /* found mapping in guest VHPT! */
        *itir = rr & RR_PS_MASK;
-       *pteval = pte;
+       *pteval = pte.val;
        vhpt_translate_count++;
        return IA64_NO_FAULT;
 }
@@ -1383,7 +1393,7 @@ IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 va
        IA64FAULT fault;
 
        fault = vcpu_translate(vcpu, vadr, TRUE, TRUE, &pteval, &itir, &iha);
-       if (fault == IA64_NO_FAULT)
+       if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB)
        {
                mask = itir_mask(itir);
                *padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
@@ -1670,24 +1680,27 @@ IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT6
 
 static inline void vcpu_purge_tr_entry(TR_ENTRY *trp)
 {
-       trp->p = 0;
+       trp->pte.val = 0;
 }
 
 static void vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
 {
        UINT64 ps;
+       union pte_flags new_pte;
 
        trp->itir = itir;
        trp->rid = VCPU(current,rrs[ifa>>61]) & RR_RID_MASK;
-       trp->p = 1;
        ps = trp->ps;
-       trp->page_flags = pte;
-       if (trp->pl < 2) trp->pl = 2;
+       new_pte.val = pte;
+       if (new_pte.pl < 2) new_pte.pl = 2;
        trp->vadr = ifa & ~0xfff;
        if (ps > 12) { // "ignore" relevant low-order bits
-               trp->ppn &= ~((1UL<<(ps-12))-1);
+               new_pte.ppn &= ~((1UL<<(ps-12))-1);
                trp->vadr &= ~((1UL<<ps)-1);
        }
+
+       /* Atomic write.  */
+       trp->pte.val = new_pte.val;
 }
 
 IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
@@ -1852,19 +1865,6 @@ IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 
        return IA64_ILLOP_FAULT;
 }
 
-#if defined(CONFIG_XEN_SMP) && defined(VHPT_GLOBAL)
-struct ptc_ga_args {
-       unsigned long vadr;
-       unsigned long addr_range;
-};
-
-static void ptc_ga_remote_func (void *varg)
-{
-       struct ptc_ga_args *args = (struct ptc_ga_args *)varg;
-       vhpt_flush_address (args->vadr, args->addr_range);
-}
-#endif
-
 IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
 {
        // FIXME: validate not flushing Xen addresses
@@ -1875,32 +1875,20 @@ IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 
 #ifdef CONFIG_XEN_SMP
        struct domain *d = vcpu->domain;
        struct vcpu *v;
-       struct ptc_ga_args args;
-
-       args.vadr = vadr;
-       args.addr_range = addr_range;
-
-       /* This method is very conservative and should be optimized:
-          - maybe IPI calls can be avoided,
-          - a processor map can be built to avoid duplicate purge
-          - maybe ptc.ga can be replaced by ptc.l+invala.
-          Hopefully, it has no impact when UP.
-       */
+
        for_each_vcpu (d, v) {
-               if (v != vcpu) {
-                       /* Purge tc entry.
-                          Can we do this directly ?  Well, this is just a
-                          single atomic write.  */
-                       vcpu_purge_tr_entry(&PSCBX(v,dtlb));
-                       vcpu_purge_tr_entry(&PSCBX(v,itlb));
+               if (v == vcpu)
+                       continue;
+
+               /* Purge TC entries.
+                  FIXME: clear only if match.  */
+               vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
+               vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
+
 #ifdef VHPT_GLOBAL
-                       /* Flush VHPT on remote processors.
-                          FIXME: invalidate directly the entries? */
-                       smp_call_function_single
-                               (v->processor, &ptc_ga_remote_func,
-                                &args, 0, 1);
-#endif
-               }
+               /* Invalidate VHPT entries.  */
+               vhpt_flush_address_remote (v->processor, vadr, addr_range);
+#endif
        }
 #endif
 
diff -r bfc00c83f083 -r b09e8f46c9f6 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c  Fri Apr 07 14:06:44 2006 -0600
+++ b/xen/arch/ia64/xen/vhpt.c  Mon Apr 10 14:51:38 2006 -0600
@@ -71,6 +71,20 @@ void vhpt_flush_address(unsigned long va
 //printf("vhpt_flush_address: blowing away valid tag for vadr=%p\n",vadr);
 //}
                vlfe->ti_tag |= INVALID_TI_TAG;
+               addr_range -= PAGE_SIZE;
+               vadr += PAGE_SIZE;
+       }
+}
+
+void vhpt_flush_address_remote(int cpu,
+                              unsigned long vadr, unsigned long addr_range)
+{
+       while ((long)addr_range > 0) {
+               /* Get the VHPT entry.  */
+               unsigned int off = ia64_thash(vadr) - VHPT_ADDR;
+               volatile struct vhpt_lf_entry *v;
+               v =__va(per_cpu(vhpt_paddr, cpu) + off);
+               v->ti_tag = INVALID_TI_TAG;
                addr_range -= PAGE_SIZE;
                vadr += PAGE_SIZE;
        }
diff -r bfc00c83f083 -r b09e8f46c9f6 xen/include/asm-ia64/ia64_int.h
--- a/xen/include/asm-ia64/ia64_int.h   Fri Apr 07 14:06:44 2006 -0600
+++ b/xen/include/asm-ia64/ia64_int.h   Mon Apr 10 14:51:38 2006 -0600
@@ -38,6 +38,7 @@
 #define        IA64_RFI_IN_PROGRESS    0x0002
 #define IA64_RETRY              0x0003
 #define IA64_FORCED_IFA         0x0004
+#define IA64_USE_TLB           0x0005
 #define        IA64_ILLOP_FAULT        (IA64_GENEX_VECTOR | 0x00)
 #define        IA64_PRIVOP_FAULT       (IA64_GENEX_VECTOR | 0x10)
 #define        IA64_PRIVREG_FAULT      (IA64_GENEX_VECTOR | 0x20)
diff -r bfc00c83f083 -r b09e8f46c9f6 xen/include/asm-ia64/tlb.h
--- a/xen/include/asm-ia64/tlb.h        Fri Apr 07 14:06:44 2006 -0600
+++ b/xen/include/asm-ia64/tlb.h        Mon Apr 10 14:51:38 2006 -0600
@@ -4,23 +4,24 @@
 #define        NITRS   8
 #define NDTRS  8
 
+union pte_flags {
+    struct {
+           unsigned long p    :  1; // 0
+           unsigned long      :  1; // 1
+           unsigned long ma   :  3; // 2-4
+           unsigned long a    :  1; // 5
+           unsigned long d    :  1; // 6
+           unsigned long pl   :  2; // 7-8
+           unsigned long ar   :  3; // 9-11
+           unsigned long ppn  : 38; // 12-49
+           unsigned long      :  2; // 50-51
+           unsigned long ed   :  1; // 52
+    };
+    unsigned long val;
+};
+
 typedef struct {
-    union {
-        struct {
-            unsigned long p    :  1; // 0
-            unsigned long      :  1; // 1
-            unsigned long ma   :  3; // 2-4
-            unsigned long a    :  1; // 5
-            unsigned long d    :  1; // 6
-            unsigned long pl   :  2; // 7-8
-            unsigned long ar   :  3; // 9-11
-            unsigned long ppn  : 38; // 12-49
-            unsigned long      :  2; // 50-51
-            unsigned long ed   :  1; // 52
-        };
-        unsigned long page_flags;
-    };
-
+    volatile union pte_flags pte;
     union {
         struct {
             unsigned long      :  2; // 0-1
diff -r bfc00c83f083 -r b09e8f46c9f6 xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h       Fri Apr 07 14:06:44 2006 -0600
+++ b/xen/include/asm-ia64/vhpt.h       Mon Apr 10 14:51:38 2006 -0600
@@ -122,6 +122,8 @@ extern void zero_vhpt_stats(void);
 extern void zero_vhpt_stats(void);
 extern int dump_vhpt_stats(char *buf);
 extern void vhpt_flush_address(unsigned long vadr, unsigned long addr_range);
+extern void vhpt_flush_address_remote(int cpu, unsigned long vadr,
+                                     unsigned long addr_range);
 extern void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
                                 unsigned long logps);
 extern void vhpt_insert (unsigned long vadr, unsigned long ptr,
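
The other half of the change is the retry added to ia64_do_page_fault(): a translation satisfied from the 1-entry dtlb now returns IA64_USE_TLB, and if a remote vcpu_ptc_ga() cleared that entry between the lookup and the hardware insert, the handler flushes the just-inserted translation and tries again. Here is a rough stand-alone sketch of that control flow, with illustrative stubs standing in for the Xen routines.

/*
 * Stand-alone sketch (not Xen code) of the new retry path in
 * ia64_do_page_fault().  The stubs below model only one thing: a remote
 * ptc.ga clearing the 1-entry dtlb between the lookup and the insert.
 */
#include <stdbool.h>
#include <stdio.h>

enum fault { NO_FAULT, USE_TLB, TLB_MISS };

static bool dtlb_present = true;   /* models vcpu->arch.dtlb.pte.p */
static int purge_pending = 1;      /* fire one simulated remote purge */

static enum fault translate(unsigned long addr, unsigned long *pteval)
{
    (void)addr;
    if (!dtlb_present)
        return TLB_MISS;           /* real code would walk TRs, VHPT, ... */
    *pteval = 0x1000 | 1;
    return USE_TLB;                /* hit in the 1-entry dtlb */
}

static void insert_tc(unsigned long addr, unsigned long pteval)
{
    (void)addr; (void)pteval;
    if (purge_pending-- > 0) {
        dtlb_present = false;      /* the remote purge lands right here */
        printf("remote purge raced with the insert\n");
    }
}

static void purge_local(unsigned long addr)
{
    printf("undoing stale insert for %#lx\n", addr);
}

static void handle_fault(unsigned long addr)
{
    unsigned long pteval;
    enum fault f;

again:
    f = translate(addr, &pteval);
    if (f == NO_FAULT || f == USE_TLB) {
        insert_tc(addr, pteval);
        if (f == USE_TLB && !dtlb_present) {
            /* dtlb purged in between: drop the insert and retry. */
            purge_local(addr);
            goto again;
        }
        return;
    }
    printf("miss at %#lx: deliver the fault to the guest\n", addr);
}

int main(void)
{
    handle_fault(0x20000000UL);
    return 0;
}

Without the IA64_USE_TLB distinction, the handler could re-insert a translation that a remote ptc.ga had just purged, and the purge would be lost.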

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
