
To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Improved TLB flushing of subsets of CPUs. Can now do remote invlpg
From: BitKeeper Bot <riel@xxxxxxxxxxx>
Date: Thu, 31 Mar 2005 13:12:29 +0000
Delivery-date: Thu, 31 Mar 2005 14:02:29 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: Xen Development List <xen-devel@xxxxxxxxxxxxxxxxxxxxx>
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
ChangeSet 1.1410, 2005/03/31 14:12:29+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx

        Improved TLB flushing of subsets of CPUs. Can now do remote invlpg
        as well as complete flush.
        Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>



 arch/x86/mm.c              |   31 +++++++++++++++++++---------
 arch/x86/mtrr/generic.c    |    4 +--
 arch/x86/smp.c             |   15 +++++++++----
 arch/x86/x86_32/mm.c       |    4 +--
 arch/x86/x86_64/mm.c       |    4 +--
 common/grant_table.c       |   25 ++++++++++++----------
 include/asm-x86/flushtlb.h |   49 +++++++++++++++++----------------------------
 include/asm-x86/page.h     |   13 -----------
 8 files changed, 70 insertions(+), 75 deletions(-)
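The include/asm-x86/flushtlb.h and page.h hunks are not reproduced below, but the new interface can be inferred from the call sites in this changeset. The following sketch is only our reading of that header, not the actual diff; in particular the exact value of FLUSHVA_ALL is an assumption:

  /* Presumed shape of the new include/asm-x86/flushtlb.h interface (sketch).
   * Everything funnels into __flush_tlb_mask(); a sentinel "virtual address"
   * requests a complete flush instead of a single-page invlpg. */
  #define FLUSHVA_ALL                  (~0UL)   /* assumed sentinel value */

  void __flush_tlb_mask(unsigned long mask, unsigned long va);

  /* Complete TLB flush on every CPU in 'mask'. */
  #define flush_tlb_mask(mask)         __flush_tlb_mask(mask, FLUSHVA_ALL)

  /* Remote invlpg: flush only linear address 'va' on every CPU in 'mask'. */
  #define flush_tlb_one_mask(mask, va) __flush_tlb_mask(mask, va)

The local_flush_tlb*() names used throughout the diff are the renamed single-CPU primitives that were previously called __flush_tlb*().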


diff -Nru a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c 2005-03-31 09:02:28 -05:00
+++ b/xen/arch/x86/mm.c 2005-03-31 09:02:28 -05:00
@@ -1476,23 +1476,34 @@
             break;
     
         case MMUEXT_INVLPG_LOCAL:
-            __flush_tlb_one(op.linear_addr);
+            local_flush_tlb_one(op.linear_addr);
             break;
 
         case MMUEXT_TLB_FLUSH_MULTI:
-            flush_tlb_mask(d->cpuset); /* XXX KAF XXX */
-            break;
-    
         case MMUEXT_INVLPG_MULTI:
-            flush_tlb_mask(d->cpuset); /* XXX KAF XXX */
+        {
+            unsigned long inset = op.cpuset, outset = 0;
+            while ( inset != 0 )
+            {
+                unsigned int vcpu = find_first_set_bit(inset);
+                inset &= ~(1UL<<vcpu);
+                if ( (vcpu < MAX_VIRT_CPUS) &&
+                     ((ed = d->exec_domain[vcpu]) != NULL) )
+                    outset |= 1UL << ed->processor;
+            }
+            if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
+                flush_tlb_mask(outset & d->cpuset);
+            else
+                flush_tlb_one_mask(outset & d->cpuset, op.linear_addr);
             break;
+        }
 
         case MMUEXT_TLB_FLUSH_ALL:
             flush_tlb_mask(d->cpuset);
             break;
     
         case MMUEXT_INVLPG_ALL:
-            flush_tlb_mask(d->cpuset); /* XXX KAF XXX */
+            flush_tlb_one_mask(d->cpuset, op.linear_addr);
             break;
 
         case MMUEXT_FLUSH_CACHE:
@@ -2029,10 +2040,10 @@
         percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
         break;
     case UVMF_INVLPG_LOCAL:
-        __flush_tlb_one(va);
+        local_flush_tlb_one(va);
         break;
     case UVMF_INVLPG_ALL:
-        flush_tlb_mask(d->cpuset); /* XXX KAF XXX */
+        flush_tlb_one_mask(d->cpuset, va);
         break;
     }
 
@@ -2317,7 +2328,7 @@
 
     /* Ensure that there are no stale writable mappings in any TLB. */
     /* NB. INVLPG is a serialising instruction: flushes pending updates. */
-    __flush_tlb_one(l1va); /* XXX Multi-CPU guests? */
+    local_flush_tlb_one(l1va); /* XXX Multi-CPU guests? */
     PTWR_PRINTK("[%c] disconnected_l1va at %p now %p\n",
                 PTWR_PRINT_WHICH, ptep, pte);
 
@@ -2636,7 +2647,7 @@
          likely(!shadow_mode_enabled(ed->domain)) )
     {
         *pl2e = mk_l2_pgentry(l2e & ~_PAGE_PRESENT);
-        flush_tlb(); /* XXX Multi-CPU guests? */
+        local_flush_tlb(); /* XXX Multi-CPU guests? */
     }
     
     /* Temporarily map the L1 page, and make a copy of it. */
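The interesting part of the mm.c change is the new MMUEXT_TLB_FLUSH_MULTI / MMUEXT_INVLPG_MULTI path: the guest supplies a set of its own virtual CPUs in op.cpuset, and Xen translates that into the set of physical CPUs those VCPUs are currently scheduled on before flushing. Restated as a standalone helper for readability (the helper name is ours; the fields and find_first_set_bit() are exactly as used in the hunk above):

  /* Sketch only: map a guest virtual-CPU bitmap onto physical CPUs. */
  static unsigned long vcpuset_to_pcpuset(struct domain *d, unsigned long inset)
  {
      unsigned long outset = 0;
      while ( inset != 0 )
      {
          unsigned int vcpu = find_first_set_bit(inset);
          inset &= ~(1UL << vcpu);
          /* Skip out-of-range bits and VCPUs that were never created. */
          if ( (vcpu < MAX_VIRT_CPUS) && (d->exec_domain[vcpu] != NULL) )
              outset |= 1UL << d->exec_domain[vcpu]->processor;
      }
      return outset;   /* callers still intersect this with d->cpuset */
  }

Intersecting the result with d->cpuset afterwards restricts the flush to CPUs on which the domain can actually have stale TLB entries.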
diff -Nru a/xen/arch/x86/mtrr/generic.c b/xen/arch/x86/mtrr/generic.c
--- a/xen/arch/x86/mtrr/generic.c       2005-03-31 09:02:28 -05:00
+++ b/xen/arch/x86/mtrr/generic.c       2005-03-31 09:02:28 -05:00
@@ -261,7 +261,7 @@
        }
 
        /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
-       __flush_tlb();
+       local_flush_tlb();
 
        /*  Save MTRR state */
        rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
@@ -273,7 +273,7 @@
 static void post_set(void)
 {
        /*  Flush TLBs (no need to flush caches - they are disabled)  */
-       __flush_tlb();
+       local_flush_tlb();
 
        /* Intel (P6) standard MTRRs */
        wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
diff -Nru a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c        2005-03-31 09:02:28 -05:00
+++ b/xen/arch/x86/smp.c        2005-03-31 09:02:28 -05:00
@@ -148,17 +148,20 @@
 }
 
 static spinlock_t flush_lock = SPIN_LOCK_UNLOCKED;
-static unsigned long flush_cpumask;
+static unsigned long flush_cpumask, flush_va;
 
 asmlinkage void smp_invalidate_interrupt(void)
 {
     ack_APIC_irq();
     perfc_incrc(ipis);
-    local_flush_tlb();
+    if ( flush_va == FLUSHVA_ALL )
+        local_flush_tlb();
+    else
+        local_flush_tlb_one(flush_va);
     clear_bit(smp_processor_id(), &flush_cpumask);
 }
 
-void flush_tlb_mask(unsigned long mask)
+void __flush_tlb_mask(unsigned long mask, unsigned long va)
 {
     ASSERT(local_irq_is_enabled());
     
@@ -172,6 +175,7 @@
     {
         spin_lock(&flush_lock);
         flush_cpumask = mask;
+        flush_va      = va;
         send_IPI_mask(mask, INVALIDATE_TLB_VECTOR);
         while ( flush_cpumask != 0 )
             cpu_relax();
@@ -190,6 +194,7 @@
         spin_lock(&flush_lock);
         flush_cpumask  = (1UL << smp_num_cpus) - 1;
         flush_cpumask &= ~(1UL << smp_processor_id());
+        flush_va       = FLUSHVA_ALL;
         send_IPI_allbutself(INVALIDATE_TLB_VECTOR);
         while ( flush_cpumask != 0 )
             cpu_relax();
@@ -203,13 +208,13 @@
 
 static void flush_tlb_all_pge_ipi(void *info)
 {
-    __flush_tlb_pge();
+    local_flush_tlb_pge();
 }
 
 void flush_tlb_all_pge(void)
 {
     smp_call_function(flush_tlb_all_pge_ipi, 0, 1, 1);
-    __flush_tlb_pge();
+    local_flush_tlb_pge();
 }
 
 void smp_send_event_check_mask(unsigned long cpu_mask)
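In smp.c a single IPI handler now serves both flavours of flush: the initiating CPU takes flush_lock, publishes flush_cpumask and flush_va, sends INVALIDATE_TLB_VECTOR, and spins until every target has cleared its bit; each target performs a full local flush when flush_va == FLUSHVA_ALL and a single-page invlpg otherwise. Because flush_va is a single shared variable, it is only written while flush_lock is held, as the hunk above does before sending the IPI. From a caller's point of view the two entry points look like this (illustrative only; d and va stand for whatever the caller already has):

  flush_tlb_mask(d->cpuset);          /* complete flush on the domain's CPUs   */
  flush_tlb_one_mask(d->cpuset, va);  /* remote invlpg of 'va' on the same set */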
diff -Nru a/xen/arch/x86/x86_32/mm.c b/xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c  2005-03-31 09:02:28 -05:00
+++ b/xen/arch/x86/x86_32/mm.c  2005-03-31 09:02:28 -05:00
@@ -48,7 +48,7 @@
         {
             /* Super-page mapping. */
             if ( (l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
-                __flush_tlb_pge();
+                local_flush_tlb_pge();
             *pl2e = mk_l2_pgentry(p|flags|_PAGE_PSE);
 
             v += 1 << L2_PAGETABLE_SHIFT;
@@ -66,7 +66,7 @@
             }
             pl1e = l2_pgentry_to_l1(*pl2e) + l1_table_offset(v);
             if ( (l1_pgentry_val(*pl1e) & _PAGE_PRESENT) )
-                __flush_tlb_one(v);
+                local_flush_tlb_one(v);
             *pl1e = mk_l1_pgentry(p|flags);
 
             v += 1 << L1_PAGETABLE_SHIFT;
diff -Nru a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  2005-03-31 09:02:28 -05:00
+++ b/xen/arch/x86/x86_64/mm.c  2005-03-31 09:02:28 -05:00
@@ -89,7 +89,7 @@
         {
             /* Super-page mapping. */
             if ( (l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
-                __flush_tlb_pge();
+                local_flush_tlb_pge();
             *pl2e = mk_l2_pgentry(p|flags|_PAGE_PSE);
 
             v += 1 << L2_PAGETABLE_SHIFT;
@@ -107,7 +107,7 @@
             }
             pl1e = l2_pgentry_to_l1(*pl2e) + l1_table_offset(v);
             if ( (l1_pgentry_val(*pl1e) & _PAGE_PRESENT) )
-                __flush_tlb_one(v);
+                local_flush_tlb_one(v);
             *pl1e = mk_l1_pgentry(p|flags);
 
             v += 1 << L1_PAGETABLE_SHIFT;
diff -Nru a/xen/common/grant_table.c b/xen/common/grant_table.c
--- a/xen/common/grant_table.c  2005-03-31 09:02:28 -05:00
+++ b/xen/common/grant_table.c  2005-03-31 09:02:28 -05:00
@@ -102,7 +102,8 @@
     if ( ((host_virt_addr != 0) || (flags & GNTMAP_host_map) ) &&
          unlikely(!__addr_ok(host_virt_addr)))
     {
-        DPRINTK("Bad virtual address (%x) or flags (%x).\n", host_virt_addr, 
flags);
+        DPRINTK("Bad virtual address (%x) or flags (%x).\n",
+                host_virt_addr, flags);
         (void)__put_user(GNTST_bad_virt_addr, &uop->handle);
         return GNTST_bad_gntref;
     }
@@ -332,8 +333,10 @@
          */
     }
 
-    /* Only make the maptrack live _after_ writing the pte, in case
-     * we overwrite the same frame number, causing a maptrack walk to find it */
+    /*
+     * Only make the maptrack live _after_ writing the pte, in case we 
+     * overwrite the same frame number, causing a maptrack walk to find it.
+     */
     ld->grant_table->maptrack[handle].domid         = dom;
     ld->grant_table->maptrack[handle].ref_and_flags =
         (ref << MAPTRACK_REF_SHIFT) | (flags & MAPTRACK_GNTMAP_MASK);
@@ -364,13 +367,14 @@
     unsigned long va = 0;
 
     for ( i = 0; i < count; i++ )
-        if ( __gnttab_map_grant_ref(&uop[i], &va) == 0)
+        if ( __gnttab_map_grant_ref(&uop[i], &va) == 0 )
             flush++;
 
+    /* XXX KAF: I think we are probably flushing too much here. */
     if ( flush == 1 )
-        __flush_tlb_one(va);
+        flush_tlb_one_mask(current->domain->cpuset, va);
     else if ( flush != 0 )
-        local_flush_tlb();
+        flush_tlb_mask(current->domain->cpuset);
 
     return 0;
 }
@@ -457,7 +461,7 @@
         unsigned long   _ol1e;
 
         pl1e = &linear_pg_table[l1_linear_offset(virt)];
-                                                                                           
+
         if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
         {
             DPRINTK("Could not find PTE entry for address %x\n", virt);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] Improved TLB flushing of subsets of CPUs. Can now do remote invlpg, BitKeeper Bot <=