[Xen-changelog] [xen-unstable] [IA64] remove unnecessary vtlb_lock

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [IA64] remove unnecessary vtlb_lock
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Mon, 03 Jul 2006 10:20:19 +0000
Delivery-date: Mon, 03 Jul 2006 03:25:04 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 5389c7b06ccf21856d50d658c5a52bfb190b642c
# Parent  535b466ee1ef431bbb82b0bdea1b3c90a85914b3
[IA64] remove unnecessary vtlb_lock

remove vtlb_lock.
As Tristan pointed out, vtlb_lock is unnecessary: the page-fault path can
detect a purge that races with its translation by checking whether the
matching dtlb entry's present bit has been cleared (together with
p2m_entry_retry()), so the domain-wide seqlock is not needed.

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
 xen/arch/ia64/xen/domain.c    |    1 -
 xen/arch/ia64/xen/faults.c    |    8 ++++----
 xen/arch/ia64/xen/vhpt.c      |   16 ++--------------
 xen/include/asm-ia64/domain.h |    3 ---
 4 files changed, 6 insertions(+), 22 deletions(-)
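
In short, the work the seqlock used to guard is now covered by a cheaper
per-entry check: the fault handler retries only when the dtlb entry it
translated through had its present bit cleared by a concurrent purge (or
when p2m_entry_retry() fires), instead of retrying whenever any flush
bumped the domain-wide sequence counter.  Below is a minimal, standalone
sketch of the two retry shapes; it is not Xen code, and all names in it
(toy_tlb_entry, purge_seq, read_begin, read_retry, fault_path_old,
fault_path_new) are invented for illustration only.

/* Illustrative sketch only -- not Xen code.  It contrasts the seqlock
 * read-retry idiom the patch removes with the present-bit recheck the
 * patch relies on instead. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_tlb_entry {
	_Atomic bool present;	/* plays the role of dtlb.pte.p */
	unsigned long pte;
};

/* Toy seqlock counter: even = no purge in progress, odd = purging. */
static _Atomic unsigned purge_seq;

static unsigned read_begin(void)
{
	unsigned s;
	while ((s = atomic_load(&purge_seq)) & 1)
		;		/* wait out an in-flight purge */
	return s;
}

static bool read_retry(unsigned s)
{
	/* True if any purge ran anywhere since read_begin(). */
	return atomic_load(&purge_seq) != s;
}

/* Old shape: the whole translate+insert sequence sat inside a seqlock
 * read section and was redone after *any* purge in the domain. */
static void fault_path_old(struct toy_tlb_entry *e)
{
	unsigned s;
	(void)e;		/* old path ignored the entry's own state */
	do {
		s = read_begin();
		/* ... vcpu_translate() + vcpu_itc_no_srlz() went here ... */
	} while (read_retry(s));
}

/* New shape: no lock; redo the work only if the entry actually used was
 * purged (its present bit cleared) while we were working. */
static void fault_path_new(struct toy_tlb_entry *e)
{
	do {
		/* ... vcpu_translate() + vcpu_itc_no_srlz() go here ... */
	} while (!atomic_load(&e->present));
}

int main(void)
{
	struct toy_tlb_entry e = { .present = true, .pte = 0 };
	fault_path_old(&e);
	fault_path_new(&e);
	puts("both fault paths completed");
	return 0;
}

The practical effect is that a flush on one vcpu no longer forces every
concurrent fault in the domain to restart; only a fault whose own
translation was invalidated retries.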

diff -r 535b466ee1ef -r 5389c7b06ccf xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Mon Jun 19 13:06:53 2006 -0600
+++ b/xen/arch/ia64/xen/domain.c        Mon Jun 19 13:13:01 2006 -0600
@@ -306,7 +306,6 @@ int arch_domain_create(struct domain *d)
        // the following will eventually need to be negotiated dynamically
        d->arch.shared_info_va = SHAREDINFO_ADDR;
        d->arch.breakimm = 0x1000;
-       seqlock_init(&d->arch.vtlb_lock);
 
        if (is_idle_domain(d))
            return 0;
diff -r 535b466ee1ef -r 5389c7b06ccf xen/arch/ia64/xen/faults.c
--- a/xen/arch/ia64/xen/faults.c        Mon Jun 19 13:06:53 2006 -0600
+++ b/xen/arch/ia64/xen/faults.c        Mon Jun 19 13:13:01 2006 -0600
@@ -214,8 +214,6 @@ void ia64_do_page_fault (unsigned long a
        // FIXME should validate address here
        unsigned long pteval;
        unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
-       seqlock_t* vtlb_lock = &current->domain->arch.vtlb_lock;
-       unsigned long seq;
        IA64FAULT fault;
 
        if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs)) return;
@@ -232,14 +230,16 @@ void ia64_do_page_fault (unsigned long a
        }
 
  again:
-       seq = read_seqbegin(vtlb_lock);
        fault = vcpu_translate(current,address,is_data,&pteval,&itir,&iha);
        if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
                u64 logps;
                struct p2m_entry entry;
                pteval = translate_domain_pte(pteval, address, itir, &logps, &entry);
                vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,logps);
-               if (read_seqretry(vtlb_lock, seq) || p2m_entry_retry(&entry)) {
+               if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
+                   p2m_entry_retry(&entry)) {
+                       /* dtlb has been purged in-between.  This dtlb was
+                          matching.  Undo the work.  */
                        vcpu_flush_tlb_vhpt_range(address & ((1 << logps) - 1),
                                                  logps);
                        goto again;
diff -r 535b466ee1ef -r 5389c7b06ccf xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c  Mon Jun 19 13:06:53 2006 -0600
+++ b/xen/arch/ia64/xen/vhpt.c  Mon Jun 19 13:13:01 2006 -0600
@@ -152,9 +152,7 @@ void domain_flush_vtlb_all (void)
 {
        int cpu = smp_processor_id ();
        struct vcpu *v;
-       seqlock_t* vtlb_lock = &current->domain->arch.vtlb_lock;
-
-       write_seqlock(vtlb_lock);
+
        for_each_vcpu (current->domain, v)
                if (v->processor == cpu)
                        vcpu_flush_vtlb_all ();
@@ -163,7 +161,6 @@ void domain_flush_vtlb_all (void)
                                (v->processor,
                                 (void(*)(void *))vcpu_flush_vtlb_all,
                                 NULL,1,1);
-       write_sequnlock(vtlb_lock);
 }
 
 static void cpu_flush_vhpt_range (int cpu, u64 vadr, u64 addr_range)
@@ -190,7 +187,6 @@ void vcpu_flush_tlb_vhpt_range (u64 vadr
 
 void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range)
 {
-       seqlock_t* vtlb_lock = &d->arch.vtlb_lock;
        struct vcpu *v;
 
 #if 0
@@ -201,7 +197,6 @@ void domain_flush_vtlb_range (struct dom
        }
 #endif
 
-       write_seqlock(vtlb_lock);
        for_each_vcpu (d, v) {
                /* Purge TC entries.
                   FIXME: clear only if match.  */
@@ -218,7 +213,6 @@ void domain_flush_vtlb_range (struct dom
 
        /* ptc.ga  */
        ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
-       write_sequnlock(vtlb_lock);
 }
 
 static void flush_tlb_vhpt_all (struct domain *d)
@@ -230,8 +224,6 @@ static void flush_tlb_vhpt_all (struct d
        local_flush_tlb_all ();
 }
 
-// this is called when a domain is destroyed
-// so that there is no race.
 void domain_flush_destroy (struct domain *d)
 {
        /* Very heavy...  */
@@ -241,10 +233,8 @@ void domain_flush_destroy (struct domain
 
 void flush_tlb_mask(cpumask_t mask)
 {
-    seqlock_t* vtlb_lock = &current->domain->arch.vtlb_lock;
     int cpu;
 
-    write_seqlock(vtlb_lock);
     cpu = smp_processor_id();
     if (cpu_isset (cpu, mask)) {
         cpu_clear(cpu, mask);
@@ -252,13 +242,11 @@ void flush_tlb_mask(cpumask_t mask)
     }
 
     if (cpus_empty(mask))
-        goto out;
+        return;
 
     for_each_cpu_mask (cpu, mask)
         smp_call_function_single
             (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
-out:
-    write_sequnlock(vtlb_lock);
 }
 
 void zero_vhpt_stats(void)
diff -r 535b466ee1ef -r 5389c7b06ccf xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Mon Jun 19 13:06:53 2006 -0600
+++ b/xen/include/asm-ia64/domain.h     Mon Jun 19 13:13:01 2006 -0600
@@ -98,9 +98,6 @@ struct arch_domain {
     void *efi_runtime;
    /* Metaphysical address to fpswa_interface_t in domain firmware memory is set. */
     void *fpswa_inf;
-
-    // protect v->itlb, v->dtlb and vhpt
-    seqlock_t   vtlb_lock ____cacheline_aligned_in_smp;
 };
 #define INT_ENABLE_OFFSET(v)             \
     (sizeof(vcpu_info_t) * (v)->vcpu_id + \

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
