
[Xen-devel] Re: rcu readers - remove dummy lock object



On 25/03/07 15:58 -0400, Mike D. Day wrote:
rcu_read_lock and rcu_read_unlock in Xen are empty macros that exist only to
document the use of RCU techniques. The Xen definitions for RCU readers take
a dummy lock object. Upstream Linux has since redefined the RCU reader lock
routines to omit the dummy lock object, which makes RCU read locking look
like memory barriers and other primitives that take no operand.
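
For illustration only (not part of the patch), a reader-side critical
section over the domain list, such as the one in do_settime() touched
below, changes from the dummy-lock form to the bare Linux-style form:

    /* old Xen form, with the dummy lock object */
    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
        update_domain_wallclock_time(d);
    rcu_read_unlock(&domlist_read_lock);

    /* new form, no operand, analogous to a memory barrier */
    rcu_read_lock();
    for_each_domain ( d )
        update_domain_wallclock_time(d);
    rcu_read_unlock();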

Reposting a corrected patch: the acm code (not automatically compiled) didn't
include the new definition of rcu_read_unlock. Now corrected.
--

diff -r 6f2f72f39872 xen/acm/acm_simple_type_enforcement_hooks.c
--- a/xen/acm/acm_simple_type_enforcement_hooks.c       Fri Mar 23 15:10:46 2007 +0000
+++ b/xen/acm/acm_simple_type_enforcement_hooks.c       Sun Mar 25 16:10:08 2007 -0400
@@ -180,7 +180,7 @@ ste_init_state(struct acm_ste_policy_buf
    struct active_grant_entry *act;
    int port, i;

-    rcu_read_lock(&domlist_read_lock);
+    rcu_read_lock();
    /* go by domain? or directly by global? event/grant list */
    /* go through all domains and adjust policy as if this domain was started now */
    for_each_domain ( d )
@@ -263,7 +263,7 @@ ste_init_state(struct acm_ste_policy_buf
    }
    violation = 0;
 out:
-    rcu_read_unlock(&domlist_read_lock);
+    rcu_read_unlock();
    return violation;
/* returning "violation != 0" means that existing sharing between domains would not
 * have been allowed if the new policy had been enforced before the sharing; for ste,
@@ -324,14 +324,14 @@ ste_set_policy(u8 *buf, u32 buf_size)
    ste_bin_pol.ssidrefs = (domaintype_t *)ssidrefsbuf;

    /* clear all ste caches */
-    rcu_read_lock(&domlist_read_lock);
+    rcu_read_lock();
    for_each_domain ( d ) {
        ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, (struct acm_ssid_domain *)(d)->ssid);
        for (i=0; i<ACM_TE_CACHE_SIZE; i++)
            ste_ssid->ste_cache[i].valid = ACM_STE_free;
    }
-    rcu_read_unlock(&domlist_read_lock);
+    rcu_read_unlock();
    return ACM_OK;

 error_free:
@@ -437,7 +437,7 @@ clean_id_from_cache(domid_t id)
    struct acm_ssid_domain *ssid;

    printkd("deleting cache for dom %x.\n", id);
-    rcu_read_lock(&domlist_read_lock);
+    rcu_read_lock();
    /* look through caches of all domains */
    for_each_domain ( d ) {
        ssid = (struct acm_ssid_domain *)(d->ssid);
@@ -456,7 +456,7 @@ clean_id_from_cache(domid_t id)
                ste_ssid->ste_cache[i].valid = ACM_STE_free;
    }
 out:
-    rcu_read_unlock(&domlist_read_lock);
+    rcu_read_unlock();
}

/***************************
diff -r 6f2f72f39872 xen/arch/ia64/linux-xen/mca.c
--- a/xen/arch/ia64/linux-xen/mca.c     Fri Mar 23 15:10:46 2007 +0000
+++ b/xen/arch/ia64/linux-xen/mca.c     Sun Mar 25 15:21:51 2007 -0400
@@ -790,7 +790,7 @@ init_handler_platform (pal_min_state_are
                        /* this route is for dump routine */
                        unw_init_running(try_crashdump, pt);
                } else {
-                       rcu_read_lock(&domlist_read_lock);
+                       rcu_read_lock();
                        for_each_domain(d) {
                                for_each_vcpu(d, v) {
                                        printk("Backtrace of current vcpu "
@@ -799,7 +799,7 @@ init_handler_platform (pal_min_state_are
                                        show_stack(v, NULL);
                                }
                        }
-                       rcu_read_unlock(&domlist_read_lock);
+                       rcu_read_unlock();
                }
        }
        unw_init_running(freeze_cpu_osinit, NULL);
diff -r 6f2f72f39872 xen/arch/ia64/linux-xen/perfmon.c
--- a/xen/arch/ia64/linux-xen/perfmon.c Fri Mar 23 15:10:46 2007 +0000
+++ b/xen/arch/ia64/linux-xen/perfmon.c Sun Mar 25 15:22:30 2007 -0400
@@ -7685,11 +7685,11 @@ xenpfm_start_stop_locked(int is_start)
        while (atomic_read(&arg.started) != cpus)
                cpu_relax();

-       rcu_read_lock(&domlist_read_lock);
+       rcu_read_lock();
        for_each_domain(d)
                for_each_vcpu(d, v)
                        xenpfm_start_stop_vcpu(v, is_start);
-       rcu_read_unlock(&domlist_read_lock);
+       rcu_read_unlock();

        arg.error[smp_processor_id()] = __xenpfm_start_stop(is_start);
        atomic_inc(&arg.finished);
diff -r 6f2f72f39872 xen/arch/powerpc/audit.c
--- a/xen/arch/powerpc/audit.c  Fri Mar 23 15:10:46 2007 +0000
+++ b/xen/arch/powerpc/audit.c  Sun Mar 25 15:23:04 2007 -0400
@@ -34,10 +34,10 @@ void audit_domains(void)
void audit_domains(void)
{
    struct domain *d;
-    rcu_read_lock(&domlist_read_lock);
+    rcu_read_lock();
    for_each_domain ( d )
        audit_domain(d);
-    rcu_read_unlock(&domlist_read_lock);
+    rcu_read_unlock();
}

void audit_domains_key(unsigned char key)
diff -r 6f2f72f39872 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Fri Mar 23 15:10:46 2007 +0000
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Sun Mar 25 15:23:29 2007 -0400
@@ -326,7 +326,7 @@ static void vmcb_dump(unsigned char ch)
printk("*********** VMCB Areas **************\n");

-    rcu_read_lock(&domlist_read_lock);
+    rcu_read_lock();

    for_each_domain ( d )
    {
@@ -340,7 +340,7 @@ static void vmcb_dump(unsigned char ch)
        }
    }

-    rcu_read_unlock(&domlist_read_lock);
+    rcu_read_unlock();

    printk("**************************************\n");
}
diff -r 6f2f72f39872 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Fri Mar 23 15:10:46 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Sun Mar 25 15:23:52 2007 -0400
@@ -568,7 +568,7 @@ static void vmcs_dump(unsigned char ch)
printk("*********** VMCS Areas **************\n");

-    rcu_read_lock(&domlist_read_lock);
+    rcu_read_lock();

    for_each_domain ( d )
    {
@@ -584,7 +584,7 @@ static void vmcs_dump(unsigned char ch)
        }
    }

-    rcu_read_unlock(&domlist_read_lock);
+    rcu_read_unlock();

    printk("**************************************\n");
}
diff -r 6f2f72f39872 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Fri Mar 23 15:10:46 2007 +0000
+++ b/xen/arch/x86/mm/shadow/common.c   Sun Mar 25 15:24:04 2007 -0400
@@ -891,7 +891,7 @@ static void shadow_blow_all_tables(unsig
{
    struct domain *d;
    printk("'%c' pressed -> blowing all shadow tables\n", c);
-    rcu_read_lock(&domlist_read_lock);
+    rcu_read_lock();
    for_each_domain(d)
    {
        if ( shadow_mode_enabled(d) && d->vcpu[0] != NULL )
@@ -901,7 +901,7 @@ static void shadow_blow_all_tables(unsig
            shadow_unlock(d);
        }
    }
-    rcu_read_unlock(&domlist_read_lock);
+    rcu_read_unlock();
}

/* Register this function in the Xen console keypress table */
diff -r 6f2f72f39872 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Fri Mar 23 15:10:46 2007 +0000
+++ b/xen/arch/x86/time.c       Sun Mar 25 15:24:28 2007 -0400
@@ -720,10 +720,10 @@ void do_settime(unsigned long secs, unsi
    wc_nsec = _wc_nsec = (u32)y;
    spin_unlock(&wc_lock);

-    rcu_read_lock(&domlist_read_lock);
+    rcu_read_lock();
    for_each_domain ( d )
        update_domain_wallclock_time(d);
-    rcu_read_unlock(&domlist_read_lock);
+    rcu_read_unlock();
}

static void local_time_calibration(void *unused)
diff -r 6f2f72f39872 xen/common/domain.c
--- a/xen/common/domain.c       Fri Mar 23 15:10:46 2007 +0000
+++ b/xen/common/domain.c       Sun Mar 25 15:25:22 2007 -0400
@@ -31,7 +31,6 @@

/* Protect updates/reads (resp.) of domain_list and domain_hash. */
DEFINE_SPINLOCK(domlist_update_lock);
-DEFINE_RCU_READ_LOCK(domlist_read_lock);

#define DOMAIN_HASH_SIZE 256
#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
@@ -216,7 +215,7 @@ struct domain *get_domain_by_id(domid_t
{
    struct domain *d;

-    rcu_read_lock(&domlist_read_lock);
+    rcu_read_lock();

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
@@ -230,7 +229,7 @@ struct domain *get_domain_by_id(domid_t
        }
    }

-    rcu_read_unlock(&domlist_read_lock);
+    rcu_read_unlock();

    return d;
}
@@ -240,7 +239,7 @@ struct domain *rcu_lock_domain_by_id(dom
{
    struct domain *d;

-    rcu_read_lock(&domlist_read_lock);
+    rcu_read_lock();

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
@@ -250,7 +249,7 @@ struct domain *rcu_lock_domain_by_id(dom
            return d;
    }

-    rcu_read_unlock(&domlist_read_lock);
+    rcu_read_unlock();

    return NULL;
}
diff -r 6f2f72f39872 xen/common/domctl.c
--- a/xen/common/domctl.c       Fri Mar 23 15:10:46 2007 +0000
+++ b/xen/common/domctl.c       Sun Mar 25 15:26:23 2007 -0400
@@ -142,12 +142,12 @@ static unsigned int default_vcpu0_locati
    cpumask_t      cpu_exclude_map;

    /* Do an initial CPU placement. Pick the least-populated CPU. */
-    rcu_read_lock(&domlist_read_lock);
+    rcu_read_lock();
    for_each_domain ( d )
        for_each_vcpu ( d, v )
        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
            cnt[v->processor]++;
-    rcu_read_unlock(&domlist_read_lock);
+    rcu_read_unlock();

    /*
     * If we're on a HT system, we only auto-allocate to a non-primary HT. We
@@ -473,7 +473,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
        if ( dom == DOMID_SELF )
            dom = current->domain->domain_id;

-        rcu_read_lock(&domlist_read_lock);
+        rcu_read_lock();

        for_each_domain ( d )
        {
@@ -483,7 +483,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc

        if ( d == NULL )
        {
-            rcu_read_unlock(&domlist_read_lock);
+            rcu_read_unlock();
            ret = -ESRCH;
            break;
        }
@@ -494,7 +494,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

-        rcu_read_unlock(&domlist_read_lock);
+        rcu_read_unlock();
    }
    break;

diff -r 6f2f72f39872 xen/common/keyhandler.c
--- a/xen/common/keyhandler.c   Fri Mar 23 15:10:46 2007 +0000
+++ b/xen/common/keyhandler.c   Sun Mar 25 15:27:30 2007 -0400
@@ -158,7 +158,7 @@ static void dump_domains(unsigned char k
    printk("'%c' pressed -> dumping domain info (now=0x%X:%08X)\n", key,
           (u32)(now>>32), (u32)now);

-    rcu_read_lock(&domlist_read_lock);
+    rcu_read_lock();

    for_each_domain ( d )
    {
@@ -212,7 +212,7 @@ static void dump_domains(unsigned char k
        }
    }

-    rcu_read_unlock(&domlist_read_lock);
+    rcu_read_unlock();
}

static cpumask_t read_clocks_cpumask = CPU_MASK_NONE;
diff -r 6f2f72f39872 xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c   Fri Mar 23 15:10:46 2007 +0000
+++ b/xen/common/sched_sedf.c   Sun Mar 25 15:28:09 2007 -0400
@@ -1277,7 +1277,7 @@ static void sedf_dump_cpu_state(int i)
    loop = 0;
    printk("\nnot on Q\n");

-    rcu_read_lock(&domlist_read_lock);
+    rcu_read_lock();
    for_each_domain ( d )
    {
        for_each_vcpu(d, ed)
@@ -1289,7 +1289,7 @@ static void sedf_dump_cpu_state(int i)
            }
        }
    }
-    rcu_read_unlock(&domlist_read_lock);
+    rcu_read_unlock();
}


@@ -1302,7 +1302,7 @@ static int sedf_adjust_weights(struct xe
    s_time_t            sumt[NR_CPUS] = { 0 };

    /* Sum across all weights. */
-    rcu_read_lock(&domlist_read_lock);
+    rcu_read_lock();
    for_each_domain( d )
    {
        for_each_vcpu( d, p )
@@ -1326,10 +1326,10 @@ static int sedf_adjust_weights(struct xe
            }
        }
    }
-    rcu_read_unlock(&domlist_read_lock);
+    rcu_read_unlock();

    /* Adjust all slices (and periods) to the new weight. */
-    rcu_read_lock(&domlist_read_lock);
+    rcu_read_lock();
    for_each_domain( d )
    {
        for_each_vcpu ( d, p )
@@ -1346,7 +1346,7 @@ static int sedf_adjust_weights(struct xe
            }
        }
    }
-    rcu_read_unlock(&domlist_read_lock);
+    rcu_read_unlock();

    return 0;
}
diff -r 6f2f72f39872 xen/common/sysctl.c
--- a/xen/common/sysctl.c       Fri Mar 23 15:10:46 2007 +0000
+++ b/xen/common/sysctl.c       Sun Mar 25 15:28:32 2007 -0400
@@ -79,7 +79,7 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysc
        struct xen_domctl_getdomaininfo info;
        u32 num_domains = 0;

-        rcu_read_lock(&domlist_read_lock);
+        rcu_read_lock();

        for_each_domain ( d )
        {
@@ -100,7 +100,7 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysc
            num_domains++;
        }
-        rcu_read_unlock(&domlist_read_lock);
+        rcu_read_unlock();

        if ( ret != 0 )
            break;
diff -r 6f2f72f39872 xen/include/xen/rcupdate.h
--- a/xen/include/xen/rcupdate.h        Fri Mar 23 15:10:46 2007 +0000
+++ b/xen/include/xen/rcupdate.h        Sun Mar 25 15:15:30 2007 -0400
@@ -111,14 +111,6 @@ int rcu_pending(int cpu);
int rcu_pending(int cpu);
int rcu_needs_cpu(int cpu);

-/*
- * Dummy lock type for passing to rcu_read_{lock,unlock}. Currently exists
- * only to document the reason for rcu_read_lock() critical sections.
- */
-struct _rcu_read_lock {};
-typedef struct _rcu_read_lock rcu_read_lock_t;
-#define DEFINE_RCU_READ_LOCK(x) rcu_read_lock_t x
-
/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
@@ -145,14 +137,14 @@ typedef struct _rcu_read_lock rcu_read_l
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
-#define rcu_read_lock(x)       do { } while (0)
+#define rcu_read_lock()       do { } while (0)

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
-#define rcu_read_unlock(x)     do { } while (0)
+#define rcu_read_unlock()     do { } while (0)

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
diff -r 6f2f72f39872 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Fri Mar 23 15:10:46 2007 +0000
+++ b/xen/include/xen/sched.h   Sun Mar 25 15:29:39 2007 -0400
@@ -291,12 +291,12 @@ struct domain *rcu_lock_domain_by_id(dom
/* Finish a RCU critical region started by rcu_lock_domain_by_id(). */
static inline void rcu_unlock_domain(struct domain *d)
{
-    rcu_read_unlock(&domlist_read_lock);
+    rcu_read_unlock();
}

static inline struct domain *rcu_lock_domain(struct domain *d)
{
-    rcu_read_lock(d);
+    rcu_read_lock();
    return d;
}

@@ -395,7 +395,6 @@ unsigned long hypercall_create_continuat

/* Protect updates/reads (resp.) of domain_list and domain_hash. */
extern spinlock_t domlist_update_lock;
-extern rcu_read_lock_t domlist_read_lock;

extern struct domain *domain_list;



--
Mike D. Day
IBM LTC
Cell: 919 412-3900
Sametime: ncmike@xxxxxxxxxx AIM: ncmikeday  Yahoo: ultra.runner
PGP key: http://www.ncultra.org/ncmike/pubkey.asc

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

