[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 1/5] un-alias cpumask_any() from cpumask_first()



In order to achieve more symmetric distribution of certain things,
cpumask_any() shouldn't always pick the first CPU (which frequently
will end up being CPU0). To facilitate that, introduce a library-like
function to obtain random numbers.

The per-architecture function is expected to return zero if no valid
random number can be obtained. This implies that a genuine hardware
random value of zero would, on the rare occasions it occurs, be
indistinguishable from "unavailable" and simply be discarded.

As a fallback this uses the trivial linear congruential generator given
as an example in the C standard's rand() specification, extended to
produce full-width "unsigned int" results.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -17,6 +17,7 @@ obj-y += multicall.o
 obj-y += notifier.o
 obj-y += page_alloc.o
 obj-y += preempt.o
+obj-y += random.o
 obj-y += rangeset.o
 obj-y += sched_credit.o
 obj-y += sched_credit2.o
--- /dev/null
+++ b/xen/common/random.c
@@ -0,0 +1,42 @@
+#include <xen/percpu.h>
+#include <xen/random.h>
+#include <xen/time.h>
+#include <asm/random.h>
+
+/* Per-CPU fallback LCG state; 0 means "not yet (re-)seeded". */
+static DEFINE_PER_CPU(unsigned int, seed);
+
+/*
+ * Return a pseudo-random number.  Uses the architecture's hardware
+ * generator when one is available; otherwise falls back to the sample
+ * linear congruential generator from the C standard's rand()
+ * specification, widened to fill all bits of "unsigned int".
+ */
+unsigned int get_random(void)
+{
+    unsigned int next = this_cpu(seed), val = arch_get_random();
+
+    /*
+     * (Re-)seed from the hardware value or, failing that, the current
+     * time truncated to 32 bits.  Should that also be zero, the LCG
+     * below still advances the state away from zero.
+     */
+    if ( unlikely(!next) )
+        next = val ?: NOW();
+
+    if ( !val )
+    {
+        unsigned int i;
+
+        /* Each LCG step yields 11 usable bits (bits 16..26 of "next"). */
+        for ( i = 0; i < sizeof(val) * 8; i += 11 )
+        {
+            next = next * 1103515245 + 12345;
+            val |= ((next >> 16) & 0x7ff) << i;
+        }
+    }
+
+    this_cpu(seed) = next;
+
+    return val;
+}
--- a/xen/include/asm-arm/percpu.h
+++ b/xen/include/asm-arm/percpu.h
@@ -2,6 +2,17 @@
 #define __ARM_PERCPU_H__
 
 #ifndef __ASSEMBLY__
+
+#include <xen/types.h>
+#include <asm/cpregs.h>
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/processor.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/processor.h>
+#else
+# error "unknown ARM variant"
+#endif
+
 extern char __per_cpu_start[], __per_cpu_data_end[];
 extern unsigned long __per_cpu_offset[NR_CPUS];
 void percpu_init_areas(void);
--- /dev/null
+++ b/xen/include/asm-arm/random.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_RANDOM_H__
+#define __ASM_RANDOM_H__
+
+/*
+ * ARM has no architecture-specific source of random numbers here;
+ * returning 0 signals "no hardware entropy available" to the caller.
+ */
+static inline unsigned int arch_get_random(void)
+{
+    return 0;
+}
+
+#endif /* __ASM_RANDOM_H__ */
--- /dev/null
+++ b/xen/include/asm-x86/random.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_RANDOM_H__
+#define __ASM_RANDOM_H__
+
+#include <asm/processor.h>
+
+static inline unsigned int arch_get_random(void)
+{
+    unsigned int val = 0;
+
+    /*
+     * Raw opcode bytes for RDRAND %eax, used instead of the mnemonic
+     * so that older assemblers can still build this.  CF (the success
+     * indicator) is deliberately not checked - per the interface
+     * contract a result of zero means "no random number available".
+     */
+    if ( cpu_has(&current_cpu_data, X86_FEATURE_RDRAND) )
+        asm ( ".byte 0x0f,0xc7,0xf0" : "+a" (val) );
+
+    return val;
+}
+
+#endif /* __ASM_RANDOM_H__ */
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -77,6 +77,7 @@
 
 #include <xen/bitmap.h>
 #include <xen/kernel.h>
+#include <xen/random.h>
 
 typedef struct cpumask{ DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
 
@@ -245,7 +246,30 @@ static inline int cpumask_cycle(int n, c
     return nxt;
 }
 
-#define cpumask_any(srcp) cpumask_first(srcp)
+/*
+ * Return an arbitrary CPU from @srcp.  Unlike the former alias to
+ * cpumask_first(), pick a (pseudo-)randomly chosen member, so that
+ * repeated calls don't always favor the lowest numbered CPU.
+ * Returns nr_cpu_ids if the mask is empty.
+ */
+static inline unsigned int cpumask_any(const cpumask_t *srcp)
+{
+    unsigned int cpu = cpumask_first(srcp);
+    unsigned int w = cpumask_weight(srcp);
+
+    /* Advance past a random number (0 ... w-1) of set bits. */
+    if ( w > 1 && cpu < nr_cpu_ids )
+        for ( w = get_random() % w; w--; )
+        {
+            unsigned int next = cpumask_next(cpu, srcp);
+
+            if ( next >= nr_cpu_ids )
+                break;
+            cpu = next;
+        }
+
+    return cpu;
+}
 
 /*
  * Special-case data structure for "single bit set only" constant CPU masks.
--- /dev/null
+++ b/xen/include/xen/random.h
@@ -0,0 +1,10 @@
+#ifndef __XEN_RANDOM_H__
+#define __XEN_RANDOM_H__
+
+/*
+ * Obtain a (pseudo-)random number.  Not suitable for cryptographic
+ * use - the software fallback is a simple LCG.
+ */
+unsigned int get_random(void);
+
+#endif /* __XEN_RANDOM_H__ */



Attachment: random.patch
Description: Text document

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.