WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] Introduce and use a per-CPU read-mostly sub-section

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Introduce and use a per-CPU read-mostly sub-section
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Mon, 13 Jul 2009 04:00:20 -0700
Delivery-date: Mon, 13 Jul 2009 04:00:49 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1247481161 -3600
# Node ID 3952eaeb70b0f855365b5f66e0a877a99161a790
# Parent  ed76e4bbea83c59e4ada9ac4e8134bf7d4d0063e
Introduce and use a per-CPU read-mostly sub-section

Since mixing data that only gets setup once and then (perhaps
frequently) gets read by remote CPUs with data that the local CPU may
modify (again, perhaps frequently) still causes undesirable cache
protocol related bus traffic, separate the former class of objects
from the latter.

These objects converted here are just picked based on their write-once
(or write-very-rarely) properties; perhaps some more adjustments may
be desirable subsequently. The primary users of the new sub-section
will result from the next patch.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/arch/ia64/xen/vhpt.c                    |    4 ++--
 xen/arch/ia64/xen/xen.lds.S                 |    2 ++
 xen/arch/x86/hvm/vmx/vmcs.c                 |    2 +-
 xen/arch/x86/setup.c                        |    4 ++--
 xen/arch/x86/traps.c                        |    2 +-
 xen/arch/x86/x86_32/traps.c                 |    2 +-
 xen/arch/x86/x86_32/xen.lds.S               |   11 ++++++++---
 xen/arch/x86/x86_64/mm.c                    |    2 +-
 xen/arch/x86/x86_64/xen.lds.S               |   11 ++++++++---
 xen/common/kexec.c                          |    2 +-
 xen/common/tmem_xen.c                       |    4 ++--
 xen/common/trace.c                          |    4 ++--
 xen/include/asm-ia64/linux-xen/asm/percpu.h |    6 +++---
 xen/include/asm-x86/percpu.h                |    6 +++---
 xen/include/xen/percpu.h                    |   10 ++++++++++
 15 files changed, 47 insertions(+), 25 deletions(-)

diff -r ed76e4bbea83 -r 3952eaeb70b0 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c  Mon Jul 13 11:31:34 2009 +0100
+++ b/xen/arch/ia64/xen/vhpt.c  Mon Jul 13 11:32:41 2009 +0100
@@ -21,8 +21,8 @@
 #include <asm/vcpumask.h>
 #include <asm/vmmu.h>
 
-DEFINE_PER_CPU (unsigned long, vhpt_paddr);
-DEFINE_PER_CPU (unsigned long, vhpt_pend);
+DEFINE_PER_CPU_READ_MOSTLY(unsigned long, vhpt_paddr);
+DEFINE_PER_CPU_READ_MOSTLY(unsigned long, vhpt_pend);
 #ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
 DEFINE_PER_CPU(volatile u32, vhpt_tlbflush_timestamp);
 #endif
diff -r ed76e4bbea83 -r 3952eaeb70b0 xen/arch/ia64/xen/xen.lds.S
--- a/xen/arch/ia64/xen/xen.lds.S       Mon Jul 13 11:31:34 2009 +0100
+++ b/xen/arch/ia64/xen/xen.lds.S       Mon Jul 13 11:32:41 2009 +0100
@@ -187,6 +187,8 @@ SECTIONS
        {
                __per_cpu_start = .;
                *(.data.percpu)
+               . = ALIGN(SMP_CACHE_BYTES);
+               *(.data.percpu.read_mostly)
                __per_cpu_end = .;
        }
   . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
diff -r ed76e4bbea83 -r 3952eaeb70b0 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Mon Jul 13 11:31:34 2009 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Mon Jul 13 11:32:41 2009 +0100
@@ -66,7 +66,7 @@ u32 vmx_vmentry_control __read_mostly;
 u32 vmx_vmentry_control __read_mostly;
 bool_t cpu_has_vmx_ins_outs_instr_info __read_mostly;
 
-static DEFINE_PER_CPU(struct vmcs_struct *, host_vmcs);
+static DEFINE_PER_CPU_READ_MOSTLY(struct vmcs_struct *, host_vmcs);
 static DEFINE_PER_CPU(struct vmcs_struct *, current_vmcs);
 static DEFINE_PER_CPU(struct list_head, active_vmcs_list);
 
diff -r ed76e4bbea83 -r 3952eaeb70b0 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Mon Jul 13 11:31:34 2009 +0100
+++ b/xen/arch/x86/setup.c      Mon Jul 13 11:32:41 2009 +0100
@@ -111,9 +111,9 @@ extern void vesa_mtrr_init(void);
 extern void vesa_mtrr_init(void);
 extern void init_tmem(void);
 
-DEFINE_PER_CPU(struct desc_struct *, gdt_table) = boot_cpu_gdt_table;
+DEFINE_PER_CPU_READ_MOSTLY(struct desc_struct *, gdt_table) = boot_cpu_gdt_table;
 #ifdef CONFIG_COMPAT
-DEFINE_PER_CPU(struct desc_struct *, compat_gdt_table)
+DEFINE_PER_CPU_READ_MOSTLY(struct desc_struct *, compat_gdt_table)
     = boot_cpu_compat_gdt_table;
 #endif
 
diff -r ed76e4bbea83 -r 3952eaeb70b0 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Mon Jul 13 11:31:34 2009 +0100
+++ b/xen/arch/x86/traps.c      Mon Jul 13 11:32:41 2009 +0100
@@ -78,7 +78,7 @@ char opt_nmi[10] = "fatal";
 #endif
 string_param("nmi", opt_nmi);
 
-DEFINE_PER_CPU(u32, ler_msr);
+DEFINE_PER_CPU_READ_MOSTLY(u32, ler_msr);
 
 /* Master table, used by CPU0. */
 idt_entry_t idt_table[IDT_ENTRIES];
diff -r ed76e4bbea83 -r 3952eaeb70b0 xen/arch/x86/x86_32/traps.c
--- a/xen/arch/x86/x86_32/traps.c       Mon Jul 13 11:31:34 2009 +0100
+++ b/xen/arch/x86/x86_32/traps.c       Mon Jul 13 11:32:41 2009 +0100
@@ -188,7 +188,7 @@ void show_page_walk(unsigned long addr)
     unmap_domain_page(l1t);
 }
 
-DEFINE_PER_CPU(struct tss_struct *, doublefault_tss);
+DEFINE_PER_CPU_READ_MOSTLY(struct tss_struct *, doublefault_tss);
 static unsigned char __attribute__ ((__section__ (".bss.page_aligned")))
     boot_cpu_doublefault_space[PAGE_SIZE];
 
diff -r ed76e4bbea83 -r 3952eaeb70b0 xen/arch/x86/x86_32/xen.lds.S
--- a/xen/arch/x86/x86_32/xen.lds.S     Mon Jul 13 11:31:34 2009 +0100
+++ b/xen/arch/x86/x86_32/xen.lds.S     Mon Jul 13 11:32:41 2009 +0100
@@ -4,6 +4,7 @@
  */
 
 #include <xen/config.h>
+#include <xen/cache.h>
 #include <asm/page.h>
 #include <asm/percpu.h>
 #undef ENTRY
@@ -69,9 +70,13 @@ SECTIONS
   __init_end = .;
 
   __per_cpu_shift = PERCPU_SHIFT; /* kdump assist */
-  __per_cpu_start = .;
-  .data.percpu : { *(.data.percpu) } :text
-  __per_cpu_data_end = .;
+  .data.percpu : {
+       __per_cpu_start = .;
+       *(.data.percpu)
+       . = ALIGN(SMP_CACHE_BYTES);
+       *(.data.percpu.read_mostly)
+       __per_cpu_data_end = .;
+  } :text
   . = __per_cpu_start + (NR_CPUS << PERCPU_SHIFT);
   . = ALIGN(PAGE_SIZE);
   __per_cpu_end = .;
diff -r ed76e4bbea83 -r 3952eaeb70b0 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Mon Jul 13 11:31:34 2009 +0100
+++ b/xen/arch/x86/x86_64/mm.c  Mon Jul 13 11:32:41 2009 +0100
@@ -37,7 +37,7 @@ unsigned int m2p_compat_vstart = __HYPER
 unsigned int m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
 #endif
 
-DEFINE_PER_CPU(void *, compat_arg_xlat);
+DEFINE_PER_CPU_READ_MOSTLY(void *, compat_arg_xlat);
 
 /* Top-level master (and idle-domain) page directory. */
 l4_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
diff -r ed76e4bbea83 -r 3952eaeb70b0 xen/arch/x86/x86_64/xen.lds.S
--- a/xen/arch/x86/x86_64/xen.lds.S     Mon Jul 13 11:31:34 2009 +0100
+++ b/xen/arch/x86/x86_64/xen.lds.S     Mon Jul 13 11:32:41 2009 +0100
@@ -2,6 +2,7 @@
 /* Modified for x86-64 Xen by Keir Fraser */
 
 #include <xen/config.h>
+#include <xen/cache.h>
 #include <asm/page.h>
 #include <asm/percpu.h>
 #undef ENTRY
@@ -67,9 +68,13 @@ SECTIONS
   __init_end = .;
 
   __per_cpu_shift = PERCPU_SHIFT; /* kdump assist */
-  __per_cpu_start = .;
-  .data.percpu : { *(.data.percpu) } :text
-  __per_cpu_data_end = .;
+  .data.percpu : {
+       __per_cpu_start = .;
+       *(.data.percpu)
+       . = ALIGN(SMP_CACHE_BYTES);
+       *(.data.percpu.read_mostly)
+       __per_cpu_data_end = .;
+  } :text
   . = __per_cpu_start + (NR_CPUS << PERCPU_SHIFT);
   . = ALIGN(PAGE_SIZE);
   __per_cpu_end = .;
diff -r ed76e4bbea83 -r 3952eaeb70b0 xen/common/kexec.c
--- a/xen/common/kexec.c        Mon Jul 13 11:31:34 2009 +0100
+++ b/xen/common/kexec.c        Mon Jul 13 11:32:41 2009 +0100
@@ -27,7 +27,7 @@
 #include <compat/kexec.h>
 #endif
 
-static DEFINE_PER_CPU(void *, crash_notes);
+static DEFINE_PER_CPU_READ_MOSTLY(void *, crash_notes);
 
 static Elf_Note *xen_crash_note;
 
diff -r ed76e4bbea83 -r 3952eaeb70b0 xen/common/tmem_xen.c
--- a/xen/common/tmem_xen.c     Mon Jul 13 11:31:34 2009 +0100
+++ b/xen/common/tmem_xen.c     Mon Jul 13 11:32:41 2009 +0100
@@ -36,8 +36,8 @@ DECL_CYC_COUNTER(pg_copy);
  * allocated iff opt_tmem_compress */
 #define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
 #define LZO_DSTMEM_PAGES 2
-static DEFINE_PER_CPU(unsigned char *, workmem);
-static DEFINE_PER_CPU(unsigned char *, dstmem);
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, workmem);
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, dstmem);
 
 #ifdef COMPARE_COPY_PAGE_SSE2
 #include <asm/flushtlb.h>  /* REMOVE ME AFTER TEST */
diff -r ed76e4bbea83 -r 3952eaeb70b0 xen/common/trace.c
--- a/xen/common/trace.c        Mon Jul 13 11:31:34 2009 +0100
+++ b/xen/common/trace.c        Mon Jul 13 11:32:41 2009 +0100
@@ -46,8 +46,8 @@ integer_param("tbuf_size", opt_tbuf_size
 integer_param("tbuf_size", opt_tbuf_size);
 
 /* Pointers to the meta-data objects for all system trace buffers */
-static DEFINE_PER_CPU(struct t_buf *, t_bufs);
-static DEFINE_PER_CPU(unsigned char *, t_data);
+static DEFINE_PER_CPU_READ_MOSTLY(struct t_buf *, t_bufs);
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, t_data);
 static int data_size;
 
 /* High water mark for trace buffers; */
diff -r ed76e4bbea83 -r 3952eaeb70b0 xen/include/asm-ia64/linux-xen/asm/percpu.h
--- a/xen/include/asm-ia64/linux-xen/asm/percpu.h       Mon Jul 13 11:31:34 2009 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/percpu.h       Mon Jul 13 11:32:41 2009 +0100
@@ -26,9 +26,9 @@
        extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
 
 /* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name)                             \
-       __attribute__((__section__(".data.percpu")))            \
-       __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
+#define __DEFINE_PER_CPU(type, name, suffix)                           \
+       __attribute__((__section__(".data.percpu" #suffix)))            \
+       __SMALL_ADDR_AREA __typeof__(type) per_cpu_##name
 
 /*
  * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
diff -r ed76e4bbea83 -r 3952eaeb70b0 xen/include/asm-x86/percpu.h
--- a/xen/include/asm-x86/percpu.h      Mon Jul 13 11:31:34 2009 +0100
+++ b/xen/include/asm-x86/percpu.h      Mon Jul 13 11:32:41 2009 +0100
@@ -5,9 +5,9 @@
 #define PERCPU_SIZE  (1UL << PERCPU_SHIFT)
 
 /* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name)                      \
-    __attribute__((__section__(".data.percpu")))        \
-    __typeof__(type) per_cpu__##name
+#define __DEFINE_PER_CPU(type, name, suffix)                    \
+    __attribute__((__section__(".data.percpu" #suffix)))        \
+    __typeof__(type) per_cpu_##name
 
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu)  \
diff -r ed76e4bbea83 -r 3952eaeb70b0 xen/include/xen/percpu.h
--- a/xen/include/xen/percpu.h  Mon Jul 13 11:31:34 2009 +0100
+++ b/xen/include/xen/percpu.h  Mon Jul 13 11:32:41 2009 +0100
@@ -3,6 +3,16 @@
 
 #include <xen/config.h>
 #include <asm/percpu.h>
+
+/*
+ * Separate out the type, so (int[3], foo) works.
+ *
+ * The _##name concatenation is being used here to prevent 'name' from getting
+ * macro expanded, while still allowing a per-architecture symbol name prefix.
+ */
+#define DEFINE_PER_CPU(type, name) __DEFINE_PER_CPU(type, _##name, )
+#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \
+       __DEFINE_PER_CPU(type, _##name, .read_mostly)
 
 /* Preferred on Xen. Also see arch-defined per_cpu(). */
 #define this_cpu(var)    __get_cpu_var(var)

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] [xen-unstable] Introduce and use a per-CPU read-mostly sub-section, Xen patchbot-unstable <=