
[Xen-devel] [PATCH RFC 18/44] x86/mm: Maintain the correct percpu mappings on context switch



Ensure that the pagetables we are switching to have the correct percpu
mappings in them.  The _PGC_inuse_pgtable check guarantees that the pagetables
we edit aren't in use elsewhere.
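
For illustration only (not part of this patch): the invariant being maintained
is that, before %cr3 is loaded, the new root pagetable already carries this
CPU's own percpu L4 entry.  A hypothetical debug helper (check_percpu_slot()
does not exist in the tree) expressing that invariant could look like:

    static void check_percpu_slot(const l4_pgentry_t *new_l4t, unsigned int cpu)
    {
        /* This CPU's expected percpu L4 entry, stashed in its per-cpu data. */
        l4_pgentry_t expected = per_cpu(percpu_mappings, cpu);
        unsigned int slot = l4_table_offset(PERCPU_LINEAR_START);

        /* The new pagetable must already contain this CPU's percpu mapping. */
        ASSERT(l4e_get_intpte(new_l4t[slot]) == l4e_get_intpte(expected));
    }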

One complication, however, is context switching between two vcpus, both of
which require shadowing.  See the code comment for details.
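
Purely as a sketch (equivalent to the split memcpy()s in the pt-shadow.c hunk
below, and assuming the same l4t, vcpu_l4t and slot variables), the copy skips
exactly one L4 entry:

    unsigned int i;

    /* Copy every vcpu L4 entry except the percpu slot, which may be live. */
    for ( i = 0; i < L4_PAGETABLE_ENTRIES; ++i )
    {
        if ( i == slot )   /* slot == l4_table_offset(PERCPU_LINEAR_START) */
            continue;      /* never clobber the live percpu mapping */
        l4t[i] = vcpu_l4t[i];
    }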

Another complication is the need for a second percpu fixmap slot.  This limits
NR_CPUS to 254, but the restriction will be removed later in the series.
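
Purely illustrative, and not part of this patch: the interim restriction could
be spelled out as a build-time check; whether the series enforces it this way
is not shown here.

    /* Hypothetical guard for the temporary limit described above. */
    BUILD_BUG_ON(NR_CPUS > 254);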

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/mm.c            |  9 +++++++++
 xen/arch/x86/pv/pt-shadow.c  | 14 +++++++++++++-
 xen/include/asm-x86/fixmap.h |  3 ++-
 3 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index deff4eb..57b3e25 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -505,6 +505,8 @@ void do_write_ptbase(struct vcpu *v, bool tlb_maintenance)
     unsigned long new_cr3;
     unsigned int cpu = smp_processor_id();
     unsigned long *this_curr_ptbase = &per_cpu(curr_ptbase, cpu);
+    l4_pgentry_t percpu_mappings = per_cpu(percpu_mappings, cpu);
+    l4_pgentry_t *new_l4t;
     struct page_info *new_pg;
 
     /* Check that %cr3 isn't being shuffled under our feet. */
@@ -520,6 +522,13 @@ void do_write_ptbase(struct vcpu *v, bool tlb_maintenance)
         /* Same cr3.  Check that it is still marked as in use. */
         ASSERT(test_bit(_PGC_inuse_pgtable, &new_pg->count_info));
 
+    /* Insert percpu mappings into the new pagetables. */
+    set_percpu_fixmap(cpu, PERCPU_FIXSLOT_LINEAR,
+                      l1e_from_paddr(new_cr3, __PAGE_HYPERVISOR_RW));
+    new_l4t = percpu_fix_to_virt(cpu, PERCPU_FIXSLOT_LINEAR);
+    new_l4t[l4_table_offset(PERCPU_LINEAR_START)] = percpu_mappings;
+    barrier();
+
     if ( tlb_maintenance )
         write_cr3(new_cr3);
     else
diff --git a/xen/arch/x86/pv/pt-shadow.c b/xen/arch/x86/pv/pt-shadow.c
index 46a0251..46c7b86 100644
--- a/xen/arch/x86/pv/pt-shadow.c
+++ b/xen/arch/x86/pv/pt-shadow.c
@@ -160,6 +160,7 @@ unsigned long pt_maybe_shadow(struct vcpu *v)
     local_irq_save(flags);
 
     {
+        unsigned int slot = l4_table_offset(PERCPU_LINEAR_START);
         l4_pgentry_t *l4t, *vcpu_l4t;
 
         set_percpu_fixmap(cpu, PERCPU_FIXSLOT_SHADOW,
@@ -170,7 +171,18 @@ unsigned long pt_maybe_shadow(struct vcpu *v)
         l4t = ptsh->shadow_l4_va;
         vcpu_l4t = percpu_fix_to_virt(cpu, PERCPU_FIXSLOT_SHADOW);
 
-        copy_page(l4t, vcpu_l4t);
+        /*
+         * Careful!  When context switching between two vcpus, both of which
+         * require shadowing, l4t[] may be the live pagetables.
+         *
+         * We mustn't clobber the PERCPU slot (with a zero, as vcpu_l4t[] will
+         * never have had a percpu mapping inserted into it).  The context
+         * switch logic will unconditionally insert the correct value anyway.
+         */
+        memcpy(l4t, vcpu_l4t,
+               sizeof(*l4t) * slot);
+        memcpy(&l4t[slot + 1], &vcpu_l4t[slot + 1],
+               sizeof(*l4t) * (L4_PAGETABLE_ENTRIES - (slot + 1)));
     }
 
     return ptsh->shadow_l4;
diff --git a/xen/include/asm-x86/fixmap.h b/xen/include/asm-x86/fixmap.h
index 748219f..c1b3bda 100644
--- a/xen/include/asm-x86/fixmap.h
+++ b/xen/include/asm-x86/fixmap.h
@@ -27,8 +27,9 @@
 #include <asm/msi.h>
 #include <acpi/apei.h>
 
-#define NR_PERCPU_SLOTS 1
+#define NR_PERCPU_SLOTS 2
 #define PERCPU_FIXSLOT_SHADOW 0
+#define PERCPU_FIXSLOT_LINEAR 1
 
 /*
  * Here we define all the compile-time 'special' virtual
-- 
2.1.4

