ChangeSet 1.1420.1.1, 2005/04/01 14:57:39+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx
Fix FS/GS saving on Linux 2.6.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
arch/xen/i386/kernel/process.c | 17 +----------------
include/asm-xen/asm-i386/mmu_context.h | 24 +++++++++++++++++++++---
2 files changed, 22 insertions(+), 19 deletions(-)
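On 2.6 the old code saved %fs/%gs inside __switch_to(), but that runs after
switch_mm() has already had Xen reload cr3 and the LDT, by which point the
outgoing task's selectors may no longer be loadable (hence the "Must happen
before reload of cr3/ldt" comment added below). The fix moves the save into
a new __prepare_arch_switch() hook and then parks both registers on the null
selector. A minimal user-space sketch of the selector-save idiom itself, with
x86 and GCC inline asm assumed (reading a segment register is unprivileged):

    /*
     * Sketch only: copy the 16-bit selectors out of %fs/%gs, the same
     * "movl %%sreg,%0" idiom __prepare_arch_switch() uses. Only the low
     * 16 bits of the destination are meaningful, so mask before printing.
     */
    #include <stdio.h>

    int main(void)
    {
        unsigned int fs_sel, gs_sel;

        __asm__ __volatile__ ( "movl %%fs,%0 ; movl %%gs,%1"
                               : "=r" (fs_sel), "=r" (gs_sel) );

        printf("fs=%#06x gs=%#06x\n", fs_sel & 0xffff, gs_sel & 0xffff);
        return 0;
    }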
diff -Nru a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c 2005-04-01 09:03:16 -05:00
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c 2005-04-01 09:03:16 -05:00
@@ -445,22 +445,7 @@
physdev_op_t iopl_op, iobmp_op;
multicall_entry_t _mcl[8], *mcl = _mcl;
- /*
- * Save away %fs and %gs. No need to save %es and %ds, as
- * those are always kernel segments while inside the kernel.
- */
- asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
- asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
-
- /*
- * We clobber FS and GS here so that we avoid a GPF when restoring
- * previous task's FS/GS values in Xen when the LDT is switched.
- */
- __asm__ __volatile__ (
- "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : :
- "eax" );
-
- /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+ /* XEN NOTE: FS/GS saved in __prepare_arch_switch() (mmu_context.h), not here. */
/*
* This is basically '__unlazy_fpu', except that we queue a
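The clobber removed above is the other half of the trick: once %fs/%gs hold
the null selector they can always be reloaded without faulting, whereas a
selector that pointed into the outgoing task's LDT becomes unloadable the
moment the LDT is switched. A hypothetical user-space demonstration of that
failure mode, assuming i386 Linux (built -m32, where libc leaves %fs free);
the modify_ldt() slot and selector arithmetic are illustrative only:

    #include <asm/ldt.h>        /* struct user_desc */
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Write LDT slot 0: a flat 4GB data segment, present or not. */
    static int set_ldt_slot0(int present)
    {
        struct user_desc d;
        memset(&d, 0, sizeof(d));
        d.entry_number    = 0;
        d.limit           = 0xfffff;
        d.seg_32bit       = 1;
        d.limit_in_pages  = 1;
        d.seg_not_present = !present;
        d.useable         = 1;
        return syscall(SYS_modify_ldt, 1, &d, sizeof(d)); /* 1 == write */
    }

    int main(void)
    {
        unsigned int sel = (0 << 3) | 4 | 3; /* slot 0, LDT bit, RPL 3 */

        set_ldt_slot0(1);
        __asm__ __volatile__ ( "movl %0,%%fs" : : "r" (sel) ); /* loads fine */

        set_ldt_slot0(0);  /* the entry %fs refers to is now not-present */
        /* Reloading the stale selector would now fault (SIGSEGV):
         * __asm__ __volatile__ ( "movl %0,%%fs" : : "r" (sel) );
         */

        /* What the patch does instead: the null selector always loads. */
        __asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
        puts("parked %fs on the null selector");
        return 0;
    }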
diff -Nru a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h 2005-04-01 09:03:16 -05:00
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h 2005-04-01 09:03:16 -05:00
@@ -16,13 +16,31 @@
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
-#ifdef CONFIG_SMP
+#if 0 /* XEN */
unsigned cpu = smp_processor_id();
if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
#endif
}
+#define prepare_arch_switch(rq,next) __prepare_arch_switch()
+#define finish_arch_switch(rq, next) spin_unlock_irq(&(rq)->lock)
+#define task_running(rq, p) ((rq)->curr == (p))
+
+static inline void __prepare_arch_switch(void)
+{
+ /*
+ * Save away %fs and %gs. No need to save %es and %ds, as those
+ * are always kernel segments while inside the kernel. Must
+ * happen before reload of cr3/ldt (i.e., not in __switch_to).
+ */
+ __asm__ __volatile__ ( "movl %%fs,%0 ; movl %%gs,%1"
+ : "=m" (*(int *)&current->thread.fs),
+ "=m" (*(int *)&current->thread.gs));
+ __asm__ __volatile__ ( "movl %0,%%fs ; movl %0,%%gs"
+ : : "r" (0) );
+}
+
static inline void switch_mm(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)
@@ -32,7 +50,7 @@
if (likely(prev != next)) {
/* stop flush ipis for the previous mm */
cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
+#if 0 /* XEN */
per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
@@ -47,7 +65,7 @@
if (unlikely(prev->context.ldt != next->context.ldt))
load_LDT_nolock(&next->context, cpu);
}
-#ifdef CONFIG_SMP
+#if 0 /* XEN */
else {
per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
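The three defines at the top of the hunk are what wire this in: in the 2.6
scheduler, prepare_arch_switch(rq, next) runs in schedule() before
context_switch() calls switch_mm() and then switch_to(), so the save now
happens before the cr3/LDT reload instead of after it. The
finish_arch_switch() and task_running() definitions repeat the scheduler's
defaults, which an architecture must supply once it overrides
prepare_arch_switch(). A stub sketch of that ordering, with hypothetical
bodies standing in for the real kernel code:

    #include <stdio.h>

    /* Hypothetical stand-ins; only the call order mirrors the 2.6 path. */
    static void __prepare_arch_switch(void) { puts("1. save %fs/%gs, park on null"); }
    static void switch_mm_stub(void)        { puts("2. Xen reloads cr3 and the LDT"); }
    static void __switch_to_stub(void)      { puts("3. restore next task's state"); }

    int main(void)
    {
        __prepare_arch_switch(); /* schedule(): prepare_arch_switch(rq, next) */
        switch_mm_stub();        /* context_switch(): new address space + LDT */
        __switch_to_stub();      /* switch_to(): per-task register state */
        return 0;
    }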