# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1184237342 -3600
# Node ID e704430b5b32ac1ac52361e402571ca28009fa7d
# Parent bd2f9628114e54e4dbe4ae4249244353eed945e4
x86: Various cleanups around CR4 handling, cpu_possible_map, and VMX initialisation.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
xen/arch/x86/acpi/power.c | 5 +-
xen/arch/x86/cpu/common.c | 3 -
xen/arch/x86/crash.c | 4 -
xen/arch/x86/domain.c | 12 ----
xen/arch/x86/hvm/hvm.c | 5 --
xen/arch/x86/hvm/svm/svm.c | 4 -
xen/arch/x86/hvm/vmx/vmcs.c | 91 ++++++++++++++++++++++---------------
xen/arch/x86/hvm/vmx/vmx.c | 80 +++++++++++---------------------
xen/arch/x86/i8259.c | 3 -
xen/arch/x86/machine_kexec.c | 4 -
xen/arch/x86/mm.c | 2
xen/arch/x86/shutdown.c | 2
xen/arch/x86/smp.c | 2
xen/arch/x86/smpboot.c | 4 -
xen/include/asm-x86/hvm/hvm.h | 25 +++++-----
xen/include/asm-x86/hvm/support.h | 1
xen/include/asm-x86/hvm/vmx/vmcs.h | 10 +---
xen/include/asm-x86/page.h | 15 ------
xen/include/asm-x86/processor.h | 18 -------
xen/include/xen/cpumask.h | 2
20 files changed, 118 insertions(+), 174 deletions(-)
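The common thread in the hunks below is that the old suspend_cpu/resume_cpu hooks (and the hvm_disable() wrapper) give way to a symmetric cpu_up/cpu_down pair, which every caller (ACPI sleep, crash, kexec, shutdown, and the stop-this-CPU IPI) now invokes directly. Condensed from the asm-x86/hvm/hvm.h hunk further down, as a sketch for orientation rather than the literal header:

    struct hvm_function_table {
        /* ... unrelated members elided ... */
        int  (*cpu_up)(void);    /* returns 0 on failure, non-zero on success */
        void (*cpu_down)(void);
    };

    static inline int hvm_cpu_up(void)
    {
        if ( hvm_funcs.cpu_up )
            return hvm_funcs.cpu_up();
        return 1;                /* no hook registered: trivially successful */
    }

    static inline void hvm_cpu_down(void)
    {
        if ( hvm_funcs.cpu_down )
            hvm_funcs.cpu_down();
    }

As of this patch SVM wires up only cpu_down (clearing EFER.SVME), while VMX provides both cpu_up and cpu_down.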
diff -r bd2f9628114e -r e704430b5b32 xen/arch/x86/acpi/power.c
--- a/xen/arch/x86/acpi/power.c Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/acpi/power.c Thu Jul 12 11:49:02 2007 +0100
@@ -118,7 +118,7 @@ int enter_state(u32 state)
freeze_domains();
- hvm_suspend_cpu();
+ hvm_cpu_down();
pmprintk(XENLOG_INFO, "PM: Preparing system for %s sleep\n",
acpi_states[state]);
@@ -152,7 +152,8 @@ int enter_state(u32 state)
Done:
local_irq_restore(flags);
- hvm_resume_cpu();
+ if ( !hvm_cpu_up() )
+ BUG();
thaw_domains();
spin_unlock(&pm_lock);
diff -r bd2f9628114e -r e704430b5b32 xen/arch/x86/cpu/common.c
--- a/xen/arch/x86/cpu/common.c Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/cpu/common.c Thu Jul 12 11:49:02 2007 +0100
@@ -557,9 +557,6 @@ void __devinit cpu_init(void)
}
printk(KERN_INFO "Initializing CPU#%d\n", cpu);
- if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
- clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-
*(unsigned short *)(&gdt_load[0]) = LAST_RESERVED_GDT_BYTE;
*(unsigned long *)(&gdt_load[2]) = GDT_VIRT_START(current);
__asm__ __volatile__ ( "lgdt %0" : "=m" (gdt_load) );
diff -r bd2f9628114e -r e704430b5b32 xen/arch/x86/crash.c
--- a/xen/arch/x86/crash.c Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/crash.c Thu Jul 12 11:49:02 2007 +0100
@@ -43,7 +43,7 @@ static int crash_nmi_callback(struct cpu
kexec_crash_save_cpu();
disable_local_APIC();
atomic_dec(&waiting_for_crash_ipi);
- hvm_disable();
+ hvm_cpu_down();
for ( ; ; )
__asm__ __volatile__ ( "hlt" );
@@ -99,7 +99,7 @@ void machine_crash_shutdown(void)
disable_IO_APIC();
- hvm_disable();
+ hvm_cpu_down();
info = kexec_crash_save_info();
info->dom0_pfn_to_mfn_frame_list_list =
diff -r bd2f9628114e -r e704430b5b32 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/domain.c Thu Jul 12 11:49:02 2007 +0100
@@ -43,6 +43,7 @@
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/msr.h>
+#include <asm/nmi.h>
#ifdef CONFIG_COMPAT
#include <compat/vcpu.h>
#endif
@@ -76,10 +77,7 @@ static void default_idle(void)
local_irq_enable();
}
-#ifdef CONFIG_HOTPLUG_CPU
-#include <asm/nmi.h>
-/* We don't actually take CPU down, just spin without interrupts. */
-static inline void play_dead(void)
+static void play_dead(void)
{
__cpu_disable();
/* This must be done before dead CPU ack */
@@ -94,12 +92,6 @@ static inline void play_dead(void)
for ( ; ; )
halt();
}
-#else
-static inline void play_dead(void)
-{
- BUG();
-}
-#endif /* CONFIG_HOTPLUG_CPU */
void idle_loop(void)
{
diff -r bd2f9628114e -r e704430b5b32 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c Thu Jul 12 11:49:02 2007 +0100
@@ -74,11 +74,6 @@ void hvm_enable(struct hvm_function_tabl
hvm_funcs = *fns;
hvm_enabled = 1;
-}
-
-void hvm_disable(void)
-{
- hvm_suspend_cpu();
}
void hvm_stts(struct vcpu *v)
diff -r bd2f9628114e -r e704430b5b32 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Thu Jul 12 11:49:02 2007 +0100
@@ -94,7 +94,7 @@ static void svm_inject_exception(struct
vmcb->eventinj = event;
}
-static void svm_suspend_cpu(void)
+static void svm_cpu_down(void)
{
write_efer(read_efer() & ~EFER_SVME);
}
@@ -973,7 +973,7 @@ static int svm_event_injection_faulted(s
static struct hvm_function_table svm_function_table = {
.name = "SVM",
- .suspend_cpu = svm_suspend_cpu,
+ .cpu_down = svm_cpu_down,
.domain_initialise = svm_domain_initialise,
.domain_destroy = svm_domain_destroy,
.vcpu_initialise = svm_vcpu_initialise,
diff -r bd2f9628114e -r e704430b5b32 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c Thu Jul 12 11:49:02 2007 +0100
@@ -66,7 +66,7 @@ static u32 adjust_vmx_controls(u32 ctl_m
return ctl;
}
-void vmx_init_vmcs_config(void)
+static void vmx_init_vmcs_config(void)
{
u32 vmx_msr_low, vmx_msr_high, min, opt;
u32 _vmx_pin_based_exec_control;
@@ -130,8 +130,9 @@ void vmx_init_vmcs_config(void)
rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
- if ( smp_processor_id() == 0 )
- {
+ if ( !vmx_pin_based_exec_control )
+ {
+ /* First time through. */
vmcs_revision_id = vmx_msr_low;
vmx_pin_based_exec_control = _vmx_pin_based_exec_control;
vmx_cpu_based_exec_control = _vmx_cpu_based_exec_control;
@@ -142,6 +143,7 @@ void vmx_init_vmcs_config(void)
}
else
{
+ /* Globals are already initialised: re-check them. */
BUG_ON(vmcs_revision_id != vmx_msr_low);
BUG_ON(vmx_pin_based_exec_control != _vmx_pin_based_exec_control);
BUG_ON(vmx_cpu_based_exec_control != _vmx_cpu_based_exec_control);
@@ -189,7 +191,7 @@ static void __vmx_clear_vmcs(void *info)
struct vcpu *v = info;
struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
- /* Otherwise we can nest (vmx_suspend_cpu() vs. vmx_clear_vmcs()). */
+ /* Otherwise we can nest (vmx_cpu_down() vs. vmx_clear_vmcs()). */
ASSERT(!local_irq_is_enabled());
if ( arch_vmx->active_cpu == smp_processor_id() )
@@ -234,7 +236,54 @@ static void vmx_load_vmcs(struct vcpu *v
local_irq_restore(flags);
}
-void vmx_suspend_cpu(void)
+int vmx_cpu_up(void)
+{
+ u32 eax, edx;
+ int cpu = smp_processor_id();
+
+ BUG_ON(!(read_cr4() & X86_CR4_VMXE));
+
+ rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
+
+ if ( eax & IA32_FEATURE_CONTROL_MSR_LOCK )
+ {
+ if ( !(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) )
+ {
+ printk("CPU%d: VMX disabled\n", cpu);
+ return 0;
+ }
+ }
+ else
+ {
+ wrmsr(IA32_FEATURE_CONTROL_MSR,
+ IA32_FEATURE_CONTROL_MSR_LOCK |
+ IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON, 0);
+ }
+
+ vmx_init_vmcs_config();
+
+ INIT_LIST_HEAD(&this_cpu(active_vmcs_list));
+
+ if ( this_cpu(host_vmcs) == NULL )
+ {
+ this_cpu(host_vmcs) = vmx_alloc_vmcs();
+ if ( this_cpu(host_vmcs) == NULL )
+ {
+ printk("CPU%d: Could not allocate host VMCS\n", cpu);
+ return 0;
+ }
+ }
+
+ if ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) )
+ {
+ printk("CPU%d: VMXON failed\n", cpu);
+ return 0;
+ }
+
+ return 1;
+}
+
+void vmx_cpu_down(void)
{
struct list_head *active_vmcs_list = &this_cpu(active_vmcs_list);
unsigned long flags;
@@ -245,23 +294,10 @@ void vmx_suspend_cpu(void)
__vmx_clear_vmcs(list_entry(active_vmcs_list->next,
struct vcpu, arch.hvm_vmx.active_list));
- if ( read_cr4() & X86_CR4_VMXE )
- {
- __vmxoff();
- clear_in_cr4(X86_CR4_VMXE);
- }
+ BUG_ON(!(read_cr4() & X86_CR4_VMXE));
+ __vmxoff();
local_irq_restore(flags);
-}
-
-void vmx_resume_cpu(void)
-{
- if ( !read_cr4() & X86_CR4_VMXE )
- {
- set_in_cr4(X86_CR4_VMXE);
- if ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) )
- BUG();
- }
}
void vmx_vmcs_enter(struct vcpu *v)
@@ -292,21 +328,6 @@ void vmx_vmcs_exit(struct vcpu *v)
spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
vcpu_unpause(v);
-}
-
-struct vmcs_struct *vmx_alloc_host_vmcs(void)
-{
- ASSERT(this_cpu(host_vmcs) == NULL);
- this_cpu(host_vmcs) = vmx_alloc_vmcs();
- INIT_LIST_HEAD(&this_cpu(active_vmcs_list));
- return this_cpu(host_vmcs);
-}
-
-void vmx_free_host_vmcs(struct vmcs_struct *vmcs)
-{
- ASSERT(vmcs == this_cpu(host_vmcs));
- vmx_free_vmcs(vmcs);
- this_cpu(host_vmcs) = NULL;
}
struct xgt_desc {
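One detail worth noting in the code removed above: the old vmx_resume_cpu() guard, "if ( !read_cr4() & X86_CR4_VMXE )", parses as "(!read_cr4()) & X86_CR4_VMXE" because "!" binds more tightly than "&". That is a 0-or-1 value masked with bit 13, hence always zero, so the re-VMXON path could never fire. The new split makes the contract explicit instead: the caller sets CR4.VMXE and vmx_cpu_up() asserts it before VMXON. A standalone illustration of the precedence point (the CR4 value below is made up for the demo):

    #include <stdio.h>

    #define X86_CR4_VMXE 0x2000UL                  /* bit 13 */

    int main(void)
    {
        unsigned long cr4 = 0x06f0UL;              /* arbitrary CR4, VMXE clear */

        /* As written in the removed code: '!' applies first, so this is
         * (!cr4) & 0x2000, i.e. 0 or 1 masked with bit 13: always 0. */
        printf("old condition:      %lu\n", !cr4 & X86_CR4_VMXE);

        /* What was presumably intended. */
        printf("intended condition: %d\n", !(cr4 & X86_CR4_VMXE));

        return 0;
    }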
diff -r bd2f9628114e -r e704430b5b32 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Thu Jul 12 11:49:02 2007 +0100
@@ -1262,66 +1262,44 @@ static struct hvm_function_table vmx_fun
.init_ap_context = vmx_init_ap_context,
.init_hypercall_page = vmx_init_hypercall_page,
.event_injection_faulted = vmx_event_injection_faulted,
- .suspend_cpu = vmx_suspend_cpu,
- .resume_cpu = vmx_resume_cpu,
+ .cpu_up = vmx_cpu_up,
+ .cpu_down = vmx_cpu_down,
};
-int start_vmx(void)
-{
- u32 eax, edx;
- struct vmcs_struct *vmcs;
-
- /*
- * Xen does not fill x86_capability words except 0.
- */
+void start_vmx(void)
+{
+ static int bootstrapped;
+
+ if ( bootstrapped )
+ {
+ if ( hvm_enabled && !vmx_cpu_up() )
+ {
+ printk("VMX: FATAL: failed to initialise CPU%d!\n",
+ smp_processor_id());
+ BUG();
+ }
+ return;
+ }
+
+ bootstrapped = 1;
+
+ /* Xen does not fill x86_capability words except 0. */
boot_cpu_data.x86_capability[4] = cpuid_ecx(1);
if ( !test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability) )
- return 0;
-
- rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
-
- if ( eax & IA32_FEATURE_CONTROL_MSR_LOCK )
- {
- if ( (eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0 )
- {
- printk("VMX disabled by Feature Control MSR.\n");
- return 0;
- }
- }
- else
- {
- wrmsr(IA32_FEATURE_CONTROL_MSR,
- IA32_FEATURE_CONTROL_MSR_LOCK |
- IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON, 0);
- }
+ return;
set_in_cr4(X86_CR4_VMXE);
- vmx_init_vmcs_config();
-
- if ( smp_processor_id() == 0 )
- setup_vmcs_dump();
-
- if ( (vmcs = vmx_alloc_host_vmcs()) == NULL )
- {
- clear_in_cr4(X86_CR4_VMXE);
- printk("Failed to allocate host VMCS\n");
- return 0;
- }
-
- if ( __vmxon(virt_to_maddr(vmcs)) )
- {
- clear_in_cr4(X86_CR4_VMXE);
- printk("VMXON failed\n");
- vmx_free_host_vmcs(vmcs);
- return 0;
- }
+ if ( !vmx_cpu_up() )
+ {
+ printk("VMX: failed to initialise.\n");
+ return;
+ }
+
+ setup_vmcs_dump();
vmx_save_host_msrs();
-
- if ( smp_processor_id() != 0 )
- return 1;
hvm_enable(&vmx_function_table);
@@ -1339,8 +1317,6 @@ int start_vmx(void)
disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP);
disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP);
}
-
- return 1;
}
/*
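start_vmx() now keys its behaviour off a function-local static instead of smp_processor_id() == 0: the first caller (the boot CPU) performs the one-time setup, and every later caller only brings its own CPU up, or BUG()s if that fails once VMX has been advertised. The idiom, reduced to a standalone toy (the names here are illustrative, not Xen's):

    #include <stdio.h>

    static void per_cpu_setup(int cpu) { printf("CPU%d: local init\n", cpu); }

    static void subsystem_init(int cpu)
    {
        static int bootstrapped;   /* set exactly once, by the first caller */

        if ( bootstrapped )
        {
            per_cpu_setup(cpu);    /* later CPUs: local work only */
            return;
        }
        bootstrapped = 1;

        printf("CPU%d: one-time global init\n", cpu);
        per_cpu_setup(cpu);
    }

    int main(void)
    {
        subsystem_init(0);         /* boot CPU */
        subsystem_init(1);         /* a secondary CPU */
        return 0;
    }

In the real start_vmx() the one-time branch also records CR4.VMXE via set_in_cr4(), so the bit lands in mmu_cr4_features and, presumably via the secondary-CPU start-up path loading CR4 from there, later CPUs can satisfy vmx_cpu_up()'s BUG_ON(!(read_cr4() & X86_CR4_VMXE)).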
diff -r bd2f9628114e -r e704430b5b32 xen/arch/x86/i8259.c
--- a/xen/arch/x86/i8259.c Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/i8259.c Thu Jul 12 11:49:02 2007 +0100
@@ -397,7 +397,8 @@ void __init init_IRQ(void)
irq_desc[i].depth = 1;
spin_lock_init(&irq_desc[i].lock);
cpus_setall(irq_desc[i].affinity);
- set_intr_gate(i, interrupt[i]);
+ if ( i >= 0x20 )
+ set_intr_gate(i, interrupt[i]);
}
for ( i = 0; i < 16; i++ )
diff -r bd2f9628114e -r e704430b5b32 xen/arch/x86/machine_kexec.c
--- a/xen/arch/x86/machine_kexec.c Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/machine_kexec.c Thu Jul 12 11:49:02 2007 +0100
@@ -82,10 +82,8 @@ static void __machine_reboot_kexec(void
smp_send_stop();
-#ifdef CONFIG_X86_IO_APIC
disable_IO_APIC();
-#endif
- hvm_disable();
+ hvm_cpu_down();
machine_kexec(image);
}
diff -r bd2f9628114e -r e704430b5b32 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/mm.c Thu Jul 12 11:49:02 2007 +0100
@@ -3642,8 +3642,6 @@ static void __memguard_change_range(void
unsigned long flags = __PAGE_HYPERVISOR | MAP_SMALL_PAGES;
/* Ensure we are dealing with a page-aligned whole number of pages. */
- ASSERT((_p&PAGE_MASK) != 0);
- ASSERT((_l&PAGE_MASK) != 0);
ASSERT((_p&~PAGE_MASK) == 0);
ASSERT((_l&~PAGE_MASK) == 0);
diff -r bd2f9628114e -r e704430b5b32 xen/arch/x86/shutdown.c
--- a/xen/arch/x86/shutdown.c Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/shutdown.c Thu Jul 12 11:49:02 2007 +0100
@@ -222,7 +222,7 @@ void machine_restart(char *cmd)
*/
smp_send_stop();
disable_IO_APIC();
- hvm_disable();
+ hvm_cpu_down();
/* Rebooting needs to touch the page at absolute address 0. */
*((unsigned short *)__va(0x472)) = reboot_mode;
diff -r bd2f9628114e -r e704430b5b32 xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/smp.c Thu Jul 12 11:49:02 2007 +0100
@@ -310,7 +310,7 @@ static void stop_this_cpu (void *dummy)
local_irq_disable();
disable_local_APIC();
- hvm_disable();
+ hvm_cpu_down();
for ( ; ; )
__asm__ __volatile__ ( "hlt" );
diff -r bd2f9628114e -r e704430b5b32 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/smpboot.c Thu Jul 12 11:49:02 2007 +0100
@@ -87,11 +87,7 @@ cpumask_t cpu_callin_map;
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);
-#ifdef CONFIG_HOTPLUG_CPU
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
-#else
cpumask_t cpu_possible_map;
-#endif
EXPORT_SYMBOL(cpu_possible_map);
static cpumask_t smp_commenced_mask;
diff -r bd2f9628114e -r e704430b5b32 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h Thu Jul 12 11:49:02 2007 +0100
@@ -156,8 +156,8 @@ struct hvm_function_table {
int (*event_injection_faulted)(struct vcpu *v);
- void (*suspend_cpu)(void);
- void (*resume_cpu)(void);
+ int (*cpu_up)(void);
+ void (*cpu_down)(void);
};
extern struct hvm_function_table hvm_funcs;
@@ -314,16 +314,17 @@ static inline int hvm_event_injection_fa
/* These exceptions must always be intercepted. */
#define HVM_TRAP_MASK (1U << TRAP_machine_check)
-static inline void hvm_suspend_cpu(void)
-{
- if ( hvm_funcs.suspend_cpu )
- hvm_funcs.suspend_cpu();
-}
-
-static inline void hvm_resume_cpu(void)
-{
- if ( hvm_funcs.resume_cpu )
- hvm_funcs.resume_cpu();
+static inline int hvm_cpu_up(void)
+{
+ if ( hvm_funcs.cpu_up )
+ return hvm_funcs.cpu_up();
+ return 1;
+}
+
+static inline void hvm_cpu_down(void)
+{
+ if ( hvm_funcs.cpu_down )
+ hvm_funcs.cpu_down();
}
#endif /* __ASM_X86_HVM_HVM_H__ */
diff -r bd2f9628114e -r e704430b5b32 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/include/asm-x86/hvm/support.h Thu Jul 12 11:49:02 2007 +0100
@@ -217,7 +217,6 @@ extern char hvm_io_bitmap[];
extern char hvm_io_bitmap[];
void hvm_enable(struct hvm_function_table *);
-void hvm_disable(void);
int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size);
diff -r bd2f9628114e -r e704430b5b32 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Thu Jul 12 11:49:02 2007 +0100
@@ -24,12 +24,11 @@
#include <asm/hvm/vmx/cpu.h>
#include <public/hvm/vmx_assist.h>
-extern int start_vmx(void);
+extern void start_vmx(void);
extern void vmcs_dump_vcpu(void);
-extern void vmx_init_vmcs_config(void);
extern void setup_vmcs_dump(void);
-extern void vmx_suspend_cpu(void);
-extern void vmx_resume_cpu(void);
+extern int vmx_cpu_up(void);
+extern void vmx_cpu_down(void);
struct vmcs_struct {
u32 vmcs_revision_id;
@@ -88,9 +87,6 @@ struct arch_vmx_struct {
unsigned long irqbase_mode:1;
unsigned char pm_irqbase[2];
};
-
-struct vmcs_struct *vmx_alloc_host_vmcs(void);
-void vmx_free_host_vmcs(struct vmcs_struct *vmcs);
int vmx_create_vmcs(struct vcpu *v);
void vmx_destroy_vmcs(struct vcpu *v);
diff -r bd2f9628114e -r e704430b5b32 xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/include/asm-x86/page.h Thu Jul 12 11:49:02 2007 +0100
@@ -294,19 +294,8 @@ void setup_idle_pagetable(void);
void setup_idle_pagetable(void);
#endif /* !defined(__ASSEMBLY__) */
-#define __pge_off() \
- do { \
- __asm__ __volatile__( \
- "mov %0, %%cr4; # turn off PGE " \
- : : "r" (mmu_cr4_features & ~X86_CR4_PGE) ); \
- } while ( 0 )
-
-#define __pge_on() \
- do { \
- __asm__ __volatile__( \
- "mov %0, %%cr4; # turn off PGE " \
- : : "r" (mmu_cr4_features) ); \
- } while ( 0 )
+#define __pge_off() write_cr4(mmu_cr4_features & ~X86_CR4_PGE)
+#define __pge_on() write_cr4(mmu_cr4_features)
#define _PAGE_PRESENT 0x001U
#define _PAGE_RW 0x002U
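The page.h hunk above folds two open-coded asm blocks into the existing write_cr4() accessor; the effect is the same mov to %cr4, without duplicating the asm. Clearing and re-setting CR4.PGE this way is what callers rely on to flush global TLB entries along with ordinary ones. A small usage sketch (the helper name below is illustrative, not something this patch adds):

    static void example_flush_tlb_global(void)
    {
        unsigned long flags;

        local_irq_save(flags);   /* keep the off/on pair atomic on this CPU */
        __pge_off();             /* write_cr4(mmu_cr4_features & ~X86_CR4_PGE) */
        __pge_on();              /* write_cr4(mmu_cr4_features) */
        local_irq_restore(flags);
    }

Relatedly, the processor.h hunk that follows drops clear_in_cr4() altogether, so bits recorded in mmu_cr4_features are now only ever set; the temporary PGE clear above deliberately writes CR4 without touching mmu_cr4_features.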
diff -r bd2f9628114e -r e704430b5b32 xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/include/asm-x86/processor.h Thu Jul 12 11:49:02 2007 +0100
@@ -331,24 +331,8 @@ extern unsigned long mmu_cr4_features;
static always_inline void set_in_cr4 (unsigned long mask)
{
- unsigned long dummy;
mmu_cr4_features |= mask;
- __asm__ __volatile__ (
- "mov %%cr4,%0\n\t"
- "or %1,%0\n\t"
- "mov %0,%%cr4\n"
- : "=&r" (dummy) : "irg" (mask) );
-}
-
-static always_inline void clear_in_cr4 (unsigned long mask)
-{
- unsigned long dummy;
- mmu_cr4_features &= ~mask;
- __asm__ __volatile__ (
- "mov %%cr4,%0\n\t"
- "and %1,%0\n\t"
- "mov %0,%%cr4\n"
- : "=&r" (dummy) : "irg" (~mask) );
+ write_cr4(mmu_cr4_features);
}
/*
diff -r bd2f9628114e -r e704430b5b32 xen/include/xen/cpumask.h
--- a/xen/include/xen/cpumask.h Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/include/xen/cpumask.h Thu Jul 12 11:49:02 2007 +0100
@@ -305,7 +305,7 @@ static inline int __cpulist_scnprintf(ch
* bitmap of size NR_CPUS.
*
* #ifdef CONFIG_HOTPLUG_CPU
- * cpu_possible_map - all NR_CPUS bits set
+ * cpu_possible_map - has bit 'cpu' set iff cpu is populatable
* cpu_present_map - has bit 'cpu' set iff cpu is populated
* cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
* #else