# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 9f9f569b0a1db07697426d5ebeb175b07ef034ed
# Parent 6555ca56d844c149fd457aa4f38a340c321b8ec3
[VMX] __vmread() and __vmwrite() now BUG on failure, rather than
returning the error to the caller.
None of our VMCS reads/writes should fail. Many callers were
ignoring failures, which can hide bugs (indeed, it hid one in
vmx_restore_dr(), which incorrectly used __vmread() before
the VMCS was loaded).
__vmread_safe() is used by the VMCS dump function, the only
function that can legitimately fail a VMCS access.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
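For readers skimming the diff below, a minimal caller-side sketch of the
interface change (illustrative only; the example() wrapper is hypothetical,
while the field and function names are the ones used in this patch):

    static void example(void)
    {
        unsigned long rip, val;
        int err;

        /* Old interface (removed here): an error code was returned and
         * was frequently ignored by callers. */
        /*   err = __vmread(GUEST_RIP, &rip); */

        /* New interface: the value is returned directly; a failing
         * VMREAD or VMWRITE executes ud2 and crashes Xen instead of
         * returning an error code. */
        rip = __vmread(GUEST_RIP);
        __vmwrite(GUEST_RIP, rip);

        /* Only the VMCS dump path may legitimately fault a VMCS access,
         * so it uses __vmread_safe() and checks the error flag. */
        val = __vmread_safe(GUEST_RIP, &err);
        if ( err == 0 )
            (void)val;   /* field was readable */
    }

The net effect is that silent failures become loud crashes, while the dump
code keeps a non-fatal path.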
xen/arch/x86/hvm/svm/svm.c | 34 +-
xen/arch/x86/hvm/svm/vmcb.c | 5
xen/arch/x86/hvm/vmx/io.c | 18 -
xen/arch/x86/hvm/vmx/vmcs.c | 215 ++++++++--------
xen/arch/x86/hvm/vmx/vmx.c | 485 ++++++++++++++++++--------------------
xen/include/asm-x86/hvm/vmx/vmx.h | 107 +++-----
6 files changed, 399 insertions(+), 465 deletions(-)
diff -r 6555ca56d844 -r 9f9f569b0a1d xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Wed Nov 08 15:11:18 2006 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c Wed Nov 08 16:43:50 2006 +0000
@@ -424,17 +424,21 @@ static inline int long_mode_do_msr_write
static inline void svm_save_dr(struct vcpu *v)
{
- if (v->arch.hvm_vcpu.flag_dr_dirty)
- {
- /* clear the DR dirty flag and re-enable intercepts for DR accesses */
- v->arch.hvm_vcpu.flag_dr_dirty = 0;
- v->arch.hvm_svm.vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
-
- savedebug(&v->arch.guest_context, 0);
- savedebug(&v->arch.guest_context, 1);
- savedebug(&v->arch.guest_context, 2);
- savedebug(&v->arch.guest_context, 3);
- }
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+ if ( !v->arch.hvm_vcpu.flag_dr_dirty )
+ return;
+
+ /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
+ v->arch.hvm_vcpu.flag_dr_dirty = 0;
+ v->arch.hvm_svm.vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
+
+ savedebug(&v->arch.guest_context, 0);
+ savedebug(&v->arch.guest_context, 1);
+ savedebug(&v->arch.guest_context, 2);
+ savedebug(&v->arch.guest_context, 3);
+ v->arch.guest_context.debugreg[6] = vmcb->dr6;
+ v->arch.guest_context.debugreg[7] = vmcb->dr7;
}
@@ -444,17 +448,13 @@ static inline void __restore_debug_regis
loaddebug(&v->arch.guest_context, 1);
loaddebug(&v->arch.guest_context, 2);
loaddebug(&v->arch.guest_context, 3);
+ /* DR6 and DR7 are loaded from the VMCB. */
}
static inline void svm_restore_dr(struct vcpu *v)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
- if (!vmcb)
- return;
-
- if (unlikely(vmcb->dr7 & 0xFF))
+ if ( unlikely(v->arch.guest_context.debugreg[7] & 0xFF) )
__restore_debug_registers(v);
}
diff -r 6555ca56d844 -r 9f9f569b0a1d xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c Wed Nov 08 15:11:18 2006 +0000
+++ b/xen/arch/x86/hvm/svm/vmcb.c Wed Nov 08 16:43:50 2006 +0000
@@ -91,7 +91,6 @@ static int construct_vmcb(struct vcpu *v
struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
struct vmcb_struct *vmcb = arch_svm->vmcb;
segment_attributes_t attrib;
- unsigned long dr7;
/* Always flush the TLB on VMRUN. */
vmcb->tlb_control = 1;
@@ -203,10 +202,6 @@ static int construct_vmcb(struct vcpu *v
arch_svm->cpu_shadow_cr4 =
read_cr4() & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE);
vmcb->cr4 = arch_svm->cpu_shadow_cr4 | SVM_CR4_HOST_MASK;
-
- /* Guest DR7. */
- __asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (dr7));
- vmcb->dr7 = dr7;
shadow_update_paging_modes(v);
vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
diff -r 6555ca56d844 -r 9f9f569b0a1d xen/arch/x86/hvm/vmx/io.c
--- a/xen/arch/x86/hvm/vmx/io.c Wed Nov 08 15:11:18 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/io.c Wed Nov 08 16:43:50 2006 +0000
@@ -63,9 +63,7 @@ disable_irq_window(struct vcpu *v)
static inline int is_interruptibility_state(void)
{
- int interruptibility;
- __vmread(GUEST_INTERRUPTIBILITY_INFO, &interruptibility);
- return interruptibility;
+ return __vmread(GUEST_INTERRUPTIBILITY_INFO);
}
#ifdef __x86_64__
@@ -129,7 +127,7 @@ asmlinkage void vmx_intr_assist(void)
}
/* This could be moved earlier in the VMX resume sequence. */
- __vmread(IDT_VECTORING_INFO_FIELD, &idtv_info_field);
+ idtv_info_field = __vmread(IDT_VECTORING_INFO_FIELD);
if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
__vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
@@ -138,14 +136,12 @@ asmlinkage void vmx_intr_assist(void)
* and interrupts. If we get here then delivery of some event caused a
* fault, and this always results in defined VM_EXIT_INSTRUCTION_LEN.
*/
- __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len); /* Safe */
+ inst_len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe */
__vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
- if (unlikely(idtv_info_field & 0x800)) { /* valid error code */
- unsigned long error_code;
- __vmread(IDT_VECTORING_ERROR_CODE, &error_code);
- __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
- }
+ if (unlikely(idtv_info_field & 0x800)) /* valid error code */
+ __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
+ __vmread(IDT_VECTORING_ERROR_CODE));
if (unlikely(has_ext_irq))
enable_irq_window(v);
@@ -163,7 +159,7 @@ asmlinkage void vmx_intr_assist(void)
return;
}
- __vmread(GUEST_RFLAGS, &eflags);
+ eflags = __vmread(GUEST_RFLAGS);
if (irq_masked(eflags)) {
enable_irq_window(v);
return;
diff -r 6555ca56d844 -r 9f9f569b0a1d xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c Wed Nov 08 15:11:18 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c Wed Nov 08 16:43:50 2006 +0000
@@ -257,7 +257,7 @@ struct host_execution_env {
static void vmx_set_host_env(struct vcpu *v)
{
- unsigned int tr, cpu, error = 0;
+ unsigned int tr, cpu;
struct host_execution_env host_env;
struct Xgt_desc_struct desc;
@@ -265,93 +265,95 @@ static void vmx_set_host_env(struct vcpu
__asm__ __volatile__ ("sidt (%0) \n" :: "a"(&desc) : "memory");
host_env.idtr_limit = desc.size;
host_env.idtr_base = desc.address;
- error |= __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);
+ __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);
__asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory");
host_env.gdtr_limit = desc.size;
host_env.gdtr_base = desc.address;
- error |= __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);
+ __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);
__asm__ __volatile__ ("str (%0) \n" :: "a"(&tr) : "memory");
host_env.tr_selector = tr;
host_env.tr_limit = sizeof(struct tss_struct);
host_env.tr_base = (unsigned long) &init_tss[cpu];
- error |= __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
- error |= __vmwrite(HOST_TR_BASE, host_env.tr_base);
- error |= __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
-}
-
-static int construct_vmcs(struct vcpu *v)
-{
- int error = 0;
- unsigned long tmp, cr0, cr4;
+ __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
+ __vmwrite(HOST_TR_BASE, host_env.tr_base);
+ __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
+}
+
+static void construct_vmcs(struct vcpu *v)
+{
+ unsigned long cr0, cr4;
union vmcs_arbytes arbytes;
vmx_vmcs_enter(v);
/* VMCS controls. */
- error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
- error |= __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
- error |= __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
- error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
+ __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
+ __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
+ __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
+ __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
/* Host data selectors. */
- error |= __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
- error |= __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
- error |= __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
+ __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
+ __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
+ __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
#if defined(__i386__)
- error |= __vmwrite(HOST_FS_SELECTOR, __HYPERVISOR_DS);
- error |= __vmwrite(HOST_GS_SELECTOR, __HYPERVISOR_DS);
- error |= __vmwrite(HOST_FS_BASE, 0);
- error |= __vmwrite(HOST_GS_BASE, 0);
+ __vmwrite(HOST_FS_SELECTOR, __HYPERVISOR_DS);
+ __vmwrite(HOST_GS_SELECTOR, __HYPERVISOR_DS);
+ __vmwrite(HOST_FS_BASE, 0);
+ __vmwrite(HOST_GS_BASE, 0);
#elif defined(__x86_64__)
- rdmsrl(MSR_FS_BASE, tmp); error |= __vmwrite(HOST_FS_BASE, tmp);
- rdmsrl(MSR_GS_BASE, tmp); error |= __vmwrite(HOST_GS_BASE, tmp);
+ {
+ unsigned long msr;
+ rdmsrl(MSR_FS_BASE, msr); __vmwrite(HOST_FS_BASE, msr);
+ rdmsrl(MSR_GS_BASE, msr); __vmwrite(HOST_GS_BASE, msr);
+ }
#endif
/* Host control registers. */
- error |= __vmwrite(HOST_CR0, read_cr0());
- error |= __vmwrite(HOST_CR4, read_cr4());
+ __vmwrite(HOST_CR0, read_cr0());
+ __vmwrite(HOST_CR4, read_cr4());
/* Host CS:RIP. */
- error |= __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
- error |= __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler);
+ __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
+ __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler);
/* MSR intercepts. */
- error |= __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
- error |= __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);
- error |= __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
- error |= __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
- error |= __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
-
- error |= __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
-
- error |= __vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
- error |= __vmwrite(CR4_GUEST_HOST_MASK, ~0UL);
-
- error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
- error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
-
- error |= __vmwrite(CR3_TARGET_COUNT, 0);
-
- error |= __vmwrite(GUEST_ACTIVITY_STATE, 0);
+ __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
+ __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);
+ __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
+ __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
+ __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
+
+ __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
+
+ __vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
+ __vmwrite(CR4_GUEST_HOST_MASK, ~0UL);
+
+ __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
+ __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
+
+ __vmwrite(CR3_TARGET_COUNT, 0);
+
+ __vmwrite(GUEST_ACTIVITY_STATE, 0);
/* Guest segment bases. */
- error |= __vmwrite(GUEST_ES_BASE, 0);
- error |= __vmwrite(GUEST_SS_BASE, 0);
- error |= __vmwrite(GUEST_DS_BASE, 0);
- error |= __vmwrite(GUEST_FS_BASE, 0);
- error |= __vmwrite(GUEST_GS_BASE, 0);
- error |= __vmwrite(GUEST_CS_BASE, 0);
+ __vmwrite(GUEST_ES_BASE, 0);
+ __vmwrite(GUEST_SS_BASE, 0);
+ __vmwrite(GUEST_DS_BASE, 0);
+ __vmwrite(GUEST_FS_BASE, 0);
+ __vmwrite(GUEST_GS_BASE, 0);
+ __vmwrite(GUEST_CS_BASE, 0);
/* Guest segment limits. */
- error |= __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
- error |= __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
- error |= __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
- error |= __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
- error |= __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);
- error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);
+ __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
+ __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
+ __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
+ __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
+ __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);
+ __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);
/* Guest segment AR bytes. */
arbytes.bytes = 0;
@@ -362,82 +364,77 @@ static int construct_vmcs(struct vcpu *v
arbytes.fields.default_ops_size = 1; /* 32-bit */
arbytes.fields.g = 1;
arbytes.fields.null_bit = 0; /* not null */
- error |= __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
- error |= __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
- error |= __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
- error |= __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
- error |= __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);
+ __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
+ __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
+ __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
+ __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
+ __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);
arbytes.fields.seg_type = 0xb; /* type = 0xb */
- error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);
+ __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);
/* Guest GDT. */
- error |= __vmwrite(GUEST_GDTR_BASE, 0);
- error |= __vmwrite(GUEST_GDTR_LIMIT, 0);
+ __vmwrite(GUEST_GDTR_BASE, 0);
+ __vmwrite(GUEST_GDTR_LIMIT, 0);
/* Guest IDT. */
- error |= __vmwrite(GUEST_IDTR_BASE, 0);
- error |= __vmwrite(GUEST_IDTR_LIMIT, 0);
+ __vmwrite(GUEST_IDTR_BASE, 0);
+ __vmwrite(GUEST_IDTR_LIMIT, 0);
/* Guest LDT and TSS. */
arbytes.fields.s = 0; /* not code or data segement */
arbytes.fields.seg_type = 0x2; /* LTD */
arbytes.fields.default_ops_size = 0; /* 16-bit */
arbytes.fields.g = 0;
- error |= __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);
+ __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);
arbytes.fields.seg_type = 0xb; /* 32-bit TSS (busy) */
- error |= __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);
-
- error |= __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
- __asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (tmp));
- error |= __vmwrite(GUEST_DR7, tmp);
- error |= __vmwrite(VMCS_LINK_POINTER, ~0UL);
+ __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);
+
+ __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
+ __vmwrite(GUEST_DR7, 0);
+ __vmwrite(VMCS_LINK_POINTER, ~0UL);
#if defined(__i386__)
- error |= __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
-#endif
-
- error |= __vmwrite(EXCEPTION_BITMAP,
- MONITOR_DEFAULT_EXCEPTION_BITMAP);
+ __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
+#endif
+
+ __vmwrite(EXCEPTION_BITMAP, MONITOR_DEFAULT_EXCEPTION_BITMAP);
/* Guest CR0. */
cr0 = read_cr0();
v->arch.hvm_vmx.cpu_cr0 = cr0;
- error |= __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
+ __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
v->arch.hvm_vmx.cpu_shadow_cr0 = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
- error |= __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+ __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
/* Guest CR4. */
cr4 = read_cr4();
- error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
+ __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
v->arch.hvm_vmx.cpu_shadow_cr4 =
cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
- error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+ __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
#ifdef __x86_64__
/* VLAPIC TPR optimisation. */
v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_TPR_SHADOW;
v->arch.hvm_vcpu.u.vmx.exec_control &=
~(CPU_BASED_CR8_STORE_EXITING | CPU_BASED_CR8_LOAD_EXITING);
- error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
- v->arch.hvm_vcpu.u.vmx.exec_control);
- error |= __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
- page_to_maddr(vcpu_vlapic(v)->regs_page));
- error |= __vmwrite(TPR_THRESHOLD, 0);
-#endif
-
- error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
- error |= __vmwrite(GUEST_LDTR_BASE, 0);
- error |= __vmwrite(GUEST_LDTR_LIMIT, 0);
-
- error |= __vmwrite(GUEST_TR_BASE, 0);
- error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
+ __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vcpu.u.vmx.exec_control);
+ __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
+ page_to_maddr(vcpu_vlapic(v)->regs_page));
+ __vmwrite(TPR_THRESHOLD, 0);
+#endif
+
+ __vmwrite(GUEST_LDTR_SELECTOR, 0);
+ __vmwrite(GUEST_LDTR_BASE, 0);
+ __vmwrite(GUEST_LDTR_LIMIT, 0);
+
+ __vmwrite(GUEST_TR_BASE, 0);
+ __vmwrite(GUEST_TR_LIMIT, 0xff);
shadow_update_paging_modes(v);
__vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
__vmwrite(HOST_CR3, v->arch.cr3);
vmx_vmcs_exit(v);
-
- return error;
}
int vmx_create_vmcs(struct vcpu *v)
@@ -446,13 +443,8 @@ int vmx_create_vmcs(struct vcpu *v)
return -ENOMEM;
__vmx_clear_vmcs(v);
-
- if ( construct_vmcs(v) != 0 )
- {
- vmx_free_vmcs(v->arch.hvm_vmx.vmcs);
- v->arch.hvm_vmx.vmcs = NULL;
- return -EINVAL;
- }
+
+ construct_vmcs(v);
return 0;
}
@@ -472,16 +464,14 @@ void vmx_destroy_vmcs(struct vcpu *v)
void vm_launch_fail(unsigned long eflags)
{
- unsigned long error;
- __vmread(VM_INSTRUCTION_ERROR, &error);
+ unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
printk("<vm_launch_fail> error code %lx\n", error);
__hvm_bug(guest_cpu_user_regs());
}
void vm_resume_fail(unsigned long eflags)
{
- unsigned long error;
- __vmread(VM_INSTRUCTION_ERROR, &error);
+ unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
printk("<vm_resume_fail> error code %lx\n", error);
__hvm_bug(guest_cpu_user_regs());
}
@@ -510,7 +500,7 @@ static void print_section(char *header,
{
uint32_t addr, j;
unsigned long val;
- int code;
+ int code, rc;
char *fmt[4] = {"0x%04lx ", "0x%016lx ", "0x%08lx ", "0x%016lx "};
char *err[4] = {"------ ", "------------------ ",
"---------- ", "------------------ "};
@@ -526,7 +516,8 @@ static void print_section(char *header,
if (!(j&3))
printk("\n\t\t0x%08x: ", addr);
- if (!__vmread(addr, &val))
+ val = __vmread_safe(addr, &rc);
+ if (rc == 0)
printk(fmt[code], val);
else
printk("%s", err[code]);
diff -r 6555ca56d844 -r 9f9f569b0a1d xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Wed Nov 08 15:11:18 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c Wed Nov 08 16:43:50 2006 +0000
@@ -154,14 +154,14 @@ static inline int long_mode_do_msr_read(
/* XXX should it be GP fault */
domain_crash_synchronous();
- __vmread(GUEST_FS_BASE, &msr_content);
+ msr_content = __vmread(GUEST_FS_BASE);
break;
case MSR_GS_BASE:
if ( !(vmx_long_mode_enabled(v)) )
domain_crash_synchronous();
- __vmread(GUEST_GS_BASE, &msr_content);
+ msr_content = __vmread(GUEST_GS_BASE);
break;
case MSR_SHADOW_GS_BASE:
@@ -323,20 +323,20 @@ static inline int long_mode_do_msr_write
static inline void vmx_save_dr(struct vcpu *v)
{
- if ( v->arch.hvm_vcpu.flag_dr_dirty )
- {
- savedebug(&v->arch.guest_context, 0);
- savedebug(&v->arch.guest_context, 1);
- savedebug(&v->arch.guest_context, 2);
- savedebug(&v->arch.guest_context, 3);
- savedebug(&v->arch.guest_context, 6);
-
- v->arch.hvm_vcpu.flag_dr_dirty = 0;
-
- v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
- __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
- v->arch.hvm_vcpu.u.vmx.exec_control);
- }
+ if ( !v->arch.hvm_vcpu.flag_dr_dirty )
+ return;
+
+ /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
+ v->arch.hvm_vcpu.flag_dr_dirty = 0;
+ v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
+ __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vcpu.u.vmx.exec_control);
+
+ savedebug(&v->arch.guest_context, 0);
+ savedebug(&v->arch.guest_context, 1);
+ savedebug(&v->arch.guest_context, 2);
+ savedebug(&v->arch.guest_context, 3);
+ savedebug(&v->arch.guest_context, 6);
+ v->arch.guest_context.debugreg[7] = __vmread(GUEST_DR7);
}
static inline void __restore_debug_registers(struct vcpu *v)
@@ -347,7 +347,7 @@ static inline void __restore_debug_regis
loaddebug(&v->arch.guest_context, 3);
/* No 4 and 5 */
loaddebug(&v->arch.guest_context, 6);
- /* DR7 is loaded from the vmcs. */
+ /* DR7 is loaded from the VMCS. */
}
/*
@@ -355,21 +355,13 @@ static inline void __restore_debug_regis
* need to be restored if their value is going to affect execution -- i.e.,
* if one of the breakpoints is enabled. So mask out all bits that don't
* enable some breakpoint functionality.
- *
- * This is in part necessary because bit 10 of DR7 is hardwired to 1, so a
- * simple if( guest_dr7 ) will always return true. As long as we're masking,
- * we might as well do it right.
*/
#define DR7_ACTIVE_MASK 0xff
static inline void vmx_restore_dr(struct vcpu *v)
{
- unsigned long guest_dr7;
-
- __vmread(GUEST_DR7, &guest_dr7);
-
- /* Assumes guest does not have DR access at time of context switch. */
- if ( unlikely(guest_dr7 & DR7_ACTIVE_MASK) )
+ /* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
+ if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
__restore_debug_registers(v);
}
@@ -430,22 +422,22 @@ static void vmx_store_cpu_guest_regs(
if ( regs != NULL )
{
- __vmread(GUEST_RFLAGS, &regs->eflags);
- __vmread(GUEST_SS_SELECTOR, &regs->ss);
- __vmread(GUEST_CS_SELECTOR, &regs->cs);
- __vmread(GUEST_DS_SELECTOR, &regs->ds);
- __vmread(GUEST_ES_SELECTOR, &regs->es);
- __vmread(GUEST_GS_SELECTOR, &regs->gs);
- __vmread(GUEST_FS_SELECTOR, &regs->fs);
- __vmread(GUEST_RIP, &regs->eip);
- __vmread(GUEST_RSP, &regs->esp);
+ regs->eflags = __vmread(GUEST_RFLAGS);
+ regs->ss = __vmread(GUEST_SS_SELECTOR);
+ regs->cs = __vmread(GUEST_CS_SELECTOR);
+ regs->ds = __vmread(GUEST_DS_SELECTOR);
+ regs->es = __vmread(GUEST_ES_SELECTOR);
+ regs->gs = __vmread(GUEST_GS_SELECTOR);
+ regs->fs = __vmread(GUEST_FS_SELECTOR);
+ regs->eip = __vmread(GUEST_RIP);
+ regs->esp = __vmread(GUEST_RSP);
}
if ( crs != NULL )
{
crs[0] = v->arch.hvm_vmx.cpu_shadow_cr0;
crs[2] = v->arch.hvm_vmx.cpu_cr2;
- __vmread(GUEST_CR3, &crs[3]);
+ crs[3] = __vmread(GUEST_CR3);
crs[4] = v->arch.hvm_vmx.cpu_shadow_cr4;
}
@@ -466,29 +458,26 @@ static void vmx_store_cpu_guest_regs(
*/
static void fixup_vm86_seg_bases(struct cpu_user_regs *regs)
{
- int err = 0;
unsigned long base;
- err |= __vmread(GUEST_ES_BASE, &base);
+ base = __vmread(GUEST_ES_BASE);
if (regs->es << 4 != base)
- err |= __vmwrite(GUEST_ES_BASE, regs->es << 4);
- err |= __vmread(GUEST_CS_BASE, &base);
+ __vmwrite(GUEST_ES_BASE, regs->es << 4);
+ base = __vmread(GUEST_CS_BASE);
if (regs->cs << 4 != base)
- err |= __vmwrite(GUEST_CS_BASE, regs->cs << 4);
- err |= __vmread(GUEST_SS_BASE, &base);
+ __vmwrite(GUEST_CS_BASE, regs->cs << 4);
+ base = __vmread(GUEST_SS_BASE);
if (regs->ss << 4 != base)
- err |= __vmwrite(GUEST_SS_BASE, regs->ss << 4);
- err |= __vmread(GUEST_DS_BASE, &base);
+ __vmwrite(GUEST_SS_BASE, regs->ss << 4);
+ base = __vmread(GUEST_DS_BASE);
if (regs->ds << 4 != base)
- err |= __vmwrite(GUEST_DS_BASE, regs->ds << 4);
- err |= __vmread(GUEST_FS_BASE, &base);
+ __vmwrite(GUEST_DS_BASE, regs->ds << 4);
+ base = __vmread(GUEST_FS_BASE);
if (regs->fs << 4 != base)
- err |= __vmwrite(GUEST_FS_BASE, regs->fs << 4);
- err |= __vmread(GUEST_GS_BASE, &base);
+ __vmwrite(GUEST_FS_BASE, regs->fs << 4);
+ base = __vmread(GUEST_GS_BASE);
if (regs->gs << 4 != base)
- err |= __vmwrite(GUEST_GS_BASE, regs->gs << 4);
-
- BUG_ON(err);
+ __vmwrite(GUEST_GS_BASE, regs->gs << 4);
}
static void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
@@ -605,7 +594,7 @@ static int vmx_realmode(struct vcpu *v)
ASSERT(v == current);
- __vmread(GUEST_RFLAGS, &rflags);
+ rflags = __vmread(GUEST_RFLAGS);
return rflags & X86_EFLAGS_VM;
}
@@ -615,7 +604,7 @@ static int vmx_guest_x86_mode(struct vcp
ASSERT(v == current);
- __vmread(GUEST_CS_AR_BYTES, &cs_ar_bytes);
+ cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
if ( vmx_long_mode_enabled(v) )
return ((cs_ar_bytes & (1u<<13)) ?
@@ -735,7 +724,7 @@ static int __get_instruction_length(void
static int __get_instruction_length(void)
{
int len;
- __vmread(VM_EXIT_INSTRUCTION_LEN, &len); /* Safe: callers audited */
+ len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe: callers audited */
if ( (len < 1) || (len > 15) )
__hvm_bug(guest_cpu_user_regs());
return len;
@@ -745,7 +734,7 @@ static void inline __update_guest_eip(un
{
unsigned long current_eip;
- __vmread(GUEST_RIP, &current_eip);
+ current_eip = __vmread(GUEST_RIP);
__vmwrite(GUEST_RIP, current_eip + inst_len);
__vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
}
@@ -758,8 +747,8 @@ static int vmx_do_page_fault(unsigned lo
{
unsigned long eip, cs;
- __vmread(GUEST_CS_BASE, &cs);
- __vmread(GUEST_RIP, &eip);
+ cs = __vmread(GUEST_CS_BASE);
+ eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_VMMU,
"vmx_do_page_fault = 0x%lx, cs_base=%lx, "
"eip = %lx, error_code = %lx\n",
@@ -773,7 +762,7 @@ static int vmx_do_page_fault(unsigned lo
#if 0
if ( !result )
{
- __vmread(GUEST_RIP, &eip);
+ eip = __vmread(GUEST_RIP);
printk("vmx pgfault to guest va=%lx eip=%lx\n", va, eip);
}
#endif
@@ -805,7 +794,7 @@ static void vmx_do_cpuid(struct cpu_user
unsigned long eip;
struct vcpu *v = current;
- __vmread(GUEST_RIP, &eip);
+ eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_3, "(eax) 0x%08lx, (ebx) 0x%08lx, "
"(ecx) 0x%08lx, (edx) 0x%08lx, (esi) 0x%08lx, (edi) 0x%08lx",
@@ -946,7 +935,7 @@ static void vmx_do_invlpg(unsigned long
unsigned long eip;
struct vcpu *v = current;
- __vmread(GUEST_RIP, &eip);
+ eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "eip=%lx, va=%lx",
eip, va);
@@ -969,7 +958,7 @@ static int check_for_null_selector(unsig
/* INS can only use ES segment register, and it can't be overridden */
if ( dir == IOREQ_READ )
{
- __vmread(GUEST_ES_SELECTOR, &sel);
+ sel = __vmread(GUEST_ES_SELECTOR);
return sel == 0 ? 1 : 0;
}
@@ -991,25 +980,25 @@ static int check_for_null_selector(unsig
case 0x67: /* addr32 */
continue;
case 0x2e: /* CS */
- __vmread(GUEST_CS_SELECTOR, &sel);
+ sel = __vmread(GUEST_CS_SELECTOR);
break;
case 0x36: /* SS */
- __vmread(GUEST_SS_SELECTOR, &sel);
+ sel = __vmread(GUEST_SS_SELECTOR);
break;
case 0x26: /* ES */
- __vmread(GUEST_ES_SELECTOR, &sel);
+ sel = __vmread(GUEST_ES_SELECTOR);
break;
case 0x64: /* FS */
- __vmread(GUEST_FS_SELECTOR, &sel);
+ sel = __vmread(GUEST_FS_SELECTOR);
break;
case 0x65: /* GS */
- __vmread(GUEST_GS_SELECTOR, &sel);
+ sel = __vmread(GUEST_GS_SELECTOR);
break;
case 0x3e: /* DS */
/* FALLTHROUGH */
default:
/* DS is the default */
- __vmread(GUEST_DS_SELECTOR, &sel);
+ sel = __vmread(GUEST_DS_SELECTOR);
}
return sel == 0 ? 1 : 0;
}
@@ -1056,7 +1045,7 @@ static void vmx_io_instruction(unsigned
unsigned long addr, count = 1;
int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
- __vmread(GUEST_LINEAR_ADDRESS, &addr);
+ addr = __vmread(GUEST_LINEAR_ADDRESS);
/*
* In protected mode, guest linear address is invalid if the
@@ -1119,98 +1108,96 @@ static void vmx_io_instruction(unsigned
}
}
-static int vmx_world_save(struct vcpu *v, struct vmx_assist_context *c)
-{
- int error = 0;
-
+static void vmx_world_save(struct vcpu *v, struct vmx_assist_context *c)
+{
/* NB. Skip transition instruction. */
- error |= __vmread(GUEST_RIP, &c->eip);
+ c->eip = __vmread(GUEST_RIP);
c->eip += __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
- error |= __vmread(GUEST_RSP, &c->esp);
- error |= __vmread(GUEST_RFLAGS, &c->eflags);
+ c->esp = __vmread(GUEST_RSP);
+ c->eflags = __vmread(GUEST_RFLAGS);
c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
c->cr3 = v->arch.hvm_vmx.cpu_cr3;
c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
- error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit);
- error |= __vmread(GUEST_IDTR_BASE, &c->idtr_base);
-
- error |= __vmread(GUEST_GDTR_LIMIT, &c->gdtr_limit);
- error |= __vmread(GUEST_GDTR_BASE, &c->gdtr_base);
-
- error |= __vmread(GUEST_CS_SELECTOR, &c->cs_sel);
- error |= __vmread(GUEST_CS_LIMIT, &c->cs_limit);
- error |= __vmread(GUEST_CS_BASE, &c->cs_base);
- error |= __vmread(GUEST_CS_AR_BYTES, &c->cs_arbytes.bytes);
-
- error |= __vmread(GUEST_DS_SELECTOR, &c->ds_sel);
- error |= __vmread(GUEST_DS_LIMIT, &c->ds_limit);
- error |= __vmread(GUEST_DS_BASE, &c->ds_base);
- error |= __vmread(GUEST_DS_AR_BYTES, &c->ds_arbytes.bytes);
-
- error |= __vmread(GUEST_ES_SELECTOR, &c->es_sel);
- error |= __vmread(GUEST_ES_LIMIT, &c->es_limit);
- error |= __vmread(GUEST_ES_BASE, &c->es_base);
- error |= __vmread(GUEST_ES_AR_BYTES, &c->es_arbytes.bytes);
-
- error |= __vmread(GUEST_SS_SELECTOR, &c->ss_sel);
- error |= __vmread(GUEST_SS_LIMIT, &c->ss_limit);
- error |= __vmread(GUEST_SS_BASE, &c->ss_base);
- error |= __vmread(GUEST_SS_AR_BYTES, &c->ss_arbytes.bytes);
-
- error |= __vmread(GUEST_FS_SELECTOR, &c->fs_sel);
- error |= __vmread(GUEST_FS_LIMIT, &c->fs_limit);
- error |= __vmread(GUEST_FS_BASE, &c->fs_base);
- error |= __vmread(GUEST_FS_AR_BYTES, &c->fs_arbytes.bytes);
-
- error |= __vmread(GUEST_GS_SELECTOR, &c->gs_sel);
- error |= __vmread(GUEST_GS_LIMIT, &c->gs_limit);
- error |= __vmread(GUEST_GS_BASE, &c->gs_base);
- error |= __vmread(GUEST_GS_AR_BYTES, &c->gs_arbytes.bytes);
-
- error |= __vmread(GUEST_TR_SELECTOR, &c->tr_sel);
- error |= __vmread(GUEST_TR_LIMIT, &c->tr_limit);
- error |= __vmread(GUEST_TR_BASE, &c->tr_base);
- error |= __vmread(GUEST_TR_AR_BYTES, &c->tr_arbytes.bytes);
-
- error |= __vmread(GUEST_LDTR_SELECTOR, &c->ldtr_sel);
- error |= __vmread(GUEST_LDTR_LIMIT, &c->ldtr_limit);
- error |= __vmread(GUEST_LDTR_BASE, &c->ldtr_base);
- error |= __vmread(GUEST_LDTR_AR_BYTES, &c->ldtr_arbytes.bytes);
-
- return !error;
-}
-
-static int vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
+ c->idtr_limit = __vmread(GUEST_IDTR_LIMIT);
+ c->idtr_base = __vmread(GUEST_IDTR_BASE);
+
+ c->gdtr_limit = __vmread(GUEST_GDTR_LIMIT);
+ c->gdtr_base = __vmread(GUEST_GDTR_BASE);
+
+ c->cs_sel = __vmread(GUEST_CS_SELECTOR);
+ c->cs_limit = __vmread(GUEST_CS_LIMIT);
+ c->cs_base = __vmread(GUEST_CS_BASE);
+ c->cs_arbytes.bytes = __vmread(GUEST_CS_AR_BYTES);
+
+ c->ds_sel = __vmread(GUEST_DS_SELECTOR);
+ c->ds_limit = __vmread(GUEST_DS_LIMIT);
+ c->ds_base = __vmread(GUEST_DS_BASE);
+ c->ds_arbytes.bytes = __vmread(GUEST_DS_AR_BYTES);
+
+ c->es_sel = __vmread(GUEST_ES_SELECTOR);
+ c->es_limit = __vmread(GUEST_ES_LIMIT);
+ c->es_base = __vmread(GUEST_ES_BASE);
+ c->es_arbytes.bytes = __vmread(GUEST_ES_AR_BYTES);
+
+ c->ss_sel = __vmread(GUEST_SS_SELECTOR);
+ c->ss_limit = __vmread(GUEST_SS_LIMIT);
+ c->ss_base = __vmread(GUEST_SS_BASE);
+ c->ss_arbytes.bytes = __vmread(GUEST_SS_AR_BYTES);
+
+ c->fs_sel = __vmread(GUEST_FS_SELECTOR);
+ c->fs_limit = __vmread(GUEST_FS_LIMIT);
+ c->fs_base = __vmread(GUEST_FS_BASE);
+ c->fs_arbytes.bytes = __vmread(GUEST_FS_AR_BYTES);
+
+ c->gs_sel = __vmread(GUEST_GS_SELECTOR);
+ c->gs_limit = __vmread(GUEST_GS_LIMIT);
+ c->gs_base = __vmread(GUEST_GS_BASE);
+ c->gs_arbytes.bytes = __vmread(GUEST_GS_AR_BYTES);
+
+ c->tr_sel = __vmread(GUEST_TR_SELECTOR);
+ c->tr_limit = __vmread(GUEST_TR_LIMIT);
+ c->tr_base = __vmread(GUEST_TR_BASE);
+ c->tr_arbytes.bytes = __vmread(GUEST_TR_AR_BYTES);
+
+ c->ldtr_sel = __vmread(GUEST_LDTR_SELECTOR);
+ c->ldtr_limit = __vmread(GUEST_LDTR_LIMIT);
+ c->ldtr_base = __vmread(GUEST_LDTR_BASE);
+ c->ldtr_arbytes.bytes = __vmread(GUEST_LDTR_AR_BYTES);
+}
+
+static void vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
{
unsigned long mfn, old_base_mfn;
- int error = 0;
-
- error |= __vmwrite(GUEST_RIP, c->eip);
- error |= __vmwrite(GUEST_RSP, c->esp);
- error |= __vmwrite(GUEST_RFLAGS, c->eflags);
+
+ __vmwrite(GUEST_RIP, c->eip);
+ __vmwrite(GUEST_RSP, c->esp);
+ __vmwrite(GUEST_RFLAGS, c->eflags);
v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
- error |= __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
-
- if (!vmx_paging_enabled(v))
+ __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+
+ if ( !vmx_paging_enabled(v) )
goto skip_cr3;
- if (c->cr3 == v->arch.hvm_vmx.cpu_cr3) {
+ if ( c->cr3 == v->arch.hvm_vmx.cpu_cr3 )
+ {
/*
* This is simple TLB flush, implying the guest has
* removed some translation or changed page attributes.
* We simply invalidate the shadow.
*/
mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
- if (mfn != pagetable_get_pfn(v->arch.guest_table)) {
+ if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
+ {
printk("Invalid CR3 value=%x", c->cr3);
domain_crash_synchronous();
- return 0;
- }
- } else {
+ }
+ }
+ else
+ {
/*
* If different, make a shadow. Check if the PDBR is valid
* first.
@@ -1221,10 +1208,9 @@ static int vmx_world_restore(struct vcpu
{
printk("Invalid CR3 value=%x", c->cr3);
domain_crash_synchronous();
- return 0;
- }
- if(!get_page(mfn_to_page(mfn), v->domain))
- return 0;
+ }
+ if ( !get_page(mfn_to_page(mfn), v->domain) )
+ domain_crash_synchronous();
old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
v->arch.guest_table = pagetable_from_pfn(mfn);
if (old_base_mfn)
@@ -1236,66 +1222,63 @@ static int vmx_world_restore(struct vcpu
}
skip_cr3:
-
- if (!vmx_paging_enabled(v))
+ if ( !vmx_paging_enabled(v) )
HVM_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
else
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
- error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
+ __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
- error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
-
- error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
- error |= __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
-
- error |= __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
- error |= __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
-
- error |= __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
- error |= __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
- error |= __vmwrite(GUEST_CS_BASE, c->cs_base);
- error |= __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes.bytes);
-
- error |= __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
- error |= __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
- error |= __vmwrite(GUEST_DS_BASE, c->ds_base);
- error |= __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes.bytes);
-
- error |= __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
- error |= __vmwrite(GUEST_ES_LIMIT, c->es_limit);
- error |= __vmwrite(GUEST_ES_BASE, c->es_base);
- error |= __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes.bytes);
-
- error |= __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
- error |= __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
- error |= __vmwrite(GUEST_SS_BASE, c->ss_base);
- error |= __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes.bytes);
-
- error |= __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
- error |= __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
- error |= __vmwrite(GUEST_FS_BASE, c->fs_base);
- error |= __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes.bytes);
-
- error |= __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
- error |= __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
- error |= __vmwrite(GUEST_GS_BASE, c->gs_base);
- error |= __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes.bytes);
-
- error |= __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
- error |= __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
- error |= __vmwrite(GUEST_TR_BASE, c->tr_base);
- error |= __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes.bytes);
-
- error |= __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
- error |= __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
- error |= __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
- error |= __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
+ __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+
+ __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
+ __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
+
+ __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
+ __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
+
+ __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
+ __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
+ __vmwrite(GUEST_CS_BASE, c->cs_base);
+ __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes.bytes);
+
+ __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
+ __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
+ __vmwrite(GUEST_DS_BASE, c->ds_base);
+ __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes.bytes);
+
+ __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
+ __vmwrite(GUEST_ES_LIMIT, c->es_limit);
+ __vmwrite(GUEST_ES_BASE, c->es_base);
+ __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes.bytes);
+
+ __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
+ __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
+ __vmwrite(GUEST_SS_BASE, c->ss_base);
+ __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes.bytes);
+
+ __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
+ __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
+ __vmwrite(GUEST_FS_BASE, c->fs_base);
+ __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes.bytes);
+
+ __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
+ __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
+ __vmwrite(GUEST_GS_BASE, c->gs_base);
+ __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes.bytes);
+
+ __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
+ __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
+ __vmwrite(GUEST_TR_BASE, c->tr_base);
+ __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes.bytes);
+
+ __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
+ __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
+ __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
+ __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
shadow_update_paging_modes(v);
__vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
-
- return !error;
}
enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
@@ -1325,8 +1308,7 @@ static int vmx_assist(struct vcpu *v, in
if (hvm_copy_from_guest_phys(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp)))
goto error;
if (cp != 0) {
- if (!vmx_world_save(v, &c))
- goto error;
+ vmx_world_save(v, &c);
if (hvm_copy_to_guest_phys(cp, &c, sizeof(c)))
goto error;
}
@@ -1337,8 +1319,7 @@ static int vmx_assist(struct vcpu *v, in
if (cp != 0) {
if (hvm_copy_from_guest_phys(&c, cp, sizeof(c)))
goto error;
- if (!vmx_world_restore(v, &c))
- goto error;
+ vmx_world_restore(v, &c);
v->arch.hvm_vmx.vmxassist_enabled = 1;
return 1;
}
@@ -1355,8 +1336,7 @@ static int vmx_assist(struct vcpu *v, in
if (cp != 0) {
if (hvm_copy_from_guest_phys(&c, cp, sizeof(c)))
goto error;
- if (!vmx_world_restore(v, &c))
- goto error;
+ vmx_world_restore(v, &c);
v->arch.hvm_vmx.vmxassist_enabled = 0;
return 1;
}
@@ -1428,7 +1408,7 @@ static int vmx_set_cr0(unsigned long val
HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode\n");
v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
|= EFER_LMA;
- __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
+ vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
vm_entry_value |= VM_ENTRY_IA32E_MODE;
__vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
}
@@ -1482,7 +1462,7 @@ static int vmx_set_cr0(unsigned long val
{
v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
&= ~EFER_LMA;
- __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
+ vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
__vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
}
@@ -1490,7 +1470,7 @@ static int vmx_set_cr0(unsigned long val
if ( vmx_assist(v, VMX_ASSIST_INVOKE) )
{
- __vmread(GUEST_RIP, &eip);
+ eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_1,
"Transfering control to vmxassist %%eip 0x%lx\n", eip);
return 0; /* do not update eip! */
@@ -1498,12 +1478,12 @@ static int vmx_set_cr0(unsigned long val
}
else if ( v->arch.hvm_vmx.vmxassist_enabled )
{
- __vmread(GUEST_RIP, &eip);
+ eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_1,
"Enabling CR0.PE at %%eip 0x%lx\n", eip);
if ( vmx_assist(v, VMX_ASSIST_RESTORE) )
{
- __vmread(GUEST_RIP, &eip);
+ eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_1,
"Restoring to %%eip 0x%lx\n", eip);
return 0; /* do not update eip! */
@@ -1515,7 +1495,7 @@ static int vmx_set_cr0(unsigned long val
{
v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
&= ~EFER_LMA;
- __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
+ vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
__vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
}
@@ -1570,7 +1550,7 @@ static int mov_to_cr(int gp, int cr, str
CASE_GET_REG(EDI, edi);
CASE_EXTEND_GET_REG;
case REG_ESP:
- __vmread(GUEST_RSP, &value);
+ value = __vmread(GUEST_RSP);
break;
default:
printk("invalid gp: %d\n", gp);
@@ -1821,13 +1801,13 @@ static inline void vmx_do_msr_read(struc
msr_content = hvm_get_guest_time(v);
break;
case MSR_IA32_SYSENTER_CS:
- __vmread(GUEST_SYSENTER_CS, (u32 *)&msr_content);
+ msr_content = (u32)__vmread(GUEST_SYSENTER_CS);
break;
case MSR_IA32_SYSENTER_ESP:
- __vmread(GUEST_SYSENTER_ESP, &msr_content);
+ msr_content = __vmread(GUEST_SYSENTER_ESP);
break;
case MSR_IA32_SYSENTER_EIP:
- __vmread(GUEST_SYSENTER_EIP, &msr_content);
+ msr_content = __vmread(GUEST_SYSENTER_EIP);
break;
case MSR_IA32_APICBASE:
msr_content = vcpu_vlapic(v)->apic_base_msr;
@@ -1903,14 +1883,13 @@ static void vmx_do_hlt(void)
static void vmx_do_hlt(void)
{
unsigned long rflags;
- __vmread(GUEST_RFLAGS, &rflags);
+ rflags = __vmread(GUEST_RFLAGS);
hvm_hlt(rflags);
}
static inline void vmx_do_extint(struct cpu_user_regs *regs)
{
unsigned int vector;
- int error;
asmlinkage void do_IRQ(struct cpu_user_regs *);
fastcall void smp_apic_timer_interrupt(struct cpu_user_regs *);
@@ -1923,9 +1902,8 @@ static inline void vmx_do_extint(struct
fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs);
#endif
- if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
- && !(vector & INTR_INFO_VALID_MASK))
- __hvm_bug(regs);
+ vector = __vmread(VM_EXIT_INTR_INFO);
+ BUG_ON(!(vector & INTR_INFO_VALID_MASK));
vector &= INTR_INFO_VECTOR_MASK;
TRACE_VMEXIT(1, vector);
@@ -1964,40 +1942,40 @@ static inline void vmx_do_extint(struct
#if defined (__x86_64__)
void store_cpu_user_regs(struct cpu_user_regs *regs)
{
- __vmread(GUEST_SS_SELECTOR, &regs->ss);
- __vmread(GUEST_RSP, &regs->rsp);
- __vmread(GUEST_RFLAGS, &regs->rflags);
- __vmread(GUEST_CS_SELECTOR, &regs->cs);
- __vmread(GUEST_DS_SELECTOR, &regs->ds);
- __vmread(GUEST_ES_SELECTOR, &regs->es);
- __vmread(GUEST_RIP, &regs->rip);
+ regs->ss = __vmread(GUEST_SS_SELECTOR);
+ regs->rsp = __vmread(GUEST_RSP);
+ regs->rflags = __vmread(GUEST_RFLAGS);
+ regs->cs = __vmread(GUEST_CS_SELECTOR);
+ regs->ds = __vmread(GUEST_DS_SELECTOR);
+ regs->es = __vmread(GUEST_ES_SELECTOR);
+ regs->rip = __vmread(GUEST_RIP);
}
#elif defined (__i386__)
void store_cpu_user_regs(struct cpu_user_regs *regs)
{
- __vmread(GUEST_SS_SELECTOR, &regs->ss);
- __vmread(GUEST_RSP, &regs->esp);
- __vmread(GUEST_RFLAGS, &regs->eflags);
- __vmread(GUEST_CS_SELECTOR, &regs->cs);
- __vmread(GUEST_DS_SELECTOR, &regs->ds);
- __vmread(GUEST_ES_SELECTOR, &regs->es);
- __vmread(GUEST_RIP, &regs->eip);
+ regs->ss = __vmread(GUEST_SS_SELECTOR);
+ regs->esp = __vmread(GUEST_RSP);
+ regs->eflags = __vmread(GUEST_RFLAGS);
+ regs->cs = __vmread(GUEST_CS_SELECTOR);
+ regs->ds = __vmread(GUEST_DS_SELECTOR);
+ regs->es = __vmread(GUEST_ES_SELECTOR);
+ regs->eip = __vmread(GUEST_RIP);
}
#endif
#ifdef XEN_DEBUGGER
void save_cpu_user_regs(struct cpu_user_regs *regs)
{
- __vmread(GUEST_SS_SELECTOR, &regs->xss);
- __vmread(GUEST_RSP, &regs->esp);
- __vmread(GUEST_RFLAGS, &regs->eflags);
- __vmread(GUEST_CS_SELECTOR, &regs->xcs);
- __vmread(GUEST_RIP, &regs->eip);
-
- __vmread(GUEST_GS_SELECTOR, &regs->xgs);
- __vmread(GUEST_FS_SELECTOR, &regs->xfs);
- __vmread(GUEST_ES_SELECTOR, &regs->xes);
- __vmread(GUEST_DS_SELECTOR, &regs->xds);
+ regs->xss = __vmread(GUEST_SS_SELECTOR);
+ regs->esp = __vmread(GUEST_RSP);
+ regs->eflags = __vmread(GUEST_RFLAGS);
+ regs->xcs = __vmread(GUEST_CS_SELECTOR);
+ regs->eip = __vmread(GUEST_RIP);
+
+ regs->xgs = __vmread(GUEST_GS_SELECTOR);
+ regs->xfs = __vmread(GUEST_FS_SELECTOR);
+ regs->xes = __vmread(GUEST_ES_SELECTOR);
+ regs->xds = __vmread(GUEST_DS_SELECTOR);
}
void restore_cpu_user_regs(struct cpu_user_regs *regs)
@@ -2019,10 +1997,10 @@ static void vmx_reflect_exception(struct
{
int error_code, intr_info, vector;
- __vmread(VM_EXIT_INTR_INFO, &intr_info);
+ intr_info = __vmread(VM_EXIT_INTR_INFO);
vector = intr_info & 0xff;
if ( intr_info & INTR_INFO_DELIVER_CODE_MASK )
- __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
+ error_code = __vmread(VM_EXIT_INTR_ERROR_CODE);
else
error_code = VMX_DELIVER_NO_ERROR_CODE;
@@ -2030,7 +2008,7 @@ static void vmx_reflect_exception(struct
{
unsigned long rip;
- __vmread(GUEST_RIP, &rip);
+ rip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_1, "rip = %lx, error_code = %x",
rip, error_code);
}
@@ -2062,7 +2040,7 @@ asmlinkage void vmx_vmexit_handler(struc
unsigned long exit_qualification, inst_len = 0;
struct vcpu *v = current;
- __vmread(VM_EXIT_REASON, &exit_reason);
+ exit_reason = __vmread(VM_EXIT_REASON);
perfc_incra(vmexits, exit_reason);
@@ -2078,7 +2056,7 @@ asmlinkage void vmx_vmexit_handler(struc
{
unsigned int failed_vmentry_reason = exit_reason & 0xFFFF;
- __vmread(EXIT_QUALIFICATION, &exit_qualification);
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
printk("Failed vm entry (exit reason 0x%x) ", exit_reason);
switch ( failed_vmentry_reason ) {
case EXIT_REASON_INVALID_GUEST_STATE:
@@ -2114,9 +2092,8 @@ asmlinkage void vmx_vmexit_handler(struc
*/
unsigned int intr_info, vector;
- if ( __vmread(VM_EXIT_INTR_INFO, &intr_info) ||
- !(intr_info & INTR_INFO_VALID_MASK) )
- __hvm_bug(regs);
+ intr_info = __vmread(VM_EXIT_INTR_INFO);
+ BUG_ON(!(intr_info & INTR_INFO_VALID_MASK));
vector = intr_info & INTR_INFO_VECTOR_MASK;
@@ -2177,8 +2154,8 @@ asmlinkage void vmx_vmexit_handler(struc
}
case TRAP_page_fault:
{
- __vmread(EXIT_QUALIFICATION, &exit_qualification);
- __vmread(VM_EXIT_INTR_ERROR_CODE, &regs->error_code);
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
+ regs->error_code = __vmread(VM_EXIT_INTR_ERROR_CODE);
TRACE_VMEXIT(3, regs->error_code);
TRACE_VMEXIT(4, exit_qualification);
@@ -2240,7 +2217,7 @@ asmlinkage void vmx_vmexit_handler(struc
{
inst_len = __get_instruction_length(); /* Safe: INVLPG */
__update_guest_eip(inst_len);
- __vmread(EXIT_QUALIFICATION, &exit_qualification);
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
vmx_do_invlpg(exit_qualification);
TRACE_VMEXIT(4, exit_qualification);
break;
@@ -2254,7 +2231,7 @@ asmlinkage void vmx_vmexit_handler(struc
}
case EXIT_REASON_CR_ACCESS:
{
- __vmread(EXIT_QUALIFICATION, &exit_qualification);
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
inst_len = __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
if ( vmx_cr_access(exit_qualification, regs) )
__update_guest_eip(inst_len);
@@ -2262,11 +2239,11 @@ asmlinkage void vmx_vmexit_handler(struc
break;
}
case EXIT_REASON_DR_ACCESS:
- __vmread(EXIT_QUALIFICATION, &exit_qualification);
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
vmx_dr_access(exit_qualification, regs);
break;
case EXIT_REASON_IO_INSTRUCTION:
- __vmread(EXIT_QUALIFICATION, &exit_qualification);
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
inst_len = __get_instruction_length(); /* Safe: IN, INS, OUT, OUTS */
vmx_io_instruction(exit_qualification, inst_len);
TRACE_VMEXIT(4, exit_qualification);
diff -r 6555ca56d844 -r 9f9f569b0a1d xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Wed Nov 08 15:11:18 2006 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Wed Nov 08 16:43:50 2006 +0000
@@ -183,80 +183,55 @@ static inline void __vmpclear(u64 addr)
: "memory");
}
-#define __vmread(x, ptr) ___vmread((x), (ptr), sizeof(*(ptr)))
-
-static always_inline int ___vmread(
- const unsigned long field, void *ptr, const int size)
-{
- unsigned long ecx = 0;
- int rc;
+static inline unsigned long __vmread(unsigned long field)
+{
+ unsigned long ecx;
+
+ __asm__ __volatile__ ( VMREAD_OPCODE
+ MODRM_EAX_ECX
+ /* CF==1 or ZF==1 --> crash (ud2) */
+ "ja 1f ; ud2 ; 1:\n"
+ : "=c" (ecx)
+ : "a" (field)
+ : "memory");
+
+ return ecx;
+}
+
+static inline void __vmwrite(unsigned long field, unsigned long value)
+{
+ __asm__ __volatile__ ( VMWRITE_OPCODE
+ MODRM_EAX_ECX
+ /* CF==1 or ZF==1 --> crash (ud2) */
+ "ja 1f ; ud2 ; 1:\n"
+ :
+ : "a" (field) , "c" (value)
+ : "memory");
+}
+
+static inline unsigned long __vmread_safe(unsigned long field, int *error)
+{
+ unsigned long ecx;
__asm__ __volatile__ ( VMREAD_OPCODE
MODRM_EAX_ECX
/* CF==1 or ZF==1 --> rc = -1 */
"setna %b0 ; neg %0"
- : "=q" (rc), "=c" (ecx)
+ : "=q" (*error), "=c" (ecx)
: "0" (0), "a" (field)
: "memory");
- switch ( size ) {
- case 1:
- *((u8 *) (ptr)) = ecx;
- break;
- case 2:
- *((u16 *) (ptr)) = ecx;
- break;
- case 4:
- *((u32 *) (ptr)) = ecx;
- break;
- case 8:
- *((u64 *) (ptr)) = ecx;
- break;
- default:
- domain_crash_synchronous();
- break;
- }
-
- return rc;
-}
-
-static inline int __vmwrite(unsigned long field, unsigned long value)
-{
- int rc;
-
- __asm__ __volatile__ ( VMWRITE_OPCODE
- MODRM_EAX_ECX
- /* CF==1 or ZF==1 --> rc = -1 */
- "setna %b0 ; neg %0"
- : "=q" (rc)
- : "0" (0), "a" (field) , "c" (value)
- : "memory");
-
- return rc;
-}
-
-static inline int __vm_set_bit(unsigned long field, unsigned long mask)
-{
- unsigned long tmp;
- int err = 0;
-
- err |= __vmread(field, &tmp);
- tmp |= mask;
- err |= __vmwrite(field, tmp);
-
- return err;
-}
-
-static inline int __vm_clear_bit(unsigned long field, unsigned long mask)
-{
- unsigned long tmp;
- int err = 0;
-
- err |= __vmread(field, &tmp);
- tmp &= ~mask;
- err |= __vmwrite(field, tmp);
-
- return err;
+ return ecx;
+}
+
+static inline void __vm_set_bit(unsigned long field, unsigned long mask)
+{
+ __vmwrite(field, __vmread(field) | mask);
+}
+
+static inline void __vm_clear_bit(unsigned long field, unsigned long mask)
+{
+ __vmwrite(field, __vmread(field) & ~mask);
}
static inline void __vmxoff (void)