# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID f78bfe7bff73c439c047ecd09f20424236e3e962
# Parent 38c16b37529864809e64b807297a3c46c9e8d426
[XEN] Get rid of many uses of domain_crash_synchronous().
It is much more dangerous than domain_crash() because it
stops execution of the current context regardless of its
state (e.g., IRQs disabled, locks held).
The preferred way to crash a domain is to call domain_crash()
and return an error to the caller.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
xen/arch/x86/hvm/hvm.c | 3
xen/arch/x86/hvm/intercept.c | 6 -
xen/arch/x86/hvm/io.c | 23 ++---
xen/arch/x86/hvm/platform.c | 3
xen/arch/x86/hvm/svm/svm.c | 120 ++++++++++++++-----------------
xen/arch/x86/hvm/vmx/vmcs.c | 4 -
xen/arch/x86/hvm/vmx/vmx.c | 147 ++++++++++++++++++++------------------
xen/arch/x86/mm.c | 2
xen/arch/x86/traps.c | 4 -
xen/arch/x86/x86_32/traps.c | 19 +++-
xen/arch/x86/x86_64/traps.c | 9 +-
xen/include/asm-x86/hvm/support.h | 7 -
12 files changed, 175 insertions(+), 172 deletions(-)
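In outline, the pattern callers are converted to looks like the sketch
below. This is a minimal illustration only: the handler name and the
bad_guest_state() predicate are made up for the example and are not taken
from the patch.

    static int example_handler(struct vcpu *v)
    {
        if ( unlikely(bad_guest_state(v)) )
        {
            /* Mark the domain as crashed and let the caller unwind
             * (release locks, restore IRQ state). Actual teardown
             * happens later, from a safe context. */
            gdprintk(XENLOG_ERR, "Unrecoverable guest state\n");
            domain_crash(v->domain);
            return -EINVAL;
        }
        return 0;
    }

By contrast, domain_crash_synchronous() never returns, so any locks held or
IRQs disabled at the call site are abandoned along with the current context.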
diff -r 38c16b375298 -r f78bfe7bff73 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Mon Nov 13 10:43:29 2006 +0000
+++ b/xen/arch/x86/hvm/hvm.c Mon Nov 13 12:01:43 2006 +0000
@@ -517,7 +517,8 @@ int hvm_bringup_ap(int vcpuid, int tramp
if ( bsp->vcpu_id != 0 )
{
gdprintk(XENLOG_ERR, "Not calling hvm_bringup_ap from BSP context.\n");
- domain_crash_synchronous();
+ domain_crash(bsp->domain);
+ return -EINVAL;
}
if ( (v = d->vcpu[vcpuid]) == NULL )
diff -r 38c16b375298 -r f78bfe7bff73 xen/arch/x86/hvm/intercept.c
--- a/xen/arch/x86/hvm/intercept.c Mon Nov 13 10:43:29 2006 +0000
+++ b/xen/arch/x86/hvm/intercept.c Mon Nov 13 12:01:43 2006 +0000
@@ -253,11 +253,7 @@ int register_io_handler(
struct hvm_io_handler *handler = &d->arch.hvm_domain.io_handler;
int num = handler->num_slot;
- if ( num >= MAX_IO_HANDLER )
- {
- printk("no extra space, register io interceptor failed!\n");
- domain_crash_synchronous();
- }
+ BUG_ON(num >= MAX_IO_HANDLER);
handler->hdl_list[num].addr = addr;
handler->hdl_list[num].size = size;
diff -r 38c16b375298 -r f78bfe7bff73 xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c Mon Nov 13 10:43:29 2006 +0000
+++ b/xen/arch/x86/hvm/io.c Mon Nov 13 12:01:43 2006 +0000
@@ -81,9 +81,7 @@ static void set_reg_value (int size, int
regs->ebx |= ((value & 0xFF) << 8);
break;
default:
- printk("Error: size:%x, index:%x are invalid!\n", size, index);
- domain_crash_synchronous();
- break;
+ goto crash;
}
break;
case WORD:
@@ -121,9 +119,7 @@ static void set_reg_value (int size, int
regs->edi |= (value & 0xFFFF);
break;
default:
- printk("Error: size:%x, index:%x are invalid!\n", size, index);
- domain_crash_synchronous();
- break;
+ goto crash;
}
break;
case LONG:
@@ -153,15 +149,13 @@ static void set_reg_value (int size, int
regs->edi = value;
break;
default:
- printk("Error: size:%x, index:%x are invalid!\n", size, index);
- domain_crash_synchronous();
- break;
+ goto crash;
}
break;
default:
- printk("Error: size:%x, index:%x are invalid!\n", size, index);
+ crash:
+ gdprintk(XENLOG_ERR, "size:%x, index:%x are invalid!\n", size, index);
domain_crash_synchronous();
- break;
}
}
#else
@@ -184,7 +178,7 @@ static inline void __set_reg_value(unsig
*reg = value;
break;
default:
- printk("Error: <__set_reg_value>: size:%x is invalid\n", size);
+ gdprintk(XENLOG_ERR, "size:%x is invalid\n", size);
domain_crash_synchronous();
}
}
@@ -226,7 +220,8 @@ static void set_reg_value (int size, int
regs->rbx |= ((value & 0xFF) << 8);
break;
default:
- printk("Error: size:%x, index:%x are invalid!\n", size, index);
+ gdprintk(XENLOG_ERR, "size:%x, index:%x are invalid!\n",
+ size, index);
domain_crash_synchronous();
break;
}
@@ -283,7 +278,7 @@ static void set_reg_value (int size, int
__set_reg_value(&regs->r15, size, value);
break;
default:
- printk("Error: <set_reg_value> Invalid index\n");
+ gdprintk(XENLOG_ERR, "Invalid index\n");
domain_crash_synchronous();
}
return;
diff -r 38c16b375298 -r f78bfe7bff73 xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c Mon Nov 13 10:43:29 2006 +0000
+++ b/xen/arch/x86/hvm/platform.c Mon Nov 13 12:01:43 2006 +0000
@@ -731,8 +731,7 @@ static void hvm_send_assist_req(struct v
{
/* This indicates a bug in the device model. Crash the domain. */
gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
- domain_crash(v->domain);
- return;
+ domain_crash_synchronous();
}
prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);
diff -r 38c16b375298 -r f78bfe7bff73 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Mon Nov 13 10:43:29 2006 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c Mon Nov 13 12:01:43 2006 +0000
@@ -326,14 +326,14 @@ static inline int long_mode_do_msr_write
static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
{
u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
- struct vcpu *vc = current;
- struct vmcb_struct *vmcb = vc->arch.hvm_svm.vmcb;
+ struct vcpu *v = current;
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
HVM_DBG_LOG(DBG_LEVEL_1, "mode_do_msr_write msr %lx "
"msr_content %"PRIx64"\n",
(unsigned long)regs->ecx, msr_content);
- switch (regs->ecx)
+ switch ( regs->ecx )
{
case MSR_EFER:
#ifdef __x86_64__
@@ -342,24 +342,24 @@ static inline int long_mode_do_msr_write
{
printk("Trying to set reserved bit in EFER: %"PRIx64"\n",
msr_content);
- svm_inject_exception(vc, TRAP_gp_fault, 1, 0);
+ svm_inject_exception(v, TRAP_gp_fault, 1, 0);
return 0;
}
/* LME: 0 -> 1 */
if ( msr_content & EFER_LME &&
- !test_bit(SVM_CPU_STATE_LME_ENABLED, &vc->arch.hvm_svm.cpu_state))
- {
- if ( svm_paging_enabled(vc) ||
+ !test_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state))
+ {
+ if ( svm_paging_enabled(v) ||
!test_bit(SVM_CPU_STATE_PAE_ENABLED,
- &vc->arch.hvm_svm.cpu_state) )
+ &v->arch.hvm_svm.cpu_state) )
{
printk("Trying to set LME bit when "
"in paging mode or PAE bit is not set\n");
- svm_inject_exception(vc, TRAP_gp_fault, 1, 0);
+ svm_inject_exception(v, TRAP_gp_fault, 1, 0);
return 0;
}
- set_bit(SVM_CPU_STATE_LME_ENABLED, &vc->arch.hvm_svm.cpu_state);
+ set_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state);
}
/* We have already recorded that we want LME, so it will be set
@@ -374,13 +374,13 @@ static inline int long_mode_do_msr_write
case MSR_FS_BASE:
case MSR_GS_BASE:
- if ( !svm_long_mode_enabled(vc) )
- domain_crash_synchronous();
+ if ( !svm_long_mode_enabled(v) )
+ goto exit_and_crash;
if (!IS_CANO_ADDRESS(msr_content))
{
HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
- svm_inject_exception(vc, TRAP_gp_fault, 1, 0);
+ svm_inject_exception(v, TRAP_gp_fault, 1, 0);
}
if (regs->ecx == MSR_FS_BASE)
@@ -412,7 +412,13 @@ static inline int long_mode_do_msr_write
default:
return 0;
}
+
return 1;
+
+ exit_and_crash:
+ gdprintk(XENLOG_ERR, "Fatal error writing MSR %lx\n", (long)regs->ecx);
+ domain_crash(v->domain);
+ return 1; /* handled */
}
@@ -420,7 +426,6 @@ static inline int long_mode_do_msr_write
__asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
#define savedebug(_v,_reg) \
__asm__ __volatile__ ("mov %%db" #_reg ",%0" : : "r"
((_v)->debugreg[_reg]))
-
static inline void svm_save_dr(struct vcpu *v)
{
@@ -938,7 +943,8 @@ static void svm_do_general_protection_fa
svm_dump_vmcb(__func__, vmcb);
svm_dump_regs(__func__, regs);
svm_dump_inst(vmcb->rip);
- __hvm_bug(regs);
+ domain_crash(v->domain);
+ return;
}
HVM_DBG_LOG(DBG_LEVEL_1,
@@ -1169,8 +1175,9 @@ static void svm_get_prefix_info(
if (inst_copy_from_guest(inst, svm_rip2pointer(vmcb), sizeof(inst))
!= MAX_INST_LEN)
{
- printk("%s: get guest instruction failed\n", __func__);
- domain_crash_synchronous();
+ gdprintk(XENLOG_ERR, "get guest instruction failed\n");
+ domain_crash(current->domain);
+ return;
}
for (i = 0; i < MAX_INST_LEN; i++)
@@ -1266,9 +1273,7 @@ static inline int svm_get_io_address(
isize --;
if (isize > 1)
- {
svm_get_prefix_info(vmcb, dir, &seg, &asize);
- }
ASSERT(dir == IOREQ_READ || dir == IOREQ_WRITE);
@@ -1470,8 +1475,10 @@ static int svm_set_cr0(unsigned long val
mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain))
{
- printk("Invalid CR3 value = %lx\n", v->arch.hvm_svm.cpu_cr3);
- domain_crash_synchronous(); /* need to take a clean path */
+ gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n",
+ v->arch.hvm_svm.cpu_cr3, mfn);
+ domain_crash(v->domain);
+ return 0;
}
#if defined(__x86_64__)
@@ -1556,7 +1563,7 @@ static void mov_from_cr(int cr, int gp,
vmcb = v->arch.hvm_svm.vmcb;
ASSERT(vmcb);
- switch (cr)
+ switch ( cr )
{
case 0:
value = v->arch.hvm_svm.cpu_shadow_cr0;
@@ -1582,7 +1589,8 @@ static void mov_from_cr(int cr, int gp,
break;
default:
- __hvm_bug(regs);
+ domain_crash(v->domain);
+ return;
}
set_reg(gp, value, regs, vmcb);
@@ -1602,13 +1610,10 @@ static inline int svm_pgbit_test(struct
*/
static int mov_to_cr(int gpreg, int cr, struct cpu_user_regs *regs)
{
- unsigned long value;
- unsigned long old_cr;
+ unsigned long value, old_cr, old_base_mfn, mfn;
struct vcpu *v = current;
struct vlapic *vlapic = vcpu_vlapic(v);
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
- ASSERT(vmcb);
value = get_reg(gpreg, regs, vmcb);
@@ -1623,8 +1628,6 @@ static int mov_to_cr(int gpreg, int cr,
return svm_set_cr0(value);
case 3:
- {
- unsigned long old_base_mfn, mfn;
if (svm_dbg_on)
printk("CR3 write =%lx \n", value );
/* If paging is not enabled yet, simply copy the value to CR3. */
@@ -1644,7 +1647,7 @@ static int mov_to_cr(int gpreg, int cr,
*/
mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
if (mfn != pagetable_get_pfn(v->arch.guest_table))
- __hvm_bug(regs);
+ goto bad_cr3;
shadow_update_cr3(v);
}
else
@@ -1656,10 +1659,7 @@ static int mov_to_cr(int gpreg, int cr,
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain))
- {
- printk("Invalid CR3 value=%lx\n", value);
- domain_crash_synchronous(); /* need to take a clean path */
- }
+ goto bad_cr3;
old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
v->arch.guest_table = pagetable_from_pfn(mfn);
@@ -1673,10 +1673,8 @@ static int mov_to_cr(int gpreg, int cr,
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
}
break;
- }
case 4: /* CR4 */
- {
if (svm_dbg_on)
printk( "write cr4=%lx, cr0=%lx\n",
value, v->arch.hvm_svm.cpu_shadow_cr0 );
@@ -1692,10 +1690,7 @@ static int mov_to_cr(int gpreg, int cr,
mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
if ( !VALID_MFN(mfn) ||
!get_page(mfn_to_page(mfn), v->domain) )
- {
- printk("Invalid CR3 value = %lx", v->arch.hvm_svm.cpu_cr3);
- domain_crash_synchronous(); /* need to take a clean path */
- }
+ goto bad_cr3;
/*
* Now arch.guest_table points to machine physical.
@@ -1741,20 +1736,23 @@ static int mov_to_cr(int gpreg, int cr,
shadow_update_paging_modes(v);
}
break;
- }
case 8:
- {
vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
break;
- }
default:
- printk("invalid cr: %d\n", cr);
- __hvm_bug(regs);
+ gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
+ domain_crash(v->domain);
+ return 0;
}
return 1;
+
+ bad_cr3:
+ gdprintk(XENLOG_ERR, "Invalid CR3\n");
+ domain_crash(v->domain);
+ return 0;
}
@@ -1857,8 +1855,7 @@ static int svm_cr_access(struct vcpu *v,
break;
default:
- __hvm_bug(regs);
- break;
+ BUG();
}
ASSERT(inst_len);
@@ -2037,16 +2034,15 @@ void svm_handle_invlpg(const short invlp
int inst_len;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- ASSERT(vmcb);
/*
* Unknown how many bytes the invlpg instruction will take. Use the
* maximum instruction length here
*/
if (inst_copy_from_guest(opcode, svm_rip2pointer(vmcb), length) < length)
{
- printk("svm_handle_invlpg (): Error reading memory %d bytes\n",
- length);
- __hvm_bug(regs);
+ gdprintk(XENLOG_ERR, "Error reading memory %d bytes\n", length);
+ domain_crash(v->domain);
+ return;
}
if (invlpga)
@@ -2510,7 +2506,7 @@ asmlinkage void svm_vmexit_handler(struc
if (exit_reason == VMEXIT_INVALID)
{
svm_dump_vmcb(__func__, vmcb);
- domain_crash_synchronous();
+ goto exit_and_crash;
}
#ifdef SVM_EXTRA_DEBUG
@@ -2734,8 +2730,7 @@ asmlinkage void svm_vmexit_handler(struc
break;
case VMEXIT_TASK_SWITCH:
- __hvm_bug(regs);
- break;
+ goto exit_and_crash;
case VMEXIT_CPUID:
svm_vmexit_do_cpuid(vmcb, regs->eax, regs);
@@ -2811,15 +2806,16 @@ asmlinkage void svm_vmexit_handler(struc
break;
case VMEXIT_SHUTDOWN:
- printk("Guest shutdown exit\n");
- domain_crash_synchronous();
- break;
+ gdprintk(XENLOG_ERR, "Guest shutdown exit\n");
+ goto exit_and_crash;
default:
- printk("unexpected VMEXIT: exit reason = 0x%x, exitinfo1 = %"PRIx64", "
- "exitinfo2 = %"PRIx64"\n", exit_reason,
- (u64)vmcb->exitinfo1, (u64)vmcb->exitinfo2);
- __hvm_bug(regs); /* should not happen */
+ exit_and_crash:
+ gdprintk(XENLOG_ERR, "unexpected VMEXIT: exit reason = 0x%x, "
+ "exitinfo1 = %"PRIx64", exitinfo2 = %"PRIx64"\n",
+ exit_reason,
+ (u64)vmcb->exitinfo1, (u64)vmcb->exitinfo2);
+ domain_crash(v->domain);
break;
}
@@ -2840,8 +2836,6 @@ asmlinkage void svm_vmexit_handler(struc
printk("svm_vmexit_handler: Returning\n");
}
#endif
-
- return;
}
asmlinkage void svm_load_cr2(void)
diff -r 38c16b375298 -r f78bfe7bff73 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c Mon Nov 13 10:43:29 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c Mon Nov 13 12:01:43 2006 +0000
@@ -466,14 +466,14 @@ void vm_launch_fail(unsigned long eflags
{
unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
printk("<vm_launch_fail> error code %lx\n", error);
- __hvm_bug(guest_cpu_user_regs());
+ domain_crash_synchronous();
}
void vm_resume_fail(unsigned long eflags)
{
unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
printk("<vm_resume_fail> error code %lx\n", error);
- __hvm_bug(guest_cpu_user_regs());
+ domain_crash_synchronous();
}
void arch_vmx_do_resume(struct vcpu *v)
diff -r 38c16b375298 -r f78bfe7bff73 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Mon Nov 13 10:43:29 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c Mon Nov 13 12:01:43 2006 +0000
@@ -151,15 +151,14 @@ static inline int long_mode_do_msr_read(
case MSR_FS_BASE:
if ( !(vmx_long_mode_enabled(v)) )
- /* XXX should it be GP fault */
- domain_crash_synchronous();
+ goto exit_and_crash;
msr_content = __vmread(GUEST_FS_BASE);
break;
case MSR_GS_BASE:
if ( !(vmx_long_mode_enabled(v)) )
- domain_crash_synchronous();
+ goto exit_and_crash;
msr_content = __vmread(GUEST_GS_BASE);
break;
@@ -183,6 +182,11 @@ static inline int long_mode_do_msr_read(
regs->edx = (u32)(msr_content >> 32);
return 1;
+
+ exit_and_crash:
+ gdprintk(XENLOG_ERR, "Fatal error reading MSR %lx\n", (long)regs->ecx);
+ domain_crash(v->domain);
+ return 1; /* handled */
}
static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
@@ -233,7 +237,7 @@ static inline int long_mode_do_msr_write
case MSR_FS_BASE:
case MSR_GS_BASE:
if ( !(vmx_long_mode_enabled(v)) )
- domain_crash_synchronous();
+ goto exit_and_crash;
if ( !IS_CANO_ADDRESS(msr_content) )
{
@@ -251,7 +255,7 @@ static inline int long_mode_do_msr_write
case MSR_SHADOW_GS_BASE:
if ( !(vmx_long_mode_enabled(v)) )
- domain_crash_synchronous();
+ goto exit_and_crash;
v->arch.hvm_vmx.msr_content.shadow_gs = msr_content;
wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
@@ -267,6 +271,11 @@ static inline int long_mode_do_msr_write
}
return 1;
+
+ exit_and_crash:
+ gdprintk(XENLOG_ERR, "Fatal error writing MSR %lx\n", (long)regs->ecx);
+ domain_crash(v->domain);
+ return 1; /* handled */
}
static void vmx_restore_msrs(struct vcpu *v)
@@ -726,8 +735,7 @@ static int __get_instruction_length(void
{
int len;
len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe: callers audited */
- if ( (len < 1) || (len > 15) )
- __hvm_bug(guest_cpu_user_regs());
+ BUG_ON((len < 1) || (len > 15));
return len;
}
@@ -823,7 +831,10 @@ static void vmx_do_cpuid(struct cpu_user
/* 8-byte aligned valid pseudophys address from vmxassist, please. */
if ( (value & 7) || (mfn == INVALID_MFN) ||
!v->arch.hvm_vmx.vmxassist_enabled )
- domain_crash_synchronous();
+ {
+ domain_crash(v->domain);
+ return;
+ }
p = map_domain_page(mfn);
value = *((uint64_t *)(p + (value & (PAGE_SIZE - 1))));
@@ -966,8 +977,9 @@ static int check_for_null_selector(unsig
memset(inst, 0, MAX_INST_LEN);
if ( inst_copy_from_guest(inst, eip, inst_len) != inst_len )
{
- printk("check_for_null_selector: get guest instruction failed\n");
- domain_crash_synchronous();
+ gdprintk(XENLOG_ERR, "Get guest instruction failed\n");
+ domain_crash(current->domain);
+ return 0;
}
for ( i = 0; i < inst_len; i++ )
@@ -1169,7 +1181,7 @@ static void vmx_world_save(struct vcpu *
c->ldtr_arbytes.bytes = __vmread(GUEST_LDTR_AR_BYTES);
}
-static void vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
+static int vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
{
unsigned long mfn, old_base_mfn;
@@ -1192,10 +1204,7 @@ static void vmx_world_restore(struct vcp
*/
mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
- {
- printk("Invalid CR3 value=%x", c->cr3);
- domain_crash_synchronous();
- }
+ goto bad_cr3;
}
else
{
@@ -1205,13 +1214,8 @@ static void vmx_world_restore(struct vcp
*/
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %x", c->cr3);
mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
- if ( !VALID_MFN(mfn) )
- {
- printk("Invalid CR3 value=%x", c->cr3);
- domain_crash_synchronous();
- }
- if ( !get_page(mfn_to_page(mfn), v->domain) )
- domain_crash_synchronous();
+ if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
+ goto bad_cr3;
old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
v->arch.guest_table = pagetable_from_pfn(mfn);
if (old_base_mfn)
@@ -1280,6 +1284,11 @@ static void vmx_world_restore(struct vcp
shadow_update_paging_modes(v);
__vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
+ return 0;
+
+ bad_cr3:
+ gdprintk(XENLOG_ERR, "Invalid CR3 value=%x", c->cr3);
+ return -EINVAL;
}
enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
@@ -1320,7 +1329,8 @@ static int vmx_assist(struct vcpu *v, in
if (cp != 0) {
if (hvm_copy_from_guest_phys(&c, cp, sizeof(c)))
goto error;
- vmx_world_restore(v, &c);
+ if ( vmx_world_restore(v, &c) != 0 )
+ goto error;
v->arch.hvm_vmx.vmxassist_enabled = 1;
return 1;
}
@@ -1337,7 +1347,8 @@ static int vmx_assist(struct vcpu *v, in
if (cp != 0) {
if (hvm_copy_from_guest_phys(&c, cp, sizeof(c)))
goto error;
- vmx_world_restore(v, &c);
+ if ( vmx_world_restore(v, &c) != 0 )
+ goto error;
v->arch.hvm_vmx.vmxassist_enabled = 0;
return 1;
}
@@ -1345,8 +1356,8 @@ static int vmx_assist(struct vcpu *v, in
}
error:
- printk("Failed to transfer to vmxassist\n");
- domain_crash_synchronous();
+ gdprintk(XENLOG_ERR, "Failed to transfer to vmxassist\n");
+ domain_crash(v->domain);
return 0;
}
@@ -1390,9 +1401,10 @@ static int vmx_set_cr0(unsigned long val
mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
{
- printk("Invalid CR3 value = %lx (mfn=%lx)\n",
- v->arch.hvm_vmx.cpu_cr3, mfn);
- domain_crash_synchronous(); /* need to take a clean path */
+ gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n",
+ v->arch.hvm_vmx.cpu_cr3, mfn);
+ domain_crash(v->domain);
+ return 0;
}
#if defined(__x86_64__)
@@ -1536,12 +1548,12 @@ static int vmx_set_cr0(unsigned long val
*/
static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
{
- unsigned long value;
- unsigned long old_cr;
+ unsigned long value, old_cr, old_base_mfn, mfn;
struct vcpu *v = current;
struct vlapic *vlapic = vcpu_vlapic(v);
- switch ( gp ) {
+ switch ( gp )
+ {
CASE_GET_REG(EAX, eax);
CASE_GET_REG(ECX, ecx);
CASE_GET_REG(EDX, edx);
@@ -1554,8 +1566,8 @@ static int mov_to_cr(int gp, int cr, str
value = __vmread(GUEST_RSP);
break;
default:
- printk("invalid gp: %d\n", gp);
- __hvm_bug(regs);
+ gdprintk(XENLOG_ERR, "invalid gp: %d\n", gp);
+ goto exit_and_crash;
}
TRACE_VMEXIT(1, TYPE_MOV_TO_CR);
@@ -1564,13 +1576,12 @@ static int mov_to_cr(int gp, int cr, str
HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
- switch ( cr ) {
+ switch ( cr )
+ {
case 0:
return vmx_set_cr0(value);
+
case 3:
- {
- unsigned long old_base_mfn, mfn;
-
/*
* If paging is not enabled yet, simply copy the value to CR3.
*/
@@ -1590,7 +1601,7 @@ static int mov_to_cr(int gp, int cr, str
*/
mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
if (mfn != pagetable_get_pfn(v->arch.guest_table))
- __hvm_bug(regs);
+ goto bad_cr3;
shadow_update_cr3(v);
} else {
/*
@@ -1600,10 +1611,7 @@ static int mov_to_cr(int gp, int cr, str
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
- {
- printk("Invalid CR3 value=%lx\n", value);
- domain_crash_synchronous(); /* need to take a clean path */
- }
+ goto bad_cr3;
old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
v->arch.guest_table = pagetable_from_pfn(mfn);
if (old_base_mfn)
@@ -1618,9 +1626,8 @@ static int mov_to_cr(int gp, int cr, str
__vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
}
break;
- }
+
case 4: /* CR4 */
- {
old_cr = v->arch.hvm_vmx.cpu_shadow_cr4;
if ( (value & X86_CR4_PAE) && !(old_cr & X86_CR4_PAE) )
@@ -1633,10 +1640,7 @@ static int mov_to_cr(int gp, int cr, str
mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
if ( !VALID_MFN(mfn) ||
!get_page(mfn_to_page(mfn), v->domain) )
- {
- printk("Invalid CR3 value = %lx", v->arch.hvm_vmx.cpu_cr3);
- domain_crash_synchronous(); /* need to take a clean path */
- }
+ goto bad_cr3;
/*
* Now arch.guest_table points to machine physical.
@@ -1682,18 +1686,24 @@ static int mov_to_cr(int gp, int cr, str
if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
shadow_update_paging_modes(v);
break;
- }
+
case 8:
- {
vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
break;
- }
+
default:
- printk("invalid cr: %d\n", gp);
- __hvm_bug(regs);
+ gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
+ domain_crash(v->domain);
+ return 0;
}
return 1;
+
+ bad_cr3:
+ gdprintk(XENLOG_ERR, "Invalid CR3\n");
+ exit_and_crash:
+ domain_crash(v->domain);
+ return 0;
}
/*
@@ -1715,7 +1725,9 @@ static void mov_from_cr(int cr, int gp,
value = (value & 0xF0) >> 4;
break;
default:
- __hvm_bug(regs);
+ gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
+ domain_crash(v->domain);
+ break;
}
switch ( gp ) {
@@ -1733,7 +1745,8 @@ static void mov_from_cr(int cr, int gp,
break;
default:
printk("invalid gp: %d\n", gp);
- __hvm_bug(regs);
+ domain_crash(v->domain);
+ break;
}
TRACE_VMEXIT(1, TYPE_MOV_FROM_CR);
@@ -1782,9 +1795,9 @@ static int vmx_cr_access(unsigned long e
return vmx_set_cr0(value);
break;
default:
- __hvm_bug(regs);
- break;
- }
+ BUG();
+ }
+
return 1;
}
@@ -2072,7 +2085,7 @@ asmlinkage void vmx_vmexit_handler(struc
printk("************* VMCS Area **************\n");
vmcs_dump_vcpu();
printk("**************************************\n");
- domain_crash_synchronous();
+ goto exit_and_crash;
}
TRACE_VMEXIT(0, exit_reason);
@@ -2186,8 +2199,7 @@ asmlinkage void vmx_vmexit_handler(struc
vmx_do_extint(regs);
break;
case EXIT_REASON_TRIPLE_FAULT:
- domain_crash_synchronous();
- break;
+ goto exit_and_crash;
case EXIT_REASON_PENDING_INTERRUPT:
/* Disable the interrupt window. */
v->arch.hvm_vcpu.u.vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
@@ -2195,8 +2207,7 @@ asmlinkage void vmx_vmexit_handler(struc
v->arch.hvm_vcpu.u.vmx.exec_control);
break;
case EXIT_REASON_TASK_SWITCH:
- domain_crash_synchronous();
- break;
+ goto exit_and_crash;
case EXIT_REASON_CPUID:
inst_len = __get_instruction_length(); /* Safe: CPUID */
__update_guest_eip(inst_len);
@@ -2261,8 +2272,7 @@ asmlinkage void vmx_vmexit_handler(struc
case EXIT_REASON_MWAIT_INSTRUCTION:
case EXIT_REASON_MONITOR_INSTRUCTION:
case EXIT_REASON_PAUSE_INSTRUCTION:
- domain_crash_synchronous();
- break;
+ goto exit_and_crash;
case EXIT_REASON_VMCLEAR:
case EXIT_REASON_VMLAUNCH:
case EXIT_REASON_VMPTRLD:
@@ -2282,7 +2292,10 @@ asmlinkage void vmx_vmexit_handler(struc
break;
default:
- domain_crash_synchronous(); /* should not happen */
+ exit_and_crash:
+ gdprintk(XENLOG_ERR, "Bad vmexit (reason %x)\n", exit_reason);
+ domain_crash(v->domain);
+ break;
}
}
diff -r 38c16b375298 -r f78bfe7bff73 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Mon Nov 13 10:43:29 2006 +0000
+++ b/xen/arch/x86/mm.c Mon Nov 13 12:01:43 2006 +0000
@@ -1717,7 +1717,7 @@ int new_guest_cr3(unsigned long mfn)
unsigned long old_base_mfn;
if ( is_hvm_domain(d) && !hvm_paging_enabled(v) )
- domain_crash_synchronous();
+ return 0;
if ( shadow_mode_refcounts(d) )
{
diff -r 38c16b375298 -r f78bfe7bff73 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Mon Nov 13 10:43:29 2006 +0000
+++ b/xen/arch/x86/traps.c Mon Nov 13 12:01:43 2006 +0000
@@ -1310,8 +1310,10 @@ static int emulate_privileged_op(struct
case 3: /* Write CR3 */
LOCK_BIGLOCK(v->domain);
- (void)new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg)));
+ rc = new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg)));
UNLOCK_BIGLOCK(v->domain);
+ if ( rc == 0 ) /* not okay */
+ goto fail;
break;
case 4:
diff -r 38c16b375298 -r f78bfe7bff73 xen/arch/x86/x86_32/traps.c
--- a/xen/arch/x86/x86_32/traps.c Mon Nov 13 10:43:29 2006 +0000
+++ b/xen/arch/x86/x86_32/traps.c Mon Nov 13 12:01:43 2006 +0000
@@ -179,16 +179,16 @@ unsigned long do_iret(void)
/* Check worst-case stack frame for overlap with Xen protected area. */
if ( unlikely(!access_ok(regs->esp, 40)) )
- domain_crash_synchronous();
+ goto exit_and_crash;
/* Pop and restore EAX (clobbered by hypercall). */
if ( unlikely(__copy_from_user(&regs->eax, (void __user *)regs->esp, 4)) )
- domain_crash_synchronous();
+ goto exit_and_crash;
regs->esp += 4;
/* Pop and restore CS and EIP. */
if ( unlikely(__copy_from_user(&regs->eip, (void __user *)regs->esp, 8)) )
- domain_crash_synchronous();
+ goto exit_and_crash;
regs->esp += 8;
/*
@@ -196,7 +196,7 @@ unsigned long do_iret(void)
* to avoid firing the BUG_ON(IOPL) check in arch_getdomaininfo_ctxt.
*/
if ( unlikely(__copy_from_user(&eflags, (void __user *)regs->esp, 4)) )
- domain_crash_synchronous();
+ goto exit_and_crash;
regs->esp += 4;
regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
@@ -204,17 +204,17 @@ unsigned long do_iret(void)
{
/* Return to VM86 mode: pop and restore ESP,SS,ES,DS,FS and GS. */
if ( __copy_from_user(&regs->esp, (void __user *)regs->esp, 24) )
- domain_crash_synchronous();
+ goto exit_and_crash;
}
else if ( unlikely(ring_0(regs)) )
{
- domain_crash_synchronous();
+ goto exit_and_crash;
}
else if ( !ring_1(regs) )
{
/* Return to ring 2/3: pop and restore ESP and SS. */
if ( __copy_from_user(&regs->esp, (void __user *)regs->esp, 8) )
- domain_crash_synchronous();
+ goto exit_and_crash;
}
/* No longer in NMI context. */
@@ -228,6 +228,11 @@ unsigned long do_iret(void)
* value.
*/
return regs->eax;
+
+ exit_and_crash:
+ gdprintk(XENLOG_ERR, "Fatal error\n");
+ domain_crash(current->domain);
+ return 0;
}
#include <asm/asm_defns.h>
diff -r 38c16b375298 -r f78bfe7bff73 xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c Mon Nov 13 10:43:29 2006 +0000
+++ b/xen/arch/x86/x86_64/traps.c Mon Nov 13 12:01:43 2006 +0000
@@ -200,7 +200,7 @@ unsigned long do_iret(void)
{
gdprintk(XENLOG_ERR, "Fault while reading IRET context from "
"guest stack\n");
- domain_crash_synchronous();
+ goto exit_and_crash;
}
/* Returning to user mode? */
@@ -210,7 +210,7 @@ unsigned long do_iret(void)
{
gdprintk(XENLOG_ERR, "Guest switching to user mode with no "
"user page tables\n");
- domain_crash_synchronous();
+ goto exit_and_crash;
}
toggle_guest_mode(v);
}
@@ -236,6 +236,11 @@ unsigned long do_iret(void)
/* Saved %rax gets written back to regs->rax in entry.S. */
return iret_saved.rax;
+
+ exit_and_crash:
+ gdprintk(XENLOG_ERR, "Fatal error\n");
+ domain_crash(v->domain);
+ return 0;
}
asmlinkage void syscall_enter(void);
diff -r 38c16b375298 -r f78bfe7bff73 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Mon Nov 13 10:43:29 2006 +0000
+++ b/xen/include/asm-x86/hvm/support.h Mon Nov 13 12:01:43 2006 +0000
@@ -118,13 +118,6 @@ extern unsigned int opt_hvm_debug_level;
#define HVM_DBG_LOG(level, _f, _a...)
#endif
-#define __hvm_bug(regs) \
- do { \
- printk("__hvm_bug at %s:%d\n", __FILE__, __LINE__); \
- show_execution_state(regs); \
- domain_crash_synchronous(); \
- } while (0)
-
#define TRACE_VMEXIT(index, value) \
current->arch.hvm_vcpu.hvm_trace_values[index] = (value)