# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1190208344 -3600
# Node ID ec3b23d8d544e209e2a338776538c4d13e4f67c5
# Parent 202153d094d883be1d8567031ff1a5957a664ea7
hvm: Always keep canonical copy of RIP/RSP/RFLAGS in
guest_cpu_user_regs(). Reduces complexity at little or no performance
cost (except on very old Intel P4 hardware, where VMREAD/VMWRITE are
disproportionately expensive).
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
xen/arch/x86/domain.c | 10 --
xen/arch/x86/domctl.c | 6 -
xen/arch/x86/hvm/hvm.c | 13 +--
xen/arch/x86/hvm/io.c | 1
xen/arch/x86/hvm/platform.c | 1
xen/arch/x86/hvm/svm/emulate.c | 19 ++--
xen/arch/x86/hvm/svm/svm.c | 124 ++++++++++----------------------
xen/arch/x86/hvm/svm/x86_32/exits.S | 12 +++
xen/arch/x86/hvm/svm/x86_64/exits.S | 12 +++
xen/arch/x86/hvm/vmx/vmx.c | 120 +++++-------------------------
xen/arch/x86/hvm/vmx/x86_32/exits.S | 31 ++++++--
xen/arch/x86/hvm/vmx/x86_64/exits.S | 31 ++++++--
xen/arch/x86/mm/shadow/multi.c | 6 -
xen/arch/x86/oprofile/op_model_athlon.c | 1
xen/arch/x86/x86_32/asm-offsets.c | 3
xen/arch/x86/x86_32/traps.c | 1
xen/arch/x86/x86_64/asm-offsets.c | 3
xen/arch/x86/x86_64/traps.c | 1
xen/include/asm-x86/hvm/hvm.h | 26 ------
xen/include/asm-x86/hvm/svm/emulate.h | 10 --
xen/include/asm-x86/hvm/vcpu.h | 2
21 files changed, 170 insertions(+), 263 deletions(-)
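
The pattern this changeset introduces, in outline: the hardware-maintained copy of RIP/RSP/RFLAGS (VMCS fields on VMX, VMCB fields on SVM) is synchronized with the canonical guest_cpu_user_regs() frame exactly once per vmexit and once per vmentry, in the assembly stubs. Every C-level exit handler then reads and updates these registers with plain memory accesses. A minimal sketch of the idea, using stand-in types rather than Xen's real definitions:

    /* Stand-in types; the real code uses Xen's vmcb_struct / VMCS
     * accessors and the frame returned by guest_cpu_user_regs(). */
    struct hw_state      { unsigned long rip, rsp, rflags; };
    struct cpu_user_regs { unsigned long eip, esp, eflags; };

    #define X86_EFLAGS_RF 0x00010000UL

    /* vmexit path: hardware state -> canonical copy (once per exit). */
    static void sync_from_hw(const struct hw_state *hw, struct cpu_user_regs *r)
    {
        r->eip = hw->rip; r->esp = hw->rsp; r->eflags = hw->rflags;
    }

    /* vmentry path: canonical copy -> hardware state (once per entry). */
    static void sync_to_hw(struct hw_state *hw, const struct cpu_user_regs *r)
    {
        hw->rip = r->eip; hw->rsp = r->esp; hw->rflags = r->eflags;
    }

    /* Exit handlers touch only the canonical copy -- no VMREAD/VMWRITE. */
    static void update_guest_eip(struct cpu_user_regs *r, int inst_len)
    {
        r->eip += inst_len;           /* skip the emulated instruction */
        r->eflags &= ~X86_EFLAGS_RF;  /* clear the resume flag */
    }

On SVM both sync directions are plain loads and stores into the VMCB; on VMX each field costs one VMREAD or VMWRITE per world switch, which is why the commit message singles out early P4-era parts where those instructions are slow.
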
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/domain.c Wed Sep 19 14:25:44 2007 +0100
@@ -631,10 +631,10 @@ int arch_set_info_guest(
memcpy(&v->arch.guest_context, c.nat, sizeof(*c.nat));
#ifdef CONFIG_COMPAT
else
- {
XLAT_vcpu_guest_context(&v->arch.guest_context, c.cmp);
- }
-#endif
+#endif
+
+ v->arch.guest_context.user_regs.eflags |= 2;
/* Only CR0.TS is modifiable by guest or admin. */
v->arch.guest_context.ctrlreg[0] &= X86_CR0_TS;
@@ -650,10 +650,6 @@ int arch_set_info_guest(
/* Ensure real hardware interrupts are enabled. */
v->arch.guest_context.user_regs.eflags |= EF_IE;
- }
- else
- {
- hvm_load_cpu_guest_regs(v, &v->arch.guest_context.user_regs);
}
if ( v->is_initialised )
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/domctl.c Wed Sep 19 14:25:44 2007 +0100
@@ -556,7 +556,6 @@ void arch_get_info_guest(struct vcpu *v,
{
if ( !is_pv_32on64_domain(v->domain) )
{
- hvm_store_cpu_guest_regs(v, &c.nat->user_regs);
memset(c.nat->ctrlreg, 0, sizeof(c.nat->ctrlreg));
c.nat->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
c.nat->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
@@ -566,11 +565,6 @@ void arch_get_info_guest(struct vcpu *v,
#ifdef CONFIG_COMPAT
else
{
- struct cpu_user_regs user_regs;
- unsigned i;
-
- hvm_store_cpu_guest_regs(v, &user_regs);
- XLAT_cpu_user_regs(&c.cmp->user_regs, &user_regs);
memset(c.cmp->ctrlreg, 0, sizeof(c.cmp->ctrlreg));
c.cmp->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
c.cmp->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c Wed Sep 19 14:25:44 2007 +0100
@@ -283,8 +283,10 @@ static int hvm_save_cpu_ctxt(struct doma
ctxt.rbp = vc->user_regs.ebp;
ctxt.rsi = vc->user_regs.esi;
ctxt.rdi = vc->user_regs.edi;
- /* %rsp handled by arch-specific call above */
-#ifdef __x86_64__
+ ctxt.rsp = vc->user_regs.esp;
+ ctxt.rip = vc->user_regs.eip;
+ ctxt.rflags = vc->user_regs.eflags;
+#ifdef __x86_64__
ctxt.r8 = vc->user_regs.r8;
ctxt.r9 = vc->user_regs.r9;
ctxt.r10 = vc->user_regs.r10;
@@ -347,6 +349,8 @@ static int hvm_load_cpu_ctxt(struct doma
vc->user_regs.esi = ctxt.rsi;
vc->user_regs.edi = ctxt.rdi;
vc->user_regs.esp = ctxt.rsp;
+ vc->user_regs.eip = ctxt.rip;
+ vc->user_regs.eflags = ctxt.rflags | 2;
#ifdef __x86_64__
vc->user_regs.r8 = ctxt.r8;
vc->user_regs.r9 = ctxt.r9;
@@ -973,8 +977,6 @@ void hvm_task_switch(
goto out;
}
- hvm_store_cpu_guest_regs(v, regs);
-
ptss = hvm_map(prev_tr.base, sizeof(tss));
if ( ptss == NULL )
goto out;
@@ -1080,8 +1082,6 @@ void hvm_task_switch(
&linear_addr) )
hvm_copy_to_guest_virt(linear_addr, &errcode, 4);
}
-
- hvm_load_cpu_guest_regs(v, regs);
out:
hvm_unmap(optss_desc);
@@ -1322,7 +1322,6 @@ int hvm_do_hypercall(struct cpu_user_reg
#endif
case 4:
case 2:
- hvm_store_cpu_guest_regs(current, regs);
if ( unlikely(ring_3(regs)) )
{
default:
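
The save/load context paths above now go straight through vc->user_regs. The `| 2` on the load side enforces the architectural invariant that RFLAGS bit 1 is always set (VMENTRY fails on VMX otherwise, and SVM's old load hook used to OR it in for the same reason). A tiny self-contained check of that invariant, with a hypothetical helper name:

    #include <assert.h>
    #include <stdint.h>

    #define X86_EFLAGS_MBS 0x2ULL  /* bit 1: architecturally must-be-set */

    /* Hypothetical helper mirroring the load path above: any RFLAGS
     * value restored from a saved image gets bit 1 forced on. */
    static uint64_t sanitize_rflags(uint64_t rflags)
    {
        return rflags | X86_EFLAGS_MBS;
    }

    int main(void)
    {
        assert(sanitize_rflags(0x000) == 0x002);
        assert(sanitize_rflags(0x246) == 0x246); /* bit 1 already set */
        return 0;
    }
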
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/hvm/io.c Wed Sep 19 14:25:44 2007 +0100
@@ -858,7 +858,6 @@ void hvm_io_assist(void)
/* Copy register changes back into current guest state. */
regs->eflags &= ~X86_EFLAGS_RF;
- hvm_load_cpu_guest_regs(v, regs);
memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
out:
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/hvm/platform.c Wed Sep 19 14:25:44 2007 +0100
@@ -1032,7 +1032,6 @@ void handle_mmio(unsigned long gpa)
/* Copy current guest state into io instruction state structure. */
memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
- hvm_store_cpu_guest_regs(v, regs);
df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/hvm/svm/emulate.c
--- a/xen/arch/x86/hvm/svm/emulate.c Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/hvm/svm/emulate.c Wed Sep 19 14:25:44 2007 +0100
@@ -59,8 +59,8 @@ extern int inst_copy_from_guest(unsigned
#define DECODE_SIB_BASE(prefix, sib) DECODE_MODRM_RM(prefix, sib)
-static inline unsigned long DECODE_GPR_VALUE(struct vmcb_struct *vmcb,
- struct cpu_user_regs *regs, u8 gpr_rm)
+static inline unsigned long DECODE_GPR_VALUE(
+ struct cpu_user_regs *regs, u8 gpr_rm)
{
unsigned long value;
switch (gpr_rm)
@@ -78,7 +78,7 @@ static inline unsigned long DECODE_GPR_V
value = regs->ebx;
break;
case 0x4:
- value = (unsigned long)vmcb->rsp;
+ value = regs->esp;
case 0x5:
value = regs->ebp;
break;
@@ -172,7 +172,7 @@ unsigned long get_effective_addr_modrm64
}
else
{
- effective_addr = DECODE_GPR_VALUE(vmcb, regs, modrm_rm);
+ effective_addr = DECODE_GPR_VALUE(regs, modrm_rm);
}
break;
@@ -202,12 +202,12 @@ unsigned long get_effective_addr_modrm64
#if __x86_64__
/* 64-bit mode */
if (vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v))
- return vmcb->rip + inst_len + *size + disp;
+ return regs->eip + inst_len + *size + disp;
#endif
return disp;
default:
- effective_addr = DECODE_GPR_VALUE(vmcb, regs, modrm_rm);
+ effective_addr = DECODE_GPR_VALUE(regs, modrm_rm);
}
@@ -251,7 +251,7 @@ unsigned long get_effective_addr_sib(str
sib_idx = DECODE_SIB_INDEX(prefix, sib);
sib_base = DECODE_SIB_BASE(prefix, sib);
- base = DECODE_GPR_VALUE(vmcb, regs, sib_base);
+ base = DECODE_GPR_VALUE(regs, sib_base);
if ((unsigned long)-1 == base)
{
@@ -293,7 +293,7 @@ unsigned long get_effective_addr_sib(str
if (4 == sib_idx)
return base;
- effective_addr = DECODE_GPR_VALUE(vmcb, regs, sib_idx);
+ effective_addr = DECODE_GPR_VALUE(regs, sib_idx);
effective_addr <<= sib_scale;
@@ -326,7 +326,8 @@ unsigned long svm_rip2pointer(struct vcp
* no matter what kind of addressing is used.
*/
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- unsigned long p = vmcb->cs.base + vmcb->rip;
+ unsigned long p = vmcb->cs.base + guest_cpu_user_regs()->eip;
+ ASSERT(v == current);
if (!(vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v)))
return (u32)p; /* mask to 32 bits */
/* NB. Should mask to 16 bits if in real mode or 16-bit protected mode. */
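
With %rsp held in the frame, DECODE_GPR_VALUE() above no longer needs the VMCB at all. A hedged sketch of equivalent r/m decoding against a flat register frame (simplified cpu_user_regs, not Xen's real layout); a table lookup like this would also avoid the fall-through visible in the hunk above, where case 0x4 reaches case 0x5 without a break:

    /* Simplified frame, fields ordered per the x86 ModRM register
     * numbering (0=eax .. 7=edi); not Xen's real cpu_user_regs. */
    struct cpu_user_regs {
        unsigned long eax, ecx, edx, ebx, esp, ebp, esi, edi;
    };

    static unsigned long decode_gpr_value(const struct cpu_user_regs *r,
                                          unsigned char gpr_rm)
    {
        const unsigned long *gpr[8] = {
            &r->eax, &r->ecx, &r->edx, &r->ebx,
            &r->esp, &r->ebp, &r->esi, &r->edi,
        };
        return *gpr[gpr_rm & 7];
    }
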
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Wed Sep 19 14:25:44 2007 +0100
@@ -72,6 +72,14 @@ static void *root_vmcb[NR_CPUS] __read_m
/* hardware assisted paging bits */
extern int opt_hap_enabled;
+static void inline __update_guest_eip(
+ struct cpu_user_regs *regs, int inst_len)
+{
+ ASSERT(inst_len > 0);
+ regs->eip += inst_len;
+ regs->eflags &= ~X86_EFLAGS_RF;
+}
+
static void svm_inject_exception(
struct vcpu *v, int trap, int ev, int error_code)
{
@@ -106,16 +114,6 @@ static int svm_lme_is_set(struct vcpu *v
#else
return 0;
#endif
-}
-
-static void svm_store_cpu_guest_regs(
- struct vcpu *v, struct cpu_user_regs *regs)
-{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
- regs->esp = vmcb->rsp;
- regs->eflags = vmcb->rflags;
- regs->eip = vmcb->rip;
}
static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
@@ -233,29 +231,10 @@ int svm_vmcb_save(struct vcpu *v, struct
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- c->rip = vmcb->rip;
-
-#ifdef HVM_DEBUG_SUSPEND
- printk("%s: eip=0x%"PRIx64".\n",
- __func__,
- inst_len, c->eip);
-#endif
-
- c->rsp = vmcb->rsp;
- c->rflags = vmcb->rflags;
-
c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
-
-#ifdef HVM_DEBUG_SUSPEND
- printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
- __func__,
- c->cr3,
- c->cr0,
- c->cr4);
-#endif
c->idtr_limit = vmcb->idtr.limit;
c->idtr_base = vmcb->idtr.base;
@@ -354,10 +333,6 @@ int svm_vmcb_restore(struct vcpu *v, str
v->arch.guest_table = pagetable_from_pfn(mfn);
}
-
- vmcb->rip = c->rip;
- vmcb->rsp = c->rsp;
- vmcb->rflags = c->rflags;
v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
@@ -518,7 +493,8 @@ static int svm_interrupts_enabled(struct
return !vmcb->interrupt_shadow;
ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
- return !irq_masked(vmcb->rflags) && !vmcb->interrupt_shadow;
+ return (!irq_masked(guest_cpu_user_regs()->eflags) &&
+ !vmcb->interrupt_shadow);
}
static int svm_guest_x86_mode(struct vcpu *v)
@@ -527,7 +503,7 @@ static int svm_guest_x86_mode(struct vcp
if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
return 0;
- if ( unlikely(vmcb->rflags & X86_EFLAGS_VM) )
+ if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
return 1;
if ( hvm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
return 8;
@@ -785,7 +761,6 @@ static void svm_init_ap_context(
*/
svm_reset_to_realmode(v, regs);
/* Adjust the vmcb's hidden register state. */
- vmcb->rip = 0;
vmcb->cs.sel = cs_sel;
vmcb->cs.base = (cs_sel << 4);
}
@@ -808,15 +783,6 @@ static void svm_init_hypercall_page(stru
/* Don't support HYPERVISOR_iret at the moment */
*(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
-}
-
-static void svm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
-{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
- vmcb->rsp = regs->esp;
- vmcb->rflags = regs->eflags | 2UL;
- vmcb->rip = regs->eip;
}
static void svm_ctxt_switch_from(struct vcpu *v)
@@ -950,8 +916,6 @@ static struct hvm_function_table svm_fun
.domain_destroy = svm_domain_destroy,
.vcpu_initialise = svm_vcpu_initialise,
.vcpu_destroy = svm_vcpu_destroy,
- .store_cpu_guest_regs = svm_store_cpu_guest_regs,
- .load_cpu_guest_regs = svm_load_cpu_guest_regs,
.save_cpu_ctxt = svm_save_vmcb_ctxt,
.load_cpu_ctxt = svm_load_vmcb_ctxt,
.interrupts_enabled = svm_interrupts_enabled,
@@ -1144,7 +1108,7 @@ static void svm_vmexit_do_cpuid(struct v
inst_len = __get_instruction_length(v, INSTR_CPUID, NULL);
ASSERT(inst_len > 0);
- __update_guest_eip(vmcb, inst_len);
+ __update_guest_eip(regs, inst_len);
}
static unsigned long *get_reg_p(
@@ -1176,7 +1140,7 @@ static unsigned long *get_reg_p(
reg_p = (unsigned long *)&regs->ebp;
break;
case SVM_REG_ESP:
- reg_p = (unsigned long *)&vmcb->rsp;
+ reg_p = (unsigned long *)&regs->esp;
break;
#ifdef __x86_64__
case SVM_REG_R8:
@@ -1348,7 +1312,7 @@ static int svm_get_io_address(
* than one byte (+ maybe rep-prefix), we have some prefix so we need
* to figure out what it is...
*/
- isize = vmcb->exitinfo2 - vmcb->rip;
+ isize = vmcb->exitinfo2 - regs->eip;
if (info.fields.rep)
isize --;
@@ -1501,7 +1465,6 @@ static void svm_io_instruction(struct vc
/* Copy current guest state into io instruction state structure. */
memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
- svm_store_cpu_guest_regs(v, regs);
info.bytes = vmcb->exitinfo1;
@@ -1524,7 +1487,7 @@ static void svm_io_instruction(struct vc
HVM_DBG_LOG(DBG_LEVEL_IO,
"svm_io_instruction: port 0x%x eip=%x:%"PRIx64", "
"exit_qualification = %"PRIx64,
- port, vmcb->cs.sel, vmcb->rip, info.bytes);
+ port, vmcb->cs.sel, (uint64_t)regs->eip, info.bytes);
/* string instruction */
if (info.fields.str)
@@ -1775,7 +1738,7 @@ static void svm_cr_access(
if (index > 0 && (buffer[index-1] & 0xF0) == 0x40)
prefix = buffer[index-1];
- HVM_DBG_LOG(DBG_LEVEL_1, "eip = %lx", (unsigned long) vmcb->rip);
+ HVM_DBG_LOG(DBG_LEVEL_1, "eip = %lx", (unsigned long)regs->eip);
switch ( match )
@@ -1870,7 +1833,7 @@ static void svm_cr_access(
ASSERT(inst_len);
if ( result )
- __update_guest_eip(vmcb, inst_len);
+ __update_guest_eip(regs, inst_len);
}
static void svm_do_msr_access(
@@ -1993,14 +1956,15 @@ static void svm_do_msr_access(
inst_len = __get_instruction_length(v, INSTR_WRMSR, NULL);
}
- __update_guest_eip(vmcb, inst_len);
-}
-
-static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
+ __update_guest_eip(regs, inst_len);
+}
+
+static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
+ struct cpu_user_regs *regs)
{
enum hvm_intack type = hvm_vcpu_has_pending_irq(current);
- __update_guest_eip(vmcb, 1);
+ __update_guest_eip(regs, 1);
/* Check for interrupt not handled or new interrupt. */
if ( vmcb->eventinj.fields.v ||
@@ -2011,13 +1975,12 @@ static void svm_vmexit_do_hlt(struct vmc
}
HVMTRACE_1D(HLT, current, /*int pending=*/ 0);
- hvm_hlt(vmcb->rflags);
-}
-
-static void svm_vmexit_do_invd(struct vcpu *v)
-{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- int inst_len;
+ hvm_hlt(regs->eflags);
+}
+
+static void svm_vmexit_do_invd(struct cpu_user_regs *regs)
+{
+ int inst_len;
/* Invalidate the cache - we can't really do that safely - maybe we should
* WBINVD, but I think it's just fine to completely ignore it - we should
@@ -2029,8 +1992,8 @@ static void svm_vmexit_do_invd(struct vc
*/
gdprintk(XENLOG_WARNING, "INVD instruction intercepted - ignored\n");
- inst_len = __get_instruction_length(v, INSTR_INVD, NULL);
- __update_guest_eip(vmcb, inst_len);
+ inst_len = __get_instruction_length(current, INSTR_INVD, NULL);
+ __update_guest_eip(regs, inst_len);
}
void svm_handle_invlpg(const short invlpga, struct cpu_user_regs *regs)
@@ -2039,7 +2002,6 @@ void svm_handle_invlpg(const short invlp
u8 opcode[MAX_INST_LEN], prefix, length = MAX_INST_LEN;
unsigned long g_vaddr;
int inst_len;
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
/*
* Unknown how many bytes the invlpg instruction will take. Use the
@@ -2056,7 +2018,7 @@ void svm_handle_invlpg(const short invlp
{
inst_len = __get_instruction_length(v, INSTR_INVLPGA, opcode);
ASSERT(inst_len > 0);
- __update_guest_eip(vmcb, inst_len);
+ __update_guest_eip(regs, inst_len);
/*
* The address is implicit on this instruction. At the moment, we don't
@@ -2083,7 +2045,7 @@ void svm_handle_invlpg(const short invlp
&opcode[inst_len], &length);
inst_len += length;
- __update_guest_eip (vmcb, inst_len);
+ __update_guest_eip(regs, inst_len);
}
HVMTRACE_3D(INVLPG, v, (invlpga?1:0), g_vaddr, (invlpga?regs->ecx:0));
@@ -2106,6 +2068,8 @@ static int svm_reset_to_realmode(struct
memset(regs, 0, sizeof(struct cpu_user_regs));
+ regs->eflags = 2;
+
v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
svm_update_guest_cr(v, 0);
@@ -2118,7 +2082,7 @@ static int svm_reset_to_realmode(struct
vmcb->efer = EFER_SVME;
/* This will jump to ROMBIOS */
- vmcb->rip = 0xFFF0;
+ regs->eip = 0xFFF0;
/* Set up the segment registers and all their hidden states. */
vmcb->cs.sel = 0xF000;
@@ -2171,16 +2135,12 @@ static int svm_reset_to_realmode(struct
vmcb->idtr.limit = 0x3ff;
vmcb->idtr.base = 0x00;
- vmcb->rax = 0;
- vmcb->rsp = 0;
-
return 0;
}
asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
{
unsigned int exit_reason;
- unsigned long eip;
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
eventinj_t eventinj;
@@ -2198,7 +2158,7 @@ asmlinkage void svm_vmexit_handler(struc
exit_reason = vmcb->exitcode;
- HVMTRACE_2D(VMEXIT, v, vmcb->rip, exit_reason);
+ HVMTRACE_2D(VMEXIT, v, regs->eip, exit_reason);
if ( unlikely(exit_reason == VMEXIT_INVALID) )
{
@@ -2207,7 +2167,6 @@ asmlinkage void svm_vmexit_handler(struc
}
perfc_incra(svmexits, exit_reason);
- eip = vmcb->rip;
/* Event delivery caused this intercept? Queue for redelivery. */
eventinj = vmcb->exitintinfo;
@@ -2244,7 +2203,7 @@ asmlinkage void svm_vmexit_handler(struc
goto exit_and_crash;
/* AMD Vol2, 15.11: INT3, INTO, BOUND intercepts do not update RIP. */
inst_len = __get_instruction_length(v, INSTR_INT3, NULL);
- __update_guest_eip(vmcb, inst_len);
+ __update_guest_eip(regs, inst_len);
domain_pause_for_debugger();
break;
@@ -2275,7 +2234,6 @@ asmlinkage void svm_vmexit_handler(struc
case VMEXIT_EXCEPTION_MC:
HVMTRACE_0D(MCE, v);
- svm_store_cpu_guest_regs(v, regs);
do_machine_check(regs);
break;
@@ -2285,7 +2243,7 @@ asmlinkage void svm_vmexit_handler(struc
break;
case VMEXIT_INVD:
- svm_vmexit_do_invd(v);
+ svm_vmexit_do_invd(regs);
break;
case VMEXIT_TASK_SWITCH: {
@@ -2308,7 +2266,7 @@ asmlinkage void svm_vmexit_handler(struc
break;
case VMEXIT_HLT:
- svm_vmexit_do_hlt(vmcb);
+ svm_vmexit_do_hlt(vmcb, regs);
break;
case VMEXIT_INVLPG:
@@ -2326,7 +2284,7 @@ asmlinkage void svm_vmexit_handler(struc
rc = hvm_do_hypercall(regs);
if ( rc != HVM_HCALL_preempted )
{
- __update_guest_eip(vmcb, inst_len);
+ __update_guest_eip(regs, inst_len);
if ( rc == HVM_HCALL_invalidate )
send_invalidate_req();
}
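
One detail of svm_reset_to_realmode() worth spelling out: the frame is zeroed, eflags is set to the must-be-set value 2, and eip to 0xFFF0; together with CS.sel = 0xF000 this puts the first fetch at real-mode linear address 0xFFFF0, the ROMBIOS entry stub just below 1MB. A worked check of the segment arithmetic (assuming the usual seg<<4 + offset real-mode translation, the same convention svm_init_ap_context() uses for cs.base above):

    #include <assert.h>

    /* Real-mode linear address = (segment << 4) + offset. */
    static unsigned long real_mode_linear(unsigned short seg, unsigned short off)
    {
        return ((unsigned long)seg << 4) + off;
    }

    int main(void)
    {
        /* CS=0xF000, IP=0xFFF0 -> 0xFFFF0, the reset entry point. */
        assert(real_mode_linear(0xF000, 0xFFF0) == 0xFFFF0UL);
        return 0;
    }
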
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/hvm/svm/x86_32/exits.S
--- a/xen/arch/x86/hvm/svm/x86_32/exits.S Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/hvm/svm/x86_32/exits.S Wed Sep 19 14:25:44 2007 +0100
@@ -58,6 +58,12 @@ svm_trace_done:
movl VCPU_svm_vmcb(%ebx),%ecx
movl UREGS_eax(%esp),%eax
movl %eax,VMCB_rax(%ecx)
+ movl UREGS_eip(%esp),%eax
+ movl %eax,VMCB_rip(%ecx)
+ movl UREGS_esp(%esp),%eax
+ movl %eax,VMCB_rsp(%ecx)
+ movl UREGS_eflags(%esp),%eax
+ movl %eax,VMCB_rflags(%ecx)
movl VCPU_svm_vmcb_pa(%ebx),%eax
popl %ebx
@@ -81,6 +87,12 @@ svm_trace_done:
movl VCPU_svm_vmcb(%ebx),%ecx
movl VMCB_rax(%ecx),%eax
movl %eax,UREGS_eax(%esp)
+ movl VMCB_rip(%ecx),%eax
+ movl %eax,UREGS_eip(%esp)
+ movl VMCB_rsp(%ecx),%eax
+ movl %eax,UREGS_esp(%esp)
+ movl VMCB_rflags(%ecx),%eax
+ movl %eax,UREGS_eflags(%esp)
STGI
.globl svm_stgi_label;
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/hvm/svm/x86_64/exits.S
--- a/xen/arch/x86/hvm/svm/x86_64/exits.S Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/hvm/svm/x86_64/exits.S Wed Sep 19 14:25:44 2007 +0100
@@ -59,6 +59,12 @@ svm_trace_done:
movq VCPU_svm_vmcb(%rbx),%rcx
movq UREGS_rax(%rsp),%rax
movq %rax,VMCB_rax(%rcx)
+ movq UREGS_rip(%rsp),%rax
+ movq %rax,VMCB_rip(%rcx)
+ movq UREGS_rsp(%rsp),%rax
+ movq %rax,VMCB_rsp(%rcx)
+ movq UREGS_eflags(%rsp),%rax
+ movq %rax,VMCB_rflags(%rcx)
movq VCPU_svm_vmcb_pa(%rbx),%rax
popq %r15
@@ -100,6 +106,12 @@ svm_trace_done:
movq VCPU_svm_vmcb(%rbx),%rcx
movq VMCB_rax(%rcx),%rax
movq %rax,UREGS_rax(%rsp)
+ movq VMCB_rip(%rcx),%rax
+ movq %rax,UREGS_rip(%rsp)
+ movq VMCB_rsp(%rcx),%rax
+ movq %rax,UREGS_rsp(%rsp)
+ movq VMCB_rflags(%rcx),%rax
+ movq %rax,UREGS_eflags(%rsp)
STGI
.globl svm_stgi_label;
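
The UREGS_* and VMCB_* names used in both SVM stubs above are not assembler magic: they are byte offsets emitted at build time by asm-offsets.c, which this patch extends further down with VMCB_rip/VMCB_rsp/VMCB_rflags. A hedged C illustration of the offsetof mechanism (the real generator emits assembler symbols via inline asm; printing them here is just for demonstration):

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for Xen's vmcb_struct; only the fields the stubs touch. */
    struct vmcb_struct { unsigned long rax, rip, rsp, rflags; };

    /* Each OFFSET() becomes a constant the .S files can use to address
     * C struct members relative to a base register. */
    #define OFFSET(sym, str, mem) \
        printf("#define %-12s %zu\n", #sym, offsetof(str, mem))

    int main(void)
    {
        OFFSET(VMCB_rax,    struct vmcb_struct, rax);
        OFFSET(VMCB_rip,    struct vmcb_struct, rip);
        OFFSET(VMCB_rsp,    struct vmcb_struct, rsp);
        OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
        return 0;
    }
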
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Wed Sep 19 14:25:44 2007 +0100
@@ -437,11 +437,9 @@ static int vmx_guest_x86_mode(struct vcp
{
unsigned int cs_ar_bytes;
- ASSERT(v == current);
-
if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
return 0;
- if ( unlikely(__vmread(GUEST_RFLAGS) & X86_EFLAGS_VM) )
+ if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
return 1;
cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
if ( hvm_long_mode_enabled(v) &&
@@ -485,21 +483,12 @@ void vmx_vmcs_save(struct vcpu *v, struc
vmx_vmcs_enter(v);
- c->rip = __vmread(GUEST_RIP);
- c->rsp = __vmread(GUEST_RSP);
- c->rflags = __vmread(GUEST_RFLAGS);
-
c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
c->msr_efer = v->arch.hvm_vcpu.guest_efer;
-
-#ifdef HVM_DEBUG_SUSPEND
- printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
- __func__, c->cr3, c->cr0, c->cr4);
-#endif
c->idtr_limit = __vmread(GUEST_IDTR_LIMIT);
c->idtr_base = __vmread(GUEST_IDTR_BASE);
@@ -593,10 +582,6 @@ int vmx_vmcs_restore(struct vcpu *v, str
v->arch.guest_table = pagetable_from_pfn(mfn);
vmx_vmcs_enter(v);
-
- __vmwrite(GUEST_RIP, c->rip);
- __vmwrite(GUEST_RSP, c->rsp);
- __vmwrite(GUEST_RFLAGS, c->rflags);
v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
@@ -791,30 +776,6 @@ static void vmx_ctxt_switch_to(struct vc
{
vmx_restore_guest_msrs(v);
vmx_restore_dr(v);
-}
-
-static void vmx_store_cpu_guest_regs(
- struct vcpu *v, struct cpu_user_regs *regs)
-{
- vmx_vmcs_enter(v);
-
- regs->eflags = __vmread(GUEST_RFLAGS);
- regs->eip = __vmread(GUEST_RIP);
- regs->esp = __vmread(GUEST_RSP);
-
- vmx_vmcs_exit(v);
-}
-
-static void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
-{
- vmx_vmcs_enter(v);
-
- /* NB. Bit 1 of RFLAGS must be set for VMENTRY to succeed. */
- __vmwrite(GUEST_RFLAGS, regs->eflags | 2UL);
- __vmwrite(GUEST_RIP, regs->eip);
- __vmwrite(GUEST_RSP, regs->esp);
-
- vmx_vmcs_exit(v);
}
static unsigned long vmx_get_segment_base(struct vcpu *v, enum x86_segment seg)
@@ -1061,9 +1022,7 @@ static void vmx_init_hypercall_page(stru
static int vmx_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
{
- unsigned long intr_shadow, eflags;
-
- ASSERT(v == current);
+ unsigned long intr_shadow;
intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
@@ -1073,8 +1032,7 @@ static int vmx_interrupts_enabled(struct
VMX_INTR_SHADOW_NMI));
ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
- eflags = __vmread(GUEST_RFLAGS);
- return (!irq_masked(eflags) &&
+ return (!irq_masked(guest_cpu_user_regs()->eflags) &&
!(intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS)));
}
@@ -1193,8 +1151,6 @@ static struct hvm_function_table vmx_fun
.domain_destroy = vmx_domain_destroy,
.vcpu_initialise = vmx_vcpu_initialise,
.vcpu_destroy = vmx_vcpu_destroy,
- .store_cpu_guest_regs = vmx_store_cpu_guest_regs,
- .load_cpu_guest_regs = vmx_load_cpu_guest_regs,
.save_cpu_ctxt = vmx_save_vmcs_ctxt,
.load_cpu_ctxt = vmx_load_vmcs_ctxt,
.interrupts_enabled = vmx_interrupts_enabled,
@@ -1284,14 +1240,11 @@ static int __get_instruction_length(void
static void __update_guest_eip(unsigned long inst_len)
{
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
unsigned long x;
- x = __vmread(GUEST_RIP);
- __vmwrite(GUEST_RIP, x + inst_len);
-
- x = __vmread(GUEST_RFLAGS);
- if ( x & X86_EFLAGS_RF )
- __vmwrite(GUEST_RFLAGS, x & ~X86_EFLAGS_RF);
+ regs->eip += inst_len;
+ regs->eflags &= ~X86_EFLAGS_RF;
x = __vmread(GUEST_INTERRUPTIBILITY_INFO);
if ( x & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) )
@@ -1435,15 +1388,9 @@ static void vmx_dr_access(unsigned long
*/
static void vmx_do_invlpg(unsigned long va)
{
- unsigned long eip;
struct vcpu *v = current;
HVMTRACE_2D(INVLPG, v, /*invlpga=*/ 0, va);
-
- eip = __vmread(GUEST_RIP);
-
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "eip=%lx, va=%lx",
- eip, va);
/*
* We do the safest things first, then try to update the shadow
@@ -1852,7 +1799,6 @@ static void vmx_io_instruction(unsigned
/* Copy current guest state into io instruction state structure. */
memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
- vmx_store_cpu_guest_regs(current, regs);
HVM_DBG_LOG(DBG_LEVEL_IO, "vm86 %d, eip=%x:%lx, "
"exit_qualification = %lx",
@@ -1891,12 +1837,12 @@ static void vmx_io_instruction(unsigned
static void vmx_world_save(struct vcpu *v, struct vmx_assist_context *c)
{
- /* NB. Skip transition instruction. */
- c->eip = __vmread(GUEST_RIP);
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
+
+ c->eip = regs->eip;
c->eip += __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
-
- c->esp = __vmread(GUEST_RSP);
- c->eflags = __vmread(GUEST_RFLAGS) & ~X86_EFLAGS_RF;
+ c->esp = regs->esp;
+ c->eflags = regs->eflags & ~X86_EFLAGS_RF;
c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
@@ -1951,6 +1897,7 @@ static void vmx_world_save(struct vcpu *
static int vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
{
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
unsigned long mfn = 0;
p2m_type_t p2mt;
@@ -1969,9 +1916,9 @@ static int vmx_world_restore(struct vcpu
v->arch.guest_table = pagetable_from_pfn(mfn);
- __vmwrite(GUEST_RIP, c->eip);
- __vmwrite(GUEST_RSP, c->esp);
- __vmwrite(GUEST_RFLAGS, c->eflags);
+ regs->eip = c->eip;
+ regs->esp = c->esp;
+ regs->eflags = c->eflags | 2;
v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
@@ -2121,7 +2068,6 @@ static int vmx_set_cr0(unsigned long val
static int vmx_set_cr0(unsigned long value)
{
struct vcpu *v = current;
- unsigned long eip;
int rc = hvm_set_cr0(value);
if ( rc == 0 )
@@ -2142,24 +2088,12 @@ static int vmx_set_cr0(unsigned long val
if ( !(value & X86_CR0_PE) )
{
if ( vmx_assist(v, VMX_ASSIST_INVOKE) )
- {
- eip = __vmread(GUEST_RIP);
- HVM_DBG_LOG(DBG_LEVEL_1,
- "Transfering control to vmxassist %%eip 0x%lx", eip);
return 0; /* do not update eip! */
- }
}
else if ( v->arch.hvm_vmx.vmxassist_enabled )
{
- eip = __vmread(GUEST_RIP);
- HVM_DBG_LOG(DBG_LEVEL_1,
- "Enabling CR0.PE at %%eip 0x%lx", eip);
if ( vmx_assist(v, VMX_ASSIST_RESTORE) )
- {
- HVM_DBG_LOG(DBG_LEVEL_1,
- "Restoring to %%eip 0x%lx", eip);
return 0; /* do not update eip! */
- }
}
return 1;
@@ -2204,10 +2138,8 @@ static int mov_to_cr(int gp, int cr, str
CASE_GET_REG(EBP, ebp);
CASE_GET_REG(ESI, esi);
CASE_GET_REG(EDI, edi);
+ CASE_GET_REG(ESP, esp);
CASE_EXTEND_GET_REG;
- case REG_ESP:
- value = __vmread(GUEST_RSP);
- break;
default:
gdprintk(XENLOG_ERR, "invalid gp: %d\n", gp);
goto exit_and_crash;
@@ -2276,11 +2208,8 @@ static void mov_from_cr(int cr, int gp,
CASE_SET_REG(EBP, ebp);
CASE_SET_REG(ESI, esi);
CASE_SET_REG(EDI, edi);
+ CASE_SET_REG(ESP, esp);
CASE_EXTEND_SET_REG;
- case REG_ESP:
- __vmwrite(GUEST_RSP, value);
- regs->esp = value;
- break;
default:
printk("invalid gp: %d\n", gp);
domain_crash(v->domain);
@@ -2521,12 +2450,10 @@ gp_fault:
return 0;
}
-static void vmx_do_hlt(void)
-{
- unsigned long rflags;
+static void vmx_do_hlt(struct cpu_user_regs *regs)
+{
HVMTRACE_0D(HLT, current);
- rflags = __vmread(GUEST_RFLAGS);
- hvm_hlt(rflags);
+ hvm_hlt(regs->eflags);
}
static void vmx_do_extint(struct cpu_user_regs *regs)
@@ -2601,7 +2528,6 @@ static void vmx_failed_vmentry(unsigned
case EXIT_REASON_MACHINE_CHECK:
printk("caused by machine check.\n");
HVMTRACE_0D(MCE, current);
- vmx_store_cpu_guest_regs(current, regs);
do_machine_check(regs);
break;
default:
@@ -2624,7 +2550,7 @@ asmlinkage void vmx_vmexit_handler(struc
exit_reason = __vmread(VM_EXIT_REASON);
- HVMTRACE_2D(VMEXIT, v, __vmread(GUEST_RIP), exit_reason);
+ HVMTRACE_2D(VMEXIT, v, regs->eip, exit_reason);
perfc_incra(vmexits, exit_reason);
@@ -2723,12 +2649,10 @@ asmlinkage void vmx_vmexit_handler(struc
(X86_EVENTTYPE_NMI << 8) )
goto exit_and_crash;
HVMTRACE_0D(NMI, v);
- vmx_store_cpu_guest_regs(v, regs);
do_nmi(regs); /* Real NMI, vector 2: normal processing. */
break;
case TRAP_machine_check:
HVMTRACE_0D(MCE, v);
- vmx_store_cpu_guest_regs(v, regs);
do_machine_check(regs);
break;
default:
@@ -2775,7 +2699,7 @@ asmlinkage void vmx_vmexit_handler(struc
case EXIT_REASON_HLT:
inst_len = __get_instruction_length(); /* Safe: HLT */
__update_guest_eip(inst_len);
- vmx_do_hlt();
+ vmx_do_hlt(regs);
break;
case EXIT_REASON_INVLPG:
{
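
Both svm_guest_x86_mode() and the vmx_guest_x86_mode() shown above now probe RFLAGS.VM from the canonical frame. The shared decision ladder, as a stand-alone sketch (stand-in flag inputs replacing the VMCB/VMCS segment-attribute reads):

    /* Returns the effective operand/address size in bytes, with the
     * same conventions as Xen's guest_x86_mode(): 0 = real mode,
     * 1 = virtual-8086. Stand-in inputs, not Xen's real types. */
    static int guest_x86_mode(unsigned long cr0, unsigned long eflags,
                              int long_mode_enabled, int cs_l, int cs_db)
    {
        if ( !(cr0 & 0x1 /* X86_CR0_PE */) )
            return 0;                    /* real mode */
        if ( eflags & 0x20000 /* X86_EFLAGS_VM */ )
            return 1;                    /* virtual-8086 mode */
        if ( long_mode_enabled && cs_l )
            return 8;                    /* 64-bit code segment */
        return cs_db ? 4 : 2;            /* 32- vs 16-bit protected mode */
    }
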
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/hvm/vmx/x86_32/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S Wed Sep 19 14:25:44 2007 +0100
@@ -22,6 +22,16 @@
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>
+
+#define VMRESUME .byte 0x0f,0x01,0xc3
+#define VMLAUNCH .byte 0x0f,0x01,0xc2
+#define VMREAD(off) .byte 0x0f,0x78,0x44,0x24,off
+#define VMWRITE(off) .byte 0x0f,0x79,0x44,0x24,off
+
+/* VMCS field encodings */
+#define GUEST_RSP 0x681c
+#define GUEST_RIP 0x681e
+#define GUEST_RFLAGS 0x6820
#define GET_CURRENT(reg) \
movl $STACK_SIZE-4, reg; \
@@ -51,6 +61,14 @@
ALIGN
ENTRY(vmx_asm_vmexit_handler)
HVM_SAVE_ALL_NOSEGREGS
+
+ movl $GUEST_RIP,%eax
+ VMREAD(UREGS_eip)
+ movl $GUEST_RSP,%eax
+ VMREAD(UREGS_esp)
+ movl $GUEST_RFLAGS,%eax
+ VMREAD(UREGS_eflags)
+
movl %esp,%eax
push %eax
call vmx_vmexit_handler
@@ -78,13 +96,19 @@ ENTRY(vmx_asm_do_vmentry)
movl %eax,%cr2
call vmx_trace_vmentry
+ movl $GUEST_RIP,%eax
+ VMWRITE(UREGS_eip)
+ movl $GUEST_RSP,%eax
+ VMWRITE(UREGS_esp)
+ movl $GUEST_RFLAGS,%eax
+ VMWRITE(UREGS_eflags)
+
cmpl $0,VCPU_vmx_launched(%ebx)
je vmx_launch
/*vmx_resume:*/
HVM_RESTORE_ALL_NOSEGREGS
- /* VMRESUME */
- .byte 0x0f,0x01,0xc3
+ VMRESUME
pushf
call vm_resume_fail
ud2
@@ -92,8 +116,7 @@ vmx_launch:
vmx_launch:
movl $1,VCPU_vmx_launched(%ebx)
HVM_RESTORE_ALL_NOSEGREGS
- /* VMLAUNCH */
- .byte 0x0f,0x01,0xc2
+ VMLAUNCH
pushf
call vm_launch_fail
ud2
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/hvm/vmx/x86_64/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S Wed Sep 19 14:25:44 2007 +0100
@@ -22,6 +22,16 @@
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>
+
+#define VMRESUME .byte 0x0f,0x01,0xc3
+#define VMLAUNCH .byte 0x0f,0x01,0xc2
+#define VMREAD(off) .byte 0x0f,0x78,0x44,0x24,off
+#define VMWRITE(off) .byte 0x0f,0x79,0x44,0x24,off
+
+/* VMCS field encodings */
+#define GUEST_RSP 0x681c
+#define GUEST_RIP 0x681e
+#define GUEST_RFLAGS 0x6820
#define GET_CURRENT(reg) \
movq $STACK_SIZE-8, reg; \
@@ -66,6 +76,14 @@
ALIGN
ENTRY(vmx_asm_vmexit_handler)
HVM_SAVE_ALL_NOSEGREGS
+
+ movl $GUEST_RIP,%eax
+ VMREAD(UREGS_rip)
+ movl $GUEST_RSP,%eax
+ VMREAD(UREGS_rsp)
+ movl $GUEST_RFLAGS,%eax
+ VMREAD(UREGS_eflags)
+
movq %rsp,%rdi
call vmx_vmexit_handler
jmp vmx_asm_do_vmentry
@@ -92,13 +110,19 @@ ENTRY(vmx_asm_do_vmentry)
movq %rax,%cr2
call vmx_trace_vmentry
+ movl $GUEST_RIP,%eax
+ VMWRITE(UREGS_rip)
+ movl $GUEST_RSP,%eax
+ VMWRITE(UREGS_rsp)
+ movl $GUEST_RFLAGS,%eax
+ VMWRITE(UREGS_eflags)
+
cmpl $0,VCPU_vmx_launched(%rbx)
je vmx_launch
/*vmx_resume:*/
HVM_RESTORE_ALL_NOSEGREGS
- /* VMRESUME */
- .byte 0x0f,0x01,0xc3
+ VMRESUME
pushfq
call vm_resume_fail
ud2
@@ -106,8 +130,7 @@ vmx_launch:
vmx_launch:
movl $1,VCPU_vmx_launched(%rbx)
HVM_RESTORE_ALL_NOSEGREGS
- /* VMLAUNCH */
- .byte 0x0f,0x01,0xc2
+ VMLAUNCH
pushfq
call vm_launch_fail
ud2
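
The VMREAD/VMWRITE macros in the two VMX stubs are hand-encoded so the files still assemble on toolchains that predate the VMX mnemonics. Decoding the bytes against the Intel SDM: 0f 78 /r is VMREAD r/m,reg (store the VMCS field named by reg to r/m) and 0f 79 /r is VMWRITE reg,r/m (load it from r/m); ModRM 0x44 selects mod=01 (disp8) with reg=%eax and an SIB byte, and SIB 0x24 selects base=%esp -- i.e. off(%esp) with the VMCS field encoding in %eax. As a C-side reference for the encodings:

    #include <stdint.h>

    /* VMCS field encodings used by the stubs above (Intel SDM,
     * natural-width guest-state fields). */
    enum vmcs_field {
        GUEST_RSP    = 0x681c,
        GUEST_RIP    = 0x681e,
        GUEST_RFLAGS = 0x6820,
    };

    /* Emit the same five bytes VMREAD(off) hand-assembles; purely
     * illustrative. */
    static void emit_vmread(uint8_t off, uint8_t out[5])
    {
        out[0] = 0x0f; out[1] = 0x78; /* VMREAD r/m,reg         */
        out[2] = 0x44;                /* ModRM: disp8, reg=%eax */
        out[3] = 0x24;                /* SIB:   base=%esp       */
        out[4] = off;                 /* stack-slot offset      */
    }
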
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c Wed Sep 19 14:25:44 2007 +0100
@@ -2928,8 +2928,6 @@ static int sh_page_fault(struct vcpu *v,
sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
goto done;
}
-
- hvm_store_cpu_guest_regs(v, regs);
}
SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n",
@@ -2992,10 +2990,6 @@ static int sh_page_fault(struct vcpu *v,
}
}
#endif /* PAE guest */
-
- /* Emulator has changed the user registers: write back */
- if ( is_hvm_domain(d) )
- hvm_load_cpu_guest_regs(v, regs);
SHADOW_PRINTK("emulated\n");
return EXCRET_fault_fixed;
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/oprofile/op_model_athlon.c
--- a/xen/arch/x86/oprofile/op_model_athlon.c Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/oprofile/op_model_athlon.c Wed Sep 19 14:25:44 2007 +0100
@@ -119,7 +119,6 @@ static int athlon_check_ctrs(unsigned in
(regs->eip == (unsigned long)svm_stgi_label)) {
/* SVM guest was running when NMI occurred */
ASSERT(is_hvm_vcpu(v));
- hvm_store_cpu_guest_regs(v, guest_regs);
eip = guest_regs->eip;
mode = xenoprofile_get_mode(v, guest_regs);
} else {
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/x86_32/asm-offsets.c
--- a/xen/arch/x86/x86_32/asm-offsets.c Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/x86_32/asm-offsets.c Wed Sep 19 14:25:44 2007 +0100
@@ -89,6 +89,9 @@ void __dummy__(void)
BLANK();
OFFSET(VMCB_rax, struct vmcb_struct, rax);
+ OFFSET(VMCB_rip, struct vmcb_struct, rip);
+ OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
+ OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
BLANK();
OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/x86_32/traps.c
--- a/xen/arch/x86/x86_32/traps.c Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/x86_32/traps.c Wed Sep 19 14:25:44 2007 +0100
@@ -47,7 +47,6 @@ void show_registers(struct cpu_user_regs
{
struct segment_register sreg;
context = "hvm";
- hvm_store_cpu_guest_regs(v, &fault_regs);
fault_crs[0] = v->arch.hvm_vcpu.guest_cr[0];
fault_crs[2] = v->arch.hvm_vcpu.guest_cr[2];
fault_crs[3] = v->arch.hvm_vcpu.guest_cr[3];
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/x86_64/asm-offsets.c
--- a/xen/arch/x86/x86_64/asm-offsets.c Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/x86_64/asm-offsets.c Wed Sep 19 14:25:44 2007 +0100
@@ -95,6 +95,9 @@ void __dummy__(void)
BLANK();
OFFSET(VMCB_rax, struct vmcb_struct, rax);
+ OFFSET(VMCB_rip, struct vmcb_struct, rip);
+ OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
+ OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
BLANK();
OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
diff -r 202153d094d8 -r ec3b23d8d544 xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/arch/x86/x86_64/traps.c Wed Sep 19 14:25:44 2007 +0100
@@ -50,7 +50,6 @@ void show_registers(struct cpu_user_regs
{
struct segment_register sreg;
context = "hvm";
- hvm_store_cpu_guest_regs(v, &fault_regs);
fault_crs[0] = v->arch.hvm_vcpu.guest_cr[0];
fault_crs[2] = v->arch.hvm_vcpu.guest_cr[2];
fault_crs[3] = v->arch.hvm_vcpu.guest_cr[3];
diff -r 202153d094d8 -r ec3b23d8d544 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h Wed Sep 19 14:25:44 2007 +0100
@@ -21,6 +21,7 @@
#ifndef __ASM_X86_HVM_HVM_H__
#define __ASM_X86_HVM_HVM_H__
+#include <asm/current.h>
#include <asm/x86_emulate.h>
#include <public/domctl.h>
#include <public/hvm/save.h>
@@ -79,16 +80,6 @@ struct hvm_function_table {
int (*vcpu_initialise)(struct vcpu *v);
void (*vcpu_destroy)(struct vcpu *v);
- /*
- * Store and load guest state:
- * 1) load/store guest register state,
- * 2) modify guest state (e.g., set debug flags).
- */
- void (*store_cpu_guest_regs)(
- struct vcpu *v, struct cpu_user_regs *r);
- void (*load_cpu_guest_regs)(
- struct vcpu *v, struct cpu_user_regs *r);
-
/* save and load hvm guest cpu context for save/restore */
void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
@@ -166,19 +157,6 @@ void hvm_vcpu_reset(struct vcpu *vcpu);
void hvm_send_assist_req(struct vcpu *v);
-static inline void
-hvm_store_cpu_guest_regs(
- struct vcpu *v, struct cpu_user_regs *r)
-{
- hvm_funcs.store_cpu_guest_regs(v, r);
-}
-
-static inline void
-hvm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
-{
- hvm_funcs.load_cpu_guest_regs(v, r);
-}
-
void hvm_set_guest_time(struct vcpu *v, u64 gtime);
u64 hvm_get_guest_time(struct vcpu *v);
@@ -199,12 +177,14 @@ static inline int
static inline int
hvm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
{
+ ASSERT(v == current);
return hvm_funcs.interrupts_enabled(v, type);
}
static inline int
hvm_guest_x86_mode(struct vcpu *v)
{
+ ASSERT(v == current);
return hvm_funcs.guest_x86_mode(v);
}
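
The two new ASSERTs encode the constraint that replaces the removed store/load hooks: guest_cpu_user_regs() names the frame of whichever vcpu is running on this pcpu, so helpers reading guest RFLAGS or the CPU mode from it are only meaningful for v == current. A minimal sketch of that invariant, with stand-in declarations:

    #include <assert.h>

    struct cpu_user_regs { unsigned long eflags; };
    struct vcpu { struct cpu_user_regs regs; };

    /* Stand-ins for the per-pcpu current vcpu and its stack frame. */
    static struct vcpu *current;
    static struct cpu_user_regs *guest_cpu_user_regs(void)
    {
        return &current->regs;
    }

    #define X86_EFLAGS_IF 0x200UL

    /* Valid only for the running vcpu -- the invariant the ASSERTs in
     * hvm_interrupts_enabled()/hvm_guest_x86_mode() now enforce. */
    static int interrupts_enabled(struct vcpu *v)
    {
        assert(v == current);
        return !!(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF);
    }
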
diff -r 202153d094d8 -r ec3b23d8d544 xen/include/asm-x86/hvm/svm/emulate.h
--- a/xen/include/asm-x86/hvm/svm/emulate.h Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/emulate.h Wed Sep 19 14:25:44 2007 +0100
@@ -131,16 +131,6 @@ static inline int skip_prefix_bytes(u8 *
return index;
}
-
-
-static void inline __update_guest_eip(
- struct vmcb_struct *vmcb, int inst_len)
-{
- ASSERT(inst_len > 0);
- vmcb->rip += inst_len;
- vmcb->rflags &= ~X86_EFLAGS_RF;
-}
-
#endif /* __ASM_X86_HVM_SVM_EMULATE_H__ */
/*
diff -r 202153d094d8 -r ec3b23d8d544 xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h Wed Sep 19 12:12:49 2007 +0100
+++ b/xen/include/asm-x86/hvm/vcpu.h Wed Sep 19 14:25:44 2007 +0100
@@ -66,7 +66,7 @@ struct hvm_vcpu {
#define ARCH_HVM_IO_WAIT 1 /* Waiting for I/O completion */
-#define HVM_CONTEXT_STACK_BYTES (offsetof(struct cpu_user_regs, error_code))
+#define HVM_CONTEXT_STACK_BYTES (offsetof(struct cpu_user_regs, ss))
#endif /* __ASM_X86_HVM_VCPU_H__ */
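
Finally, the HVM_CONTEXT_STACK_BYTES change: the I/O-emulation paths (handle_mmio(), svm_io_instruction(), hvm_io_assist()) memcpy this many bytes of the stack frame, and the old bound, offsetof(..., error_code), stopped just short of eip/eflags/esp. Now that those fields are the canonical copy, the copy must extend through them, hence offsetof(..., ss). A sketch with a simplified layout (field order here is an assumption in the spirit of Xen's x86_32 frame, not the exact definition):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cpu_user_regs {
        uint32_t ebx, ecx, edx, esi, edi, ebp, eax; /* GPRs           */
        uint16_t error_code, entry_vector;          /* old copy bound */
        uint32_t eip, cs, eflags, esp;              /* now canonical  */
        uint32_t ss;                                /* new copy bound */
    };

    int main(void)
    {
        printf("old bound: %zu bytes\n",
               offsetof(struct cpu_user_regs, error_code));
        printf("new bound: %zu bytes (includes eip/eflags/esp)\n",
               offsetof(struct cpu_user_regs, ss));
        return 0;
    }
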