# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1183803694 -3600
# Node ID 50c18666d6607a58e128ad1d3758a98cb9881de3
# Parent 41c8284cfc0cfaa773771a2d9be85cad1fbd4079
SVM: Sync with VMX code changes.

Mirror the recent VMX cleanups on the SVM side: move the guest-state
predicates (svm_long_mode_enabled, svm_lme_is_set, svm_cr4_pae_is_set,
svm_paging_enabled, svm_pae_enabled, svm_nx_enabled, svm_pgbit_test)
out of svm.h and into svm.c as static functions, adding i386 stubs for
the long-mode tests so call sites no longer need #ifdef __x86_64__
guards; drop 'inline' from file-local helpers; and switch emulate.c to
the generic hvm_long_mode_enabled() predicate.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/emulate.c    |    4 -
 xen/arch/x86/hvm/svm/svm.c        |   88 ++++++++++++++++++++++++++++----------
 xen/include/asm-x86/hvm/svm/svm.h |   40 -----------------
 3 files changed, 67 insertions(+), 65 deletions(-)
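
The core of the change is the #ifdef-stub pattern used for the new
static helpers in svm.c. As a minimal standalone sketch (the type,
field and helper names below are illustrative stand-ins, not the exact
Xen definitions):

    /* Sketch only: define 64-bit-only predicates once, with i386
     * stubs returning 0, so callers need no #ifdef of their own. */
    #include <stdint.h>

    #define EFER_LME (1UL << 8)    /* long mode enable */
    #define EFER_LMA (1UL << 10)   /* long mode active */

    struct vcpu { uint64_t shadow_efer; };  /* cf. cpu_shadow_efer */

    #ifdef __x86_64__
    static int lme_is_set(struct vcpu *v)
    { return !!(v->shadow_efer & EFER_LME); }
    static int long_mode_enabled(struct vcpu *v)
    { return !!(v->shadow_efer & EFER_LMA); }
    #else /* __i386__ */
    static int lme_is_set(struct vcpu *v) { (void)v; return 0; }
    static int long_mode_enabled(struct vcpu *v) { (void)v; return 0; }
    #endif

    /* Call sites can then be written unconditionally: */
    static int in_long_mode(struct vcpu *v, int cs_l)
    { return cs_l && long_mode_enabled(v); }

On i386 the stubs are compile-time constants, so the 64-bit-only paths
fold away while the call sites stay free of preprocessor noise.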
diff -r 41c8284cfc0c -r 50c18666d660 xen/arch/x86/hvm/svm/emulate.c
--- a/xen/arch/x86/hvm/svm/emulate.c Sat Jul 07 11:08:57 2007 +0100
+++ b/xen/arch/x86/hvm/svm/emulate.c Sat Jul 07 11:21:34 2007 +0100
@@ -201,7 +201,7 @@ unsigned long get_effective_addr_modrm64
#if __x86_64__
/* 64-bit mode */
- if (vmcb->cs.attr.fields.l && svm_long_mode_enabled(v))
+ if (vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v))
return vmcb->rip + inst_len + *size + disp;
#endif
return disp;
@@ -327,7 +327,7 @@ unsigned long svm_rip2pointer(struct vcp
*/
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
unsigned long p = vmcb->cs.base + vmcb->rip;
- if (!(vmcb->cs.attr.fields.l && svm_long_mode_enabled(v)))
+ if (!(vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v)))
return (u32)p; /* mask to 32 bits */
/* NB. Should mask to 16 bits if in real mode or 16-bit protected mode. */
return p;
diff -r 41c8284cfc0c -r 50c18666d660 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Sat Jul 07 11:08:57 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Sat Jul 07 11:21:34 2007 +0100
@@ -69,7 +69,7 @@ static void *root_vmcb[NR_CPUS] __read_m
/* hardware assisted paging bits */
extern int opt_hap_enabled;
-static inline void svm_inject_exception(struct vcpu *v, int trap,
+static void svm_inject_exception(struct vcpu *v, int trap,
int ev, int error_code)
{
eventinj_t event;
@@ -98,6 +98,57 @@ static void stop_svm(void)
write_efer(read_efer() & ~EFER_SVME);
}
+#ifdef __x86_64__
+
+static int svm_lme_is_set(struct vcpu *v)
+{
+    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
+    return guest_efer & EFER_LME;
+}
+
+static int svm_long_mode_enabled(struct vcpu *v)
+{
+    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
+    return guest_efer & EFER_LMA;
+}
+
+#else /* __i386__ */
+
+static int svm_lme_is_set(struct vcpu *v)
+{ return 0; }
+static int svm_long_mode_enabled(struct vcpu *v)
+{ return 0; }
+
+#endif
+
+static int svm_cr4_pae_is_set(struct vcpu *v)
+{
+    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+    return guest_cr4 & X86_CR4_PAE;
+}
+
+static int svm_paging_enabled(struct vcpu *v)
+{
+    unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
+    return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
+}
+
+static int svm_pae_enabled(struct vcpu *v)
+{
+    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+    return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
+}
+
+static int svm_nx_enabled(struct vcpu *v)
+{
+    return v->arch.hvm_svm.cpu_shadow_efer & EFER_NX;
+}
+
+static int svm_pgbit_test(struct vcpu *v)
+{
+    return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
+}
+
static void svm_store_cpu_guest_regs(
struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
{
@@ -122,7 +173,7 @@ static void svm_store_cpu_guest_regs(
}
}
-static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
+static int long_mode_do_msr_write(struct cpu_user_regs *regs)
{
u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
u32 ecx = regs->ecx;
@@ -149,7 +200,6 @@ static inline int long_mode_do_msr_write
goto gp_fault;
}
-#ifdef __x86_64__
if ( (msr_content & EFER_LME) && !svm_lme_is_set(v) )
{
/* EFER.LME transition from 0 to 1. */
@@ -170,7 +220,6 @@ static inline int long_mode_do_msr_write
goto gp_fault;
}
}
-#endif /* __x86_64__ */
v->arch.hvm_svm.cpu_shadow_efer = msr_content;
vmcb->efer = msr_content | EFER_SVME;
@@ -204,7 +253,7 @@ static inline int long_mode_do_msr_write
#define savedebug(_v,_reg) \
asm volatile ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
-static inline void svm_save_dr(struct vcpu *v)
+static void svm_save_dr(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -224,7 +273,7 @@ static inline void svm_save_dr(struct vc
}
-static inline void __restore_debug_registers(struct vcpu *v)
+static void __restore_debug_registers(struct vcpu *v)
{
loaddebug(&v->arch.guest_context, 0);
loaddebug(&v->arch.guest_context, 1);
@@ -546,7 +595,7 @@ static int svm_load_vmcb_ctxt(struct vcp
return 0;
}
-static inline void svm_restore_dr(struct vcpu *v)
+static void svm_restore_dr(struct vcpu *v)
{
if ( unlikely(v->arch.guest_context.debugreg[7] & 0xFF) )
__restore_debug_registers(v);
@@ -636,11 +685,8 @@ static unsigned long svm_get_segment_bas
static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    int long_mode = 0;
-
-#ifdef __x86_64__
-    long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
-#endif
+    int long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
+
switch ( seg )
{
case x86_seg_cs: return long_mode ? 0 : vmcb->cs.base;
@@ -1123,7 +1169,7 @@ static void svm_vmexit_do_cpuid(struct v
__update_guest_eip(vmcb, inst_len);
}
-static inline unsigned long *get_reg_p(
+static unsigned long *get_reg_p(
unsigned int gpreg,
struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
{
@@ -1188,7 +1234,7 @@ static inline unsigned long *get_reg_p(
}
-static inline unsigned long get_reg(
+static unsigned long get_reg(
unsigned int gpreg, struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
{
unsigned long *gp;
@@ -1197,7 +1243,7 @@ static inline unsigned long get_reg(
}
-static inline void set_reg(
+static void set_reg(
unsigned int gpreg, unsigned long value,
struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
{
@@ -1300,7 +1346,7 @@ static void svm_get_prefix_info(struct v
/* Get the address of INS/OUTS instruction */
-static inline int svm_get_io_address(
+static int svm_get_io_address(
struct vcpu *v, struct cpu_user_regs *regs,
unsigned int size, ioio_info_t info,
unsigned long *count, unsigned long *addr)
@@ -1311,10 +1357,8 @@ static inline int svm_get_io_address(
svm_segment_register_t *seg = NULL;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-#ifdef __x86_64__
-    /* If we're in long mode, we shouldn't check the segment presence & limit */
+    /* If we're in long mode, don't check the segment presence & limit */
     long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
-#endif
/* d field of cs.attr is 1 for 32-bit, 0 for 16 or 64 bit.
* l field combined with EFER_LMA says whether it's 16 or 64 bit.
@@ -1641,7 +1685,6 @@ static int svm_set_cr0(unsigned long val
if ( (value & X86_CR0_PG) && !(old_value & X86_CR0_PG) )
{
-#if defined(__x86_64__)
if ( svm_lme_is_set(v) )
{
if ( !svm_cr4_pae_is_set(v) )
@@ -1654,7 +1697,6 @@ static int svm_set_cr0(unsigned long val
v->arch.hvm_svm.cpu_shadow_efer |= EFER_LMA;
vmcb->efer |= EFER_LMA | EFER_LME;
}
-#endif /* __x86_64__ */
if ( !paging_mode_hap(v->domain) )
{
@@ -2067,7 +2109,7 @@ static int svm_cr_access(struct vcpu *v,
return result;
}
-static inline void svm_do_msr_access(
+static void svm_do_msr_access(
struct vcpu *v, struct cpu_user_regs *regs)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -2170,7 +2212,7 @@ static inline void svm_do_msr_access(
__update_guest_eip(vmcb, inst_len);
}
-static inline void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
+static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
{
enum hvm_intack type = hvm_vcpu_has_pending_irq(current);
diff -r 41c8284cfc0c -r 50c18666d660 xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h Sat Jul 07 11:08:57 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/svm.h Sat Jul 07 11:21:34 2007 +0100
@@ -30,46 +30,6 @@
extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
-static inline int svm_long_mode_enabled(struct vcpu *v)
-{
-    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
-    return guest_efer & EFER_LMA;
-}
-
-static inline int svm_lme_is_set(struct vcpu *v)
-{
-    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
-    return guest_efer & EFER_LME;
-}
-
-static inline int svm_cr4_pae_is_set(struct vcpu *v)
-{
-    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
-    return guest_cr4 & X86_CR4_PAE;
-}
-
-static inline int svm_paging_enabled(struct vcpu *v)
-{
-    unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-    return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
-}
-
-static inline int svm_pae_enabled(struct vcpu *v)
-{
-    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
-    return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
-}
-
-static inline int svm_nx_enabled(struct vcpu *v)
-{
-    return v->arch.hvm_svm.cpu_shadow_efer & EFER_NX;
-}
-
-static inline int svm_pgbit_test(struct vcpu *v)
-{
-    return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
-}
-
#define SVM_REG_EAX (0)
#define SVM_REG_ECX (1)
#define SVM_REG_EDX (2)