- share more code between the 32- and 64-bit variants
- properly handle continuations for 32-bit guests on a 64-bit hypervisor
- properly handle preemption (this must *not* rely on regs->eip, as other
  code may overwrite the value there by calling hvm_store_cpu_guest_regs();
  see the sketch below)
- deny hypercall access when called from a guest in vm86 mode, which requires
  that ???_guest_x86_mode() make real and vm86 modes distinguishable
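To make the third bullet concrete: before this patch, hvm_do_hypercall()
planted the dummy value 0xF0F0F0FF in regs->eip and inferred preemption from
whether a handler had changed it. Any path that calls
hvm_store_cpu_guest_regs() rewrites regs->eip and breaks that inference. The
standalone demo below (not Xen code; every name in it is invented for
illustration) shows the false positive, and how an explicit flag like the
patch's hc_preempted avoids it:

#include <stdint.h>
#include <stdio.h>

struct fake_regs { uint32_t eip; };

static int hc_preempted;  /* the patch uses DEFINE_PER_CPU(char, hc_preempted) */

/* Stand-in for hvm_store_cpu_guest_regs(): legitimately rewrites eip. */
static void store_guest_regs(struct fake_regs *regs)
{
    regs->eip = 0x1000;  /* e.g. refreshed from the VMCB/VMCS */
}

/* Stand-in for a hypercall handler; here it is NOT preempted, so it
 * neither rewinds eip nor sets the flag. */
static void handler(struct fake_regs *regs)
{
    store_guest_regs(regs);  /* clobbers any dummy eip value */
}

int main(void)
{
    struct fake_regs regs = { .eip = 0x1002 };

    /* Old scheme: plant a dummy eip, infer preemption from a change. */
    uint32_t old_eip = regs.eip;
    regs.eip = 0xF0F0F0FF;
    hc_preempted = 0;      /* new scheme: explicit flag, cleared up front */

    handler(&regs);

    printf("old scheme claims preempted: %d (wrong)\n",
           regs.eip != 0xF0F0F0FF);
    printf("flag scheme claims preempted: %d (right)\n", hc_preempted);

    regs.eip = old_eip;
    return 0;
}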
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
Index: 2007-05-14/xen/arch/x86/domain.c
===================================================================
--- 2007-05-14.orig/xen/arch/x86/domain.c 2007-05-14 13:43:44.000000000 +0200
+++ 2007-05-14/xen/arch/x86/domain.c 2007-05-14 14:27:23.000000000 +0200
@@ -38,6 +38,7 @@
#include <asm/mpspec.h>
#include <asm/ldt.h>
#include <asm/paging.h>
+#include <asm/hypercall.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/msr.h>
@@ -1234,6 +1235,8 @@ void sync_vcpu_execstate(struct vcpu *v)
__arg; \
})
+DEFINE_PER_CPU(char, hc_preempted);
+
unsigned long hypercall_create_continuation(
unsigned int op, const char *format, ...)
{
@@ -1265,7 +1268,9 @@ unsigned long hypercall_create_continuat
regs->eip -= 2; /* re-execute 'syscall' / 'int 0x82' */
#ifdef __x86_64__
- if ( !is_pv_32on64_domain(current->domain) )
+ if ( !is_hvm_vcpu(current) ?
+ !is_pv_32on64_vcpu(current) :
+ hvm_guest_x86_mode(current) == 8 )
{
for ( i = 0; *p != '\0'; i++ )
{
@@ -1301,6 +1306,8 @@ unsigned long hypercall_create_continuat
}
}
}
+
+ this_cpu(hc_preempted) = 1;
}
va_end(args);
Index: 2007-05-14/xen/arch/x86/hvm/hvm.c
===================================================================
--- 2007-05-14.orig/xen/arch/x86/hvm/hvm.c 2007-05-14 13:47:02.000000000 +0200
+++ 2007-05-14/xen/arch/x86/hvm/hvm.c 2007-05-14 14:21:26.000000000 +0200
@@ -663,7 +663,7 @@ typedef unsigned long hvm_hypercall_t(
#if defined(__i386__)
-static hvm_hypercall_t *hvm_hypercall_table[NR_hypercalls] = {
+static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
HYPERCALL(memory_op),
HYPERCALL(multicall),
HYPERCALL(xen_version),
@@ -672,21 +672,6 @@ static hvm_hypercall_t *hvm_hypercall_ta
HYPERCALL(hvm_op)
};
-static void __hvm_do_hypercall(struct cpu_user_regs *pregs)
-{
- if ( (pregs->eax >= NR_hypercalls) || !hvm_hypercall_table[pregs->eax] )
- {
- if ( pregs->eax != __HYPERVISOR_grant_table_op )
- gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d bad hypercall %d.\n",
- current->domain->domain_id, current->vcpu_id, pregs->eax);
- pregs->eax = -ENOSYS;
- return;
- }
-
- pregs->eax = hvm_hypercall_table[pregs->eax](
- pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
-}
-
#else /* defined(__x86_64__) */
static long do_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
@@ -746,49 +731,38 @@ static hvm_hypercall_t *hvm_hypercall32_
HYPERCALL(hvm_op)
};
-static void __hvm_do_hypercall(struct cpu_user_regs *pregs)
-{
- pregs->rax = (uint32_t)pregs->eax; /* mask in case compat32 caller */
- if ( (pregs->rax >= NR_hypercalls) || !hvm_hypercall64_table[pregs->rax] )
- {
- if ( pregs->rax != __HYPERVISOR_grant_table_op )
- gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d bad hypercall %ld.\n",
- current->domain->domain_id, current->vcpu_id, pregs->rax);
- pregs->rax = -ENOSYS;
- return;
- }
-
- if ( current->arch.paging.mode->guest_levels == 4 )
- {
- pregs->rax = hvm_hypercall64_table[pregs->rax](pregs->rdi,
- pregs->rsi,
- pregs->rdx,
- pregs->r10,
- pregs->r8);
- }
- else
- {
- pregs->eax = hvm_hypercall32_table[pregs->eax]((uint32_t)pregs->ebx,
- (uint32_t)pregs->ecx,
- (uint32_t)pregs->edx,
- (uint32_t)pregs->esi,
- (uint32_t)pregs->edi);
- }
-}
-
#endif /* defined(__x86_64__) */
int hvm_do_hypercall(struct cpu_user_regs *regs)
{
- int flush, preempted;
- unsigned long old_eip;
+ int flush, mode = hvm_guest_x86_mode(current);
+ uint32_t eax = regs->eax;
- hvm_store_cpu_guest_regs(current, regs, NULL);
+ switch ( mode )
+ {
+#ifdef __x86_64__
+ case 8:
+#endif
+ case 4:
+ case 2:
+ hvm_store_cpu_guest_regs(current, regs, NULL);
+ if ( unlikely(ring_3(regs)) )
+ {
+ default:
+ regs->eax = -EPERM;
+ return HVM_HCALL_completed;
+ }
+ case 0:
+ break;
+ }
- if ( unlikely(ring_3(regs)) )
+ if ( (eax >= NR_hypercalls) || !hvm_hypercall32_table[eax] )
{
- regs->eax = -EPERM;
- return 0;
+ if ( eax != __HYPERVISOR_grant_table_op )
+ gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d bad hypercall %u.\n",
+ current->domain->domain_id, current->vcpu_id, eax);
+ regs->eax = -ENOSYS;
+ return HVM_HCALL_completed;
}
/*
@@ -796,20 +770,29 @@ int hvm_do_hypercall(struct cpu_user_reg
* For now we also need to flush when pages are added, as qemu-dm is not
* yet capable of faulting pages into an existing valid mapcache bucket.
*/
- flush = ((uint32_t)regs->eax == __HYPERVISOR_memory_op);
-
- /* Check for preemption: RIP will be modified from this dummy value. */
- old_eip = regs->eip;
- regs->eip = 0xF0F0F0FF;
-
- __hvm_do_hypercall(regs);
+ flush = (eax == __HYPERVISOR_memory_op);
+ this_cpu(hc_preempted) = 0;
- preempted = (regs->eip != 0xF0F0F0FF);
- regs->eip = old_eip;
-
- hvm_load_cpu_guest_regs(current, regs);
+#ifdef __x86_64__
+ if ( mode == 8 )
+ {
+ regs->rax = hvm_hypercall64_table[eax](regs->rdi,
+ regs->rsi,
+ regs->rdx,
+ regs->r10,
+ regs->r8);
+ }
+ else
+#endif
+ {
+ regs->eax = hvm_hypercall32_table[eax]((uint32_t)regs->ebx,
+ (uint32_t)regs->ecx,
+ (uint32_t)regs->edx,
+ (uint32_t)regs->esi,
+ (uint32_t)regs->edi);
+ }
- return (preempted ? HVM_HCALL_preempted :
+ return (this_cpu(hc_preempted) ? HVM_HCALL_preempted :
flush ? HVM_HCALL_invalidate : HVM_HCALL_completed);
}
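A note on the reworked mode check in hvm_do_hypercall() above: the default:
label sits inside the if ( unlikely(ring_3(regs)) ) body, so unhandled modes
-- in particular mode 1, vm86 -- jump straight to the -EPERM path, while
protected-mode callers (2/4/8) reach it only when running in ring 3, and real
mode (0) passes through without a register sync. This is also what lets both
builds share the validation code, since a 64-bit hypervisor now probes
hvm_hypercall32_table for all sub-64-bit callers. A schematic, more
conventional rendering of the same control flow (a sketch, not drop-in code;
ring_3() and the HVM helpers are Xen-internal):

    switch ( mode )
    {
#ifdef __x86_64__
    case 8:
#endif
    case 4:
    case 2:
        hvm_store_cpu_guest_regs(current, regs, NULL);
        if ( !ring_3(regs) )
            break;                  /* privileged enough: proceed */
        /* fall through: ring-3 callers are rejected */
    case 1:                         /* vm86: hypercalls are denied */
    default:
        regs->eax = -EPERM;
        return HVM_HCALL_completed;
    case 0:                         /* real mode: proceed */
        break;
    }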
Index: 2007-05-14/xen/arch/x86/hvm/platform.c
===================================================================
--- 2007-05-14.orig/xen/arch/x86/hvm/platform.c 2007-04-23 10:01:41.000000000 +0200
+++ 2007-05-14/xen/arch/x86/hvm/platform.c 2007-05-14 13:47:25.000000000 +0200
@@ -1037,6 +1037,9 @@ void handle_mmio(unsigned long gpa)
df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
address_bytes = hvm_guest_x86_mode(v);
+ if (address_bytes < 2)
+ /* real or vm86 modes */
+ address_bytes = 2;
inst_addr = hvm_get_segment_base(v, x86_seg_cs) + regs->eip;
inst_len = hvm_instruction_length(inst_addr, address_bytes);
if ( inst_len <= 0 )
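The clamp above relies on the return convention this patch gives
svm_guest_x86_mode()/vmx_guest_x86_mode() (the ???_guest_x86_mode() of the
changelog), visible in the two hunks that follow: 0 for real mode (CR0.PE
clear), 1 for vm86 (EFLAGS.VM set), and 2/4/8 for 16-/32-/64-bit protected
mode. handle_mmio() wants an address size in bytes, and both real and vm86
mode use 16-bit effective addresses, hence the clamp to 2. A hypothetical
helper (not part of the patch) expressing the same mapping:

    /* Hypothetical helper; mirrors the clamp added to handle_mmio() above. */
    static inline int guest_address_bytes(int x86_mode)
    {
        /* 0 (real) and 1 (vm86) both use 16-bit effective addresses. */
        return (x86_mode < 2) ? 2 : x86_mode;
    }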
Index: 2007-05-14/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- 2007-05-14.orig/xen/arch/x86/hvm/svm/svm.c 2007-05-03 09:45:09.000000000 +0200
+++ 2007-05-14/xen/arch/x86/hvm/svm/svm.c 2007-05-14 13:47:25.000000000 +0200
@@ -563,14 +563,6 @@ static inline void svm_restore_dr(struct
}
-static int svm_realmode(struct vcpu *v)
-{
- unsigned long cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
- unsigned long eflags = v->arch.hvm_svm.vmcb->rflags;
-
- return (eflags & X86_EFLAGS_VM) || !(cr0 & X86_CR0_PE);
-}
-
static int svm_interrupts_enabled(struct vcpu *v)
{
unsigned long eflags = v->arch.hvm_svm.vmcb->rflags;
@@ -581,13 +573,13 @@ static int svm_guest_x86_mode(struct vcp
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- if ( svm_long_mode_enabled(v) && vmcb->cs.attr.fields.l )
+ if ( unlikely(!(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PE)) )
+ return 0;
+ if ( unlikely(vmcb->rflags & X86_EFLAGS_VM) )
+ return 1;
+ if ( svm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
return 8;
-
- if ( svm_realmode(v) )
- return 2;
-
- return (vmcb->cs.attr.fields.db ? 4 : 2);
+ return (likely(vmcb->cs.attr.fields.db) ? 4 : 2);
}
void svm_update_host_cr3(struct vcpu *v)
Index: 2007-05-14/xen/arch/x86/hvm/vmx/vmx.c
===================================================================
--- 2007-05-14.orig/xen/arch/x86/hvm/vmx/vmx.c 2007-05-03 09:45:09.000000000 +0200
+++ 2007-05-14/xen/arch/x86/hvm/vmx/vmx.c 2007-05-14 13:47:25.000000000 +0200
@@ -995,31 +995,20 @@ static void vmx_init_hypercall_page(stru
*(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
}
-static int vmx_realmode(struct vcpu *v)
-{
- unsigned long rflags;
-
- ASSERT(v == current);
-
- rflags = __vmread(GUEST_RFLAGS);
- return rflags & X86_EFLAGS_VM;
-}
-
static int vmx_guest_x86_mode(struct vcpu *v)
{
- unsigned long cs_ar_bytes;
+ unsigned int cs_ar_bytes;
ASSERT(v == current);
+ if ( unlikely(!(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_PE)) )
+ return 0;
+ if ( unlikely(__vmread(GUEST_RFLAGS) & X86_EFLAGS_VM) )
+ return 1;
cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
-
- if ( vmx_long_mode_enabled(v) && (cs_ar_bytes & (1u<<13)) )
+ if ( vmx_long_mode_enabled(v) && likely(cs_ar_bytes & (1u<<13)) )
return 8;
-
- if ( vmx_realmode(v) )
- return 2;
-
- return ((cs_ar_bytes & (1u<<14)) ? 4 : 2);
+ return (likely(cs_ar_bytes & (1u<<14)) ? 4 : 2);
}
static int vmx_pae_enabled(struct vcpu *v)
Index: 2007-05-14/xen/include/asm-x86/hypercall.h
===================================================================
--- 2007-05-14.orig/xen/include/asm-x86/hypercall.h 2007-04-23 10:01:46.000000000 +0200
+++ 2007-05-14/xen/include/asm-x86/hypercall.h 2007-05-14 14:26:36.000000000 +0200
@@ -15,6 +15,15 @@
*/
#define MMU_UPDATE_PREEMPTED (~(~0U>>1))
+/*
+ * This gets set to a non-zero value whenever hypercall_create_continuation()
+ * is used (outside of multicall context; in multicall context the second call
+ * from do_multicall() itself will have this effect). Internal callers of
+ * hypercall handlers interested in this condition must clear the flag prior
+ * to invoking the respective handler(s).
+ */
+DECLARE_PER_CPU(char, hc_preempted);
+
extern long
do_event_channel_op_compat(
XEN_GUEST_HANDLE(evtchn_op_t) uop);
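The "internal callers" contract in the comment above can be seen in
hvm_do_hypercall() earlier in this patch, which clears the flag before
dispatching and tests it afterwards. Schematically (hypercall_handler(),
args, and the return handling are placeholders, not real Xen symbols):

    this_cpu(hc_preempted) = 0;       /* clear before invoking handler(s) */
    rc = hypercall_handler(args);     /* placeholder for the real handler */
    if ( this_cpu(hc_preempted) )
        return HVM_HCALL_preempted;   /* a continuation has been set up */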