
[Xen-devel] [PATCH for-next v3 18/22] x86/traps: merge x86_64/compat/traps.c into pv/traps.c



Export hypercall_page_initialise_ring1_kernel as the code is moved.

No functional change.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
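A note for reviewers: compat_iret() below decodes the guest frame purely
through (u32 *)regs->rsp + N accesses. Reconstructed from those offsets,
the 32-bit frame it consumes looks roughly like this (a reviewer's sketch
only; neither the struct nor its name exists in the tree):

    struct compat_iret_frame {
        uint32_t eax;     /* +0: restored first (clobbered by hypercall) */
        uint32_t eip;     /* +1 */
        uint32_t cs;      /* +2 */
        uint32_t eflags;  /* +3: fixed up before being installed */
        uint32_t esp;     /* +4: consumed only for a ring 2/3 return */
        uint32_t ss;      /* +5: consumed only for a ring 2/3 return */
    };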
 xen/arch/x86/pv/traps.c            | 405 ++++++++++++++++++++++++++++++++++++
 xen/arch/x86/x86_64/compat/traps.c | 416 -------------------------------------
 xen/arch/x86/x86_64/traps.c        |   2 -
 xen/include/asm-x86/pv/domain.h    |   2 +
 4 files changed, 407 insertions(+), 418 deletions(-)
 delete mode 100644 xen/arch/x86/x86_64/compat/traps.c

diff --git a/xen/arch/x86/pv/traps.c b/xen/arch/x86/pv/traps.c
index 4f52d3e4d3..db92f6d520 100644
--- a/xen/arch/x86/pv/traps.c
+++ b/xen/arch/x86/pv/traps.c
@@ -31,6 +31,9 @@
 #include <asm/shared.h>
 #include <asm/traps.h>
 
+#include <compat/callback.h>
+#include <compat/arch-x86_32.h>
+
 #include <public/callback.h>
 
 void do_entry_int82(struct cpu_user_regs *regs)
@@ -616,6 +619,408 @@ void hypercall_page_initialise_ring3_kernel(void *hypercall_page)
     *(u16 *)(p+ 9) = 0x050f;  /* syscall */
 }
 
+/* Compat guest interfaces */
+
+void compat_show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs,
+                             int debug_stack_lines)
+{
+    unsigned int i, *stack, addr, mask = STACK_SIZE;
+
+    stack = (unsigned int *)(unsigned long)regs->esp;
+    printk("Guest stack trace from esp=%08lx:\n ", (unsigned long)stack);
+
+    if ( !__compat_access_ok(v->domain, stack, sizeof(*stack)) )
+    {
+        printk("Guest-inaccessible memory.\n");
+        return;
+    }
+
+    if ( v != current )
+    {
+        struct vcpu *vcpu;
+        unsigned long mfn;
+
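+        /*
+         * If no vCPU of the target domain is running on the current page
+         * tables, map the stack page explicitly via do_page_walk(); only
+         * that single page is then accessible, so narrow mask accordingly.
+         */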
+        ASSERT(guest_kernel_mode(v, regs));
+        mfn = read_cr3() >> PAGE_SHIFT;
+        for_each_vcpu( v->domain, vcpu )
+            if ( pagetable_get_pfn(vcpu->arch.guest_table) == mfn )
+                break;
+        if ( !vcpu )
+        {
+            stack = do_page_walk(v, (unsigned long)stack);
+            if ( (unsigned long)stack < PAGE_SIZE )
+            {
+                printk("Inaccessible guest memory.\n");
+                return;
+            }
+            mask = PAGE_SIZE;
+        }
+    }
+
+    for ( i = 0; i < debug_stack_lines * 8; i++ )
+    {
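+        /*
+         * Stop before a word read which would cross into the next
+         * mask-aligned (stack- or page-sized) region.
+         */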
+        if ( (((long)stack - 1) ^ ((long)(stack + 1) - 1)) & mask )
+            break;
+        if ( __get_user(addr, stack) )
+        {
+            if ( i != 0 )
+                printk("\n    ");
+            printk("Fault while accessing guest memory.");
+            i = 1;
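+            /* Non-zero i suppresses the "Stack empty." message below. */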
+            break;
+        }
+        if ( (i != 0) && ((i % 8) == 0) )
+            printk("\n ");
+        printk(" %08x", addr);
+        stack++;
+    }
+    if ( mask == PAGE_SIZE )
+    {
+        BUILD_BUG_ON(PAGE_SIZE == STACK_SIZE);
+        unmap_domain_page(stack);
+    }
+    if ( i == 0 )
+        printk("Stack empty.");
+    printk("\n");
+}
+
+unsigned int compat_iret(void)
+{
+    struct cpu_user_regs *regs = guest_cpu_user_regs();
+    struct vcpu *v = current;
+    u32 eflags;
+
+    /* Trim stack pointer to 32 bits. */
+    regs->rsp = (u32)regs->rsp;
+
+    /* Restore EAX (clobbered by hypercall). */
+    if ( unlikely(__get_user(regs->eax, (u32 *)regs->rsp)) )
+    {
+        domain_crash(v->domain);
+        return 0;
+    }
+
+    /* Restore CS and EIP. */
+    if ( unlikely(__get_user(regs->eip, (u32 *)regs->rsp + 1)) ||
+         unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) )
+    {
+        domain_crash(v->domain);
+        return 0;
+    }
+
+    /*
+     * Fix up and restore EFLAGS. We fix up in a local staging area
+     * to avoid firing the BUG_ON(IOPL) check in arch_get_info_guest.
+     */
+    if ( unlikely(__get_user(eflags, (u32 *)regs->rsp + 3)) )
+    {
+        domain_crash(v->domain);
+        return 0;
+    }
+
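+    /*
+     * With the architectural_iopl VM assist active, the guest's IOPL is
+     * tracked in pv_vcpu.iopl; the IOPL bits are masked out of the frame
+     * EFLAGS either way.
+     */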
+    if ( VM_ASSIST(v->domain, architectural_iopl) )
+        v->arch.pv_vcpu.iopl = eflags & X86_EFLAGS_IOPL;
+
+    regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
+
+    if ( unlikely(eflags & X86_EFLAGS_VM) )
+    {
+        /*
+         * Cannot return to VM86 mode: inject a GP fault instead. Note that
+         * the GP fault is reported on the first VM86 mode instruction, not on
+         * the IRET (which is why we can simply leave the stack frame as-is
+         * (except for perhaps having to copy it), which in turn seems better
+         * than teaching create_bounce_frame() to needlessly deal with vm86
+         * mode frames).
+         */
+        const struct trap_info *ti;
+        u32 x, ksp = v->arch.pv_vcpu.kernel_sp - 40;
+        unsigned int i;
+        int rc = 0;
+
+        gdprintk(XENLOG_ERR, "VM86 mode unavailable (ksp:%08X->%08X)\n",
+                 regs->esp, ksp);
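+        /*
+         * Copy words 1..9 of the frame onto the guest kernel stack,
+         * picking the copy direction which stays safe if the source and
+         * destination ranges overlap (memmove semantics).
+         */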
+        if ( ksp < regs->esp )
+        {
+            for ( i = 1; i < 10; ++i )
+            {
+                rc |= __get_user(x, (u32 *)regs->rsp + i);
+                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
+            }
+        }
+        else if ( ksp > regs->esp )
+        {
+            for ( i = 9; i > 0; --i )
+            {
+                rc |= __get_user(x, (u32 *)regs->rsp + i);
+                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
+            }
+        }
+        if ( rc )
+        {
+            domain_crash(v->domain);
+            return 0;
+        }
+        regs->esp = ksp;
+        regs->ss = v->arch.pv_vcpu.kernel_ss;
+
+        ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
+        if ( TI_GET_IF(ti) )
+            eflags &= ~X86_EFLAGS_IF;
+        regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
+                          X86_EFLAGS_NT|X86_EFLAGS_TF);
+        if ( unlikely(__put_user(0, (u32 *)regs->rsp)) )
+        {
+            domain_crash(v->domain);
+            return 0;
+        }
+        regs->eip = ti->address;
+        regs->cs = ti->cs;
+    }
+    else if ( unlikely(ring_0(regs)) )
+    {
+        domain_crash(v->domain);
+        return 0;
+    }
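+    /* Ring 1 (same ring): pop just the 4-word EAX/EIP/CS/EFLAGS frame. */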
+    else if ( ring_1(regs) )
+        regs->esp += 16;
+    /* Return to ring 2/3: restore ESP and SS. */
+    else if ( __get_user(regs->ss, (u32 *)regs->rsp + 5) ||
+              __get_user(regs->esp, (u32 *)regs->rsp + 4) )
+    {
+        domain_crash(v->domain);
+        return 0;
+    }
+
+    /* Restore upcall mask from supplied EFLAGS.IF. */
+    vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);
+
+    async_exception_cleanup(v);
+
+    /*
+     * The hypercall exit path will overwrite EAX with this return
+     * value.
+     */
+    return regs->eax;
+}
+
+static long compat_register_guest_callback(
+    struct compat_callback_register *reg)
+{
+    long ret = 0;
+    struct vcpu *v = current;
+
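+    /* Sanitise the guest-supplied code selector before installing it. */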
+    fixup_guest_code_selector(v->domain, reg->address.cs);
+
+    switch ( reg->type )
+    {
+    case CALLBACKTYPE_event:
+        v->arch.pv_vcpu.event_callback_cs     = reg->address.cs;
+        v->arch.pv_vcpu.event_callback_eip    = reg->address.eip;
+        break;
+
+    case CALLBACKTYPE_failsafe:
+        v->arch.pv_vcpu.failsafe_callback_cs  = reg->address.cs;
+        v->arch.pv_vcpu.failsafe_callback_eip = reg->address.eip;
+        if ( reg->flags & CALLBACKF_mask_events )
+            set_bit(_VGCF_failsafe_disables_events,
+                    &v->arch.vgc_flags);
+        else
+            clear_bit(_VGCF_failsafe_disables_events,
+                      &v->arch.vgc_flags);
+        break;
+
+    case CALLBACKTYPE_syscall32:
+        v->arch.pv_vcpu.syscall32_callback_cs     = reg->address.cs;
+        v->arch.pv_vcpu.syscall32_callback_eip    = reg->address.eip;
+        v->arch.pv_vcpu.syscall32_disables_events =
+            (reg->flags & CALLBACKF_mask_events) != 0;
+        break;
+
+    case CALLBACKTYPE_sysenter:
+        v->arch.pv_vcpu.sysenter_callback_cs     = reg->address.cs;
+        v->arch.pv_vcpu.sysenter_callback_eip    = reg->address.eip;
+        v->arch.pv_vcpu.sysenter_disables_events =
+            (reg->flags & CALLBACKF_mask_events) != 0;
+        break;
+
+    case CALLBACKTYPE_nmi:
+        ret = register_guest_nmi_callback(reg->address.eip);
+        break;
+
+    default:
+        ret = -ENOSYS;
+        break;
+    }
+
+    return ret;
+}
+
+static long compat_unregister_guest_callback(
+    struct compat_callback_unregister *unreg)
+{
+    long ret;
+
+    switch ( unreg->type )
+    {
+    case CALLBACKTYPE_event:
+    case CALLBACKTYPE_failsafe:
+    case CALLBACKTYPE_syscall32:
+    case CALLBACKTYPE_sysenter:
+        ret = -EINVAL;
+        break;
+
+    case CALLBACKTYPE_nmi:
+        ret = unregister_guest_nmi_callback();
+        break;
+
+    default:
+        ret = -ENOSYS;
+        break;
+    }
+
+    return ret;
+}
+
+long compat_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg)
+{
+    long ret;
+
+    switch ( cmd )
+    {
+    case CALLBACKOP_register:
+    {
+        struct compat_callback_register reg;
+
+        ret = -EFAULT;
+        if ( copy_from_guest(&reg, arg, 1) )
+            break;
+
+        ret = compat_register_guest_callback(&reg);
+    }
+    break;
+
+    case CALLBACKOP_unregister:
+    {
+        struct compat_callback_unregister unreg;
+
+        ret = -EFAULT;
+        if ( copy_from_guest(&unreg, arg, 1) )
+            break;
+
+        ret = compat_unregister_guest_callback(&unreg);
+    }
+    break;
+
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+    return ret;
+}
+
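+/*
+ * The legacy set_callbacks interface covers only the event and failsafe
+ * callbacks; implement it in terms of the register path above.
+ */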
+long compat_set_callbacks(unsigned long event_selector,
+                          unsigned long event_address,
+                          unsigned long failsafe_selector,
+                          unsigned long failsafe_address)
+{
+    struct compat_callback_register event = {
+        .type = CALLBACKTYPE_event,
+        .address = {
+            .cs = event_selector,
+            .eip = event_address
+        }
+    };
+    struct compat_callback_register failsafe = {
+        .type = CALLBACKTYPE_failsafe,
+        .address = {
+            .cs = failsafe_selector,
+            .eip = failsafe_address
+        }
+    };
+
+    compat_register_guest_callback(&event);
+    compat_register_guest_callback(&failsafe);
+
+    return 0;
+}
+
+int compat_set_trap_table(XEN_GUEST_HANDLE(trap_info_compat_t) traps)
+{
+    struct compat_trap_info cur;
+    struct trap_info *dst = current->arch.pv_vcpu.trap_ctxt;
+    long rc = 0;
+
+    /* If no table is presented then clear the entire virtual IDT. */
+    if ( guest_handle_is_null(traps) )
+    {
+        memset(dst, 0, NR_VECTORS * sizeof(*dst));
+        init_int80_direct_trap(current);
+        return 0;
+    }
+
+    for ( ; ; )
+    {
+        if ( copy_from_guest(&cur, traps, 1) )
+        {
+            rc = -EFAULT;
+            break;
+        }
+
+        if ( cur.address == 0 )
+            break;
+
+        fixup_guest_code_selector(current->domain, cur.cs);
+
+        XLAT_trap_info(dst + cur.vector, &cur);
+
+        if ( cur.vector == 0x80 )
+            init_int80_direct_trap(current);
+
+        guest_handle_add_offset(traps, 1);
+
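+        /*
+         * Preemption point: the continuation re-enters this hypercall
+         * with the handle already advanced past the processed entries.
+         */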
+        if ( hypercall_preempt_check() )
+        {
+            rc = hypercall_create_continuation(
+                __HYPERVISOR_set_trap_table, "h", traps);
+            break;
+        }
+    }
+
+    return rc;
+}
+
+void hypercall_page_initialise_ring1_kernel(void *hypercall_page)
+{
+    char *p;
+    int i;
+
+    /* Fill in all the transfer points with template machine code. */
+
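+    /* The page is carved into 32-byte stubs, one per hypercall number. */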
+    for ( i = 0; i < (PAGE_SIZE / 32); i++ )
+    {
+        if ( i == __HYPERVISOR_iret )
+            continue;
+
+        p = (char *)(hypercall_page + (i * 32));
+        *(u8  *)(p+ 0) = 0xb8;    /* mov  $<i>,%eax */
+        *(u32 *)(p+ 1) = i;
+        *(u16 *)(p+ 5) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int  $xx */
+        *(u8  *)(p+ 7) = 0xc3;    /* ret */
+    }
+
+    /*
+     * HYPERVISOR_iret is special because it doesn't return and expects a
+     * special stack frame. Guests jump at this transfer point instead of
+     * calling it.
+     */
+    p = (char *)(hypercall_page + (__HYPERVISOR_iret * 32));
+    *(u8  *)(p+ 0) = 0x50;    /* push %eax */
+    *(u8  *)(p+ 1) = 0xb8;    /* mov  $__HYPERVISOR_iret,%eax */
+    *(u32 *)(p+ 2) = __HYPERVISOR_iret;
+    *(u16 *)(p+ 6) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int  $xx */
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/x86_64/compat/traps.c b/xen/arch/x86/x86_64/compat/traps.c
deleted file mode 100644
index 1751ec67e8..0000000000
--- a/xen/arch/x86/x86_64/compat/traps.c
+++ /dev/null
@@ -1,416 +0,0 @@
-#include <xen/event.h>
-#include <asm/regs.h>
-#include <compat/callback.h>
-#include <compat/arch-x86_32.h>
-
-void compat_show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs,
-                             int debug_stack_lines)
-{
-    unsigned int i, *stack, addr, mask = STACK_SIZE;
-
-    stack = (unsigned int *)(unsigned long)regs->esp;
-    printk("Guest stack trace from esp=%08lx:\n ", (unsigned long)stack);
-
-    if ( !__compat_access_ok(v->domain, stack, sizeof(*stack)) )
-    {
-        printk("Guest-inaccessible memory.\n");
-        return;
-    }
-
-    if ( v != current )
-    {
-        struct vcpu *vcpu;
-        unsigned long mfn;
-
-        ASSERT(guest_kernel_mode(v, regs));
-        mfn = read_cr3() >> PAGE_SHIFT;
-        for_each_vcpu( v->domain, vcpu )
-            if ( pagetable_get_pfn(vcpu->arch.guest_table) == mfn )
-                break;
-        if ( !vcpu )
-        {
-            stack = do_page_walk(v, (unsigned long)stack);
-            if ( (unsigned long)stack < PAGE_SIZE )
-            {
-                printk("Inaccessible guest memory.\n");
-                return;
-            }
-            mask = PAGE_SIZE;
-        }
-    }
-
-    for ( i = 0; i < debug_stack_lines * 8; i++ )
-    {
-        if ( (((long)stack - 1) ^ ((long)(stack + 1) - 1)) & mask )
-            break;
-        if ( __get_user(addr, stack) )
-        {
-            if ( i != 0 )
-                printk("\n    ");
-            printk("Fault while accessing guest memory.");
-            i = 1;
-            break;
-        }
-        if ( (i != 0) && ((i % 8) == 0) )
-            printk("\n ");
-        printk(" %08x", addr);
-        stack++;
-    }
-    if ( mask == PAGE_SIZE )
-    {
-        BUILD_BUG_ON(PAGE_SIZE == STACK_SIZE);
-        unmap_domain_page(stack);
-    }
-    if ( i == 0 )
-        printk("Stack empty.");
-    printk("\n");
-}
-
-unsigned int compat_iret(void)
-{
-    struct cpu_user_regs *regs = guest_cpu_user_regs();
-    struct vcpu *v = current;
-    u32 eflags;
-
-    /* Trim stack pointer to 32 bits. */
-    regs->rsp = (u32)regs->rsp;
-
-    /* Restore EAX (clobbered by hypercall). */
-    if ( unlikely(__get_user(regs->eax, (u32 *)regs->rsp)) )
-    {
-        domain_crash(v->domain);
-        return 0;
-    }
-
-    /* Restore CS and EIP. */
-    if ( unlikely(__get_user(regs->eip, (u32 *)regs->rsp + 1)) ||
-         unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) )
-    {
-        domain_crash(v->domain);
-        return 0;
-    }
-
-    /*
-     * Fix up and restore EFLAGS. We fix up in a local staging area
-     * to avoid firing the BUG_ON(IOPL) check in arch_get_info_guest.
-     */
-    if ( unlikely(__get_user(eflags, (u32 *)regs->rsp + 3)) )
-    {
-        domain_crash(v->domain);
-        return 0;
-    }
-
-    if ( VM_ASSIST(v->domain, architectural_iopl) )
-        v->arch.pv_vcpu.iopl = eflags & X86_EFLAGS_IOPL;
-
-    regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
-
-    if ( unlikely(eflags & X86_EFLAGS_VM) )
-    {
-        /*
-         * Cannot return to VM86 mode: inject a GP fault instead. Note that
-         * the GP fault is reported on the first VM86 mode instruction, not on
-         * the IRET (which is why we can simply leave the stack frame as-is
-         * (except for perhaps having to copy it), which in turn seems better
-         * than teaching create_bounce_frame() to needlessly deal with vm86
-         * mode frames).
-         */
-        const struct trap_info *ti;
-        u32 x, ksp = v->arch.pv_vcpu.kernel_sp - 40;
-        unsigned int i;
-        int rc = 0;
-
-        gdprintk(XENLOG_ERR, "VM86 mode unavailable (ksp:%08X->%08X)\n",
-                 regs->esp, ksp);
-        if ( ksp < regs->esp )
-        {
-            for ( i = 1; i < 10; ++i )
-            {
-                rc |= __get_user(x, (u32 *)regs->rsp + i);
-                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
-            }
-        }
-        else if ( ksp > regs->esp )
-        {
-            for ( i = 9; i > 0; --i )
-            {
-                rc |= __get_user(x, (u32 *)regs->rsp + i);
-                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
-            }
-        }
-        if ( rc )
-        {
-            domain_crash(v->domain);
-            return 0;
-        }
-        regs->esp = ksp;
-        regs->ss = v->arch.pv_vcpu.kernel_ss;
-
-        ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
-        if ( TI_GET_IF(ti) )
-            eflags &= ~X86_EFLAGS_IF;
-        regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
-                          X86_EFLAGS_NT|X86_EFLAGS_TF);
-        if ( unlikely(__put_user(0, (u32 *)regs->rsp)) )
-        {
-            domain_crash(v->domain);
-            return 0;
-        }
-        regs->eip = ti->address;
-        regs->cs = ti->cs;
-    }
-    else if ( unlikely(ring_0(regs)) )
-    {
-        domain_crash(v->domain);
-        return 0;
-    }
-    else if ( ring_1(regs) )
-        regs->esp += 16;
-    /* Return to ring 2/3: restore ESP and SS. */
-    else if ( __get_user(regs->ss, (u32 *)regs->rsp + 5) ||
-              __get_user(regs->esp, (u32 *)regs->rsp + 4) )
-    {
-        domain_crash(v->domain);
-        return 0;
-    }
-
-    /* Restore upcall mask from supplied EFLAGS.IF. */
-    vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);
-
-    async_exception_cleanup(v);
-
-    /*
-     * The hypercall exit path will overwrite EAX with this return
-     * value.
-     */
-    return regs->eax;
-}
-
-static long compat_register_guest_callback(
-    struct compat_callback_register *reg)
-{
-    long ret = 0;
-    struct vcpu *v = current;
-
-    fixup_guest_code_selector(v->domain, reg->address.cs);
-
-    switch ( reg->type )
-    {
-    case CALLBACKTYPE_event:
-        v->arch.pv_vcpu.event_callback_cs     = reg->address.cs;
-        v->arch.pv_vcpu.event_callback_eip    = reg->address.eip;
-        break;
-
-    case CALLBACKTYPE_failsafe:
-        v->arch.pv_vcpu.failsafe_callback_cs  = reg->address.cs;
-        v->arch.pv_vcpu.failsafe_callback_eip = reg->address.eip;
-        if ( reg->flags & CALLBACKF_mask_events )
-            set_bit(_VGCF_failsafe_disables_events,
-                    &v->arch.vgc_flags);
-        else
-            clear_bit(_VGCF_failsafe_disables_events,
-                      &v->arch.vgc_flags);
-        break;
-
-    case CALLBACKTYPE_syscall32:
-        v->arch.pv_vcpu.syscall32_callback_cs     = reg->address.cs;
-        v->arch.pv_vcpu.syscall32_callback_eip    = reg->address.eip;
-        v->arch.pv_vcpu.syscall32_disables_events =
-            (reg->flags & CALLBACKF_mask_events) != 0;
-        break;
-
-    case CALLBACKTYPE_sysenter:
-        v->arch.pv_vcpu.sysenter_callback_cs     = reg->address.cs;
-        v->arch.pv_vcpu.sysenter_callback_eip    = reg->address.eip;
-        v->arch.pv_vcpu.sysenter_disables_events =
-            (reg->flags & CALLBACKF_mask_events) != 0;
-        break;
-
-    case CALLBACKTYPE_nmi:
-        ret = register_guest_nmi_callback(reg->address.eip);
-        break;
-
-    default:
-        ret = -ENOSYS;
-        break;
-    }
-
-    return ret;
-}
-
-static long compat_unregister_guest_callback(
-    struct compat_callback_unregister *unreg)
-{
-    long ret;
-
-    switch ( unreg->type )
-    {
-    case CALLBACKTYPE_event:
-    case CALLBACKTYPE_failsafe:
-    case CALLBACKTYPE_syscall32:
-    case CALLBACKTYPE_sysenter:
-        ret = -EINVAL;
-        break;
-
-    case CALLBACKTYPE_nmi:
-        ret = unregister_guest_nmi_callback();
-        break;
-
-    default:
-        ret = -ENOSYS;
-        break;
-    }
-
-    return ret;
-}
-
-
-long compat_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg)
-{
-    long ret;
-
-    switch ( cmd )
-    {
-    case CALLBACKOP_register:
-    {
-        struct compat_callback_register reg;
-
-        ret = -EFAULT;
-        if ( copy_from_guest(&reg, arg, 1) )
-            break;
-
-        ret = compat_register_guest_callback(&reg);
-    }
-    break;
-
-    case CALLBACKOP_unregister:
-    {
-        struct compat_callback_unregister unreg;
-
-        ret = -EFAULT;
-        if ( copy_from_guest(&unreg, arg, 1) )
-            break;
-
-        ret = compat_unregister_guest_callback(&unreg);
-    }
-    break;
-
-    default:
-        ret = -EINVAL;
-        break;
-    }
-
-    return ret;
-}
-
-long compat_set_callbacks(unsigned long event_selector,
-                          unsigned long event_address,
-                          unsigned long failsafe_selector,
-                          unsigned long failsafe_address)
-{
-    struct compat_callback_register event = {
-        .type = CALLBACKTYPE_event,
-        .address = {
-            .cs = event_selector,
-            .eip = event_address
-        }
-    };
-    struct compat_callback_register failsafe = {
-        .type = CALLBACKTYPE_failsafe,
-        .address = {
-            .cs = failsafe_selector,
-            .eip = failsafe_address
-        }
-    };
-
-    compat_register_guest_callback(&event);
-    compat_register_guest_callback(&failsafe);
-
-    return 0;
-}
-
-int compat_set_trap_table(XEN_GUEST_HANDLE(trap_info_compat_t) traps)
-{
-    struct compat_trap_info cur;
-    struct trap_info *dst = current->arch.pv_vcpu.trap_ctxt;
-    long rc = 0;
-
-    /* If no table is presented then clear the entire virtual IDT. */
-    if ( guest_handle_is_null(traps) )
-    {
-        memset(dst, 0, NR_VECTORS * sizeof(*dst));
-        init_int80_direct_trap(current);
-        return 0;
-    }
-
-    for ( ; ; )
-    {
-        if ( copy_from_guest(&cur, traps, 1) )
-        {
-            rc = -EFAULT;
-            break;
-        }
-
-        if ( cur.address == 0 )
-            break;
-
-        fixup_guest_code_selector(current->domain, cur.cs);
-
-        XLAT_trap_info(dst + cur.vector, &cur);
-
-        if ( cur.vector == 0x80 )
-            init_int80_direct_trap(current);
-
-        guest_handle_add_offset(traps, 1);
-
-        if ( hypercall_preempt_check() )
-        {
-            rc = hypercall_create_continuation(
-                __HYPERVISOR_set_trap_table, "h", traps);
-            break;
-        }
-    }
-
-    return rc;
-}
-
-static void hypercall_page_initialise_ring1_kernel(void *hypercall_page)
-{
-    char *p;
-    int i;
-
-    /* Fill in all the transfer points with template machine code. */
-
-    for ( i = 0; i < (PAGE_SIZE / 32); i++ )
-    {
-        if ( i == __HYPERVISOR_iret )
-            continue;
-
-        p = (char *)(hypercall_page + (i * 32));
-        *(u8  *)(p+ 0) = 0xb8;    /* mov  $<i>,%eax */
-        *(u32 *)(p+ 1) = i;
-        *(u16 *)(p+ 5) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int  $xx */
-        *(u8  *)(p+ 7) = 0xc3;    /* ret */
-    }
-
-    /*
-     * HYPERVISOR_iret is special because it doesn't return and expects a
-     * special stack frame. Guests jump at this transfer point instead of
-     * calling it.
-     */
-    p = (char *)(hypercall_page + (__HYPERVISOR_iret * 32));
-    *(u8  *)(p+ 0) = 0x50;    /* push %eax */
-    *(u8  *)(p+ 1) = 0xb8;    /* mov  $__HYPERVISOR_iret,%eax */
-    *(u32 *)(p+ 2) = __HYPERVISOR_iret;
-    *(u16 *)(p+ 6) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int  $xx */
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 7a4dd4458e..deca2ca1f6 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -335,8 +335,6 @@ void subarch_percpu_traps_init(void)
     wrmsrl(MSR_SYSCALL_MASK, XEN_SYSCALL_MASK);
 }
 
-#include "compat/traps.c"
-
 void hypercall_page_initialise(struct domain *d, void *hypercall_page)
 {
     memset(hypercall_page, 0xCC, PAGE_SIZE);
diff --git a/xen/include/asm-x86/pv/domain.h b/xen/include/asm-x86/pv/domain.h
index dfa60b080c..67e370ebf3 100644
--- a/xen/include/asm-x86/pv/domain.h
+++ b/xen/include/asm-x86/pv/domain.h
@@ -30,6 +30,7 @@ int pv_domain_initialise(struct domain *d, unsigned int domcr_flags,
                          struct xen_arch_domainconfig *config);
 
 void hypercall_page_initialise_ring3_kernel(void *hypercall_page);
+void hypercall_page_initialise_ring1_kernel(void *hypercall_page);
 
 #else  /* !CONFIG_PV */
 
@@ -46,6 +47,7 @@ static inline int pv_domain_initialise(struct domain *d,
 }
 
 static inline void hypercall_page_initialise_ring3_kernel(void *hypercall_page) {}
+static inline void hypercall_page_initialise_ring1_kernel(void *hypercall_page) {}
 
 #endif /* CONFIG_PV */
 
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 

