[Xen-devel] [RFC v4 1/2] x86/xen: add xen_is_preemptible_hypercall()



From: "Luis R. Rodriguez" <mcgrof@xxxxxxxx>

On kernels with voluntary or no preemption, a hypercall issued
through userspace can keep running in kernel context for a long time
while it works through its sub-operations (multicalls). Such
long-running operations can trigger the soft lockup detector.

We want to let the kernel voluntarily preempt such calls even on
non-preempt kernels. To do that we first need a way to distinguish
which hypercalls fall into this category. This patch implements
xen_is_preemptible_hypercall(), which does exactly that by adding a
secondary hypercall page: calls made via the new page may be
preempted.
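
To illustrate the intended use, an interrupt-exit path could
voluntarily reschedule when it finds it has interrupted a preemptible
hypercall. The real consumer is wired up in the follow-up patch; the
helper below is only a hypothetical sketch and not part of this
series:

  #include <linux/sched.h>         /* need_resched(), _cond_resched() */
  #include <asm/ptrace.h>          /* struct pt_regs */
  #include <asm/xen/hypercall.h>   /* xen_is_preemptible_hypercall() */

  /* Hypothetical sketch: a possible check on the way out of the Xen upcall. */
  static void xen_maybe_preempt_hcall(struct pt_regs *regs)
  {
          /* Only calls routed via preemptible_hypercall_page qualify. */
          if (!xen_is_preemptible_hypercall(regs))
                  return;

          /* Voluntary preemption point, even without CONFIG_PREEMPT. */
          if (need_resched())
                  _cond_resched();
  }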

Andrew had originally submitted a version of this work [0].

[0] http://lists.xen.org/archives/html/xen-devel/2014-02/msg01056.html

Based on original work by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

Cc: Andy Lutomirski <luto@xxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxx>
Cc: David Vrabel <david.vrabel@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: x86@xxxxxxxxxx
Cc: Steven Rostedt <rostedt@xxxxxxxxxxx>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@xxxxxxxxxxx>
Cc: Jan Beulich <JBeulich@xxxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
Signed-off-by: Luis R. Rodriguez <mcgrof@xxxxxxxx>
---
 arch/arm/include/asm/xen/hypercall.h |  5 +++++
 arch/x86/include/asm/xen/hypercall.h | 20 ++++++++++++++++++++
 arch/x86/xen/enlighten.c             |  7 +++++++
 arch/x86/xen/xen-head.S              | 18 +++++++++++++++++-
 4 files changed, 49 insertions(+), 1 deletion(-)

diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index 712b50e..4fc8395 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -74,4 +74,9 @@ MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
        BUG();
 }
 
+static inline bool xen_is_preemptible_hypercall(struct pt_regs *regs)
+{
+       return false;
+}
+
 #endif /* _ASM_ARM_XEN_HYPERCALL_H */
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index ca08a27..221008e 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -84,6 +84,22 @@
 
 extern struct { char _entry[32]; } hypercall_page[];
 
+#ifndef CONFIG_PREEMPT
+extern struct { char _entry[32]; } preemptible_hypercall_page[];
+
+static inline bool xen_is_preemptible_hypercall(struct pt_regs *regs)
+{
+       return !user_mode_vm(regs) &&
+               regs->ip >= (unsigned long)preemptible_hypercall_page &&
+               regs->ip < (unsigned long)preemptible_hypercall_page + PAGE_SIZE;
+}
+#else
+static inline bool xen_is_preemptible_hypercall(struct pt_regs *regs)
+{
+       return false;
+}
+#endif
+
 #define __HYPERCALL            "call hypercall_page+%c[offset]"
 #define __HYPERCALL_ENTRY(x)                                           \
        [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
@@ -215,7 +231,11 @@ privcmd_call(unsigned call,
 
        asm volatile("call *%[call]"
                     : __HYPERCALL_5PARAM
+#ifndef CONFIG_PREEMPT
+                    : [call] "a" (&preemptible_hypercall_page[call])
+#else
                     : [call] "a" (&hypercall_page[call])
+#endif
                     : __HYPERCALL_CLOBBER5);
 
        return (long)__res;
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 6bf3a13..9c01b48 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -84,6 +84,9 @@
 #include "multicalls.h"
 
 EXPORT_SYMBOL_GPL(hypercall_page);
+#ifndef CONFIG_PREEMPT
+EXPORT_SYMBOL_GPL(preemptible_hypercall_page);
+#endif
 
 /*
  * Pointer to the xen_vcpu_info structure or
@@ -1531,6 +1534,10 @@ asmlinkage __visible void __init xen_start_kernel(void)
 #endif
        xen_setup_machphys_mapping();
 
+#ifndef CONFIG_PREEMPT
+       copy_page(preemptible_hypercall_page, hypercall_page);
+#endif
+
        /* Install Xen paravirt ops */
        pv_info = xen_info;
        pv_init_ops = xen_init_ops;
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 674b2225..6e6a9517 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -85,9 +85,18 @@ ENTRY(xen_pvh_early_cpu_init)
 .pushsection .text
        .balign PAGE_SIZE
 ENTRY(hypercall_page)
+
+#ifdef CONFIG_PREEMPT
+#  define PREEMPT_HYPERCALL_ENTRY(x)
+#else
+#  define PREEMPT_HYPERCALL_ENTRY(x) \
+       .global preemptible_xen_hypercall_##x ASM_NL \
+       .set preemptible_xen_hypercall_##x, xen_hypercall_##x + PAGE_SIZE ASM_NL
+#endif
 #define NEXT_HYPERCALL(x) \
        ENTRY(xen_hypercall_##x) \
-       .skip 32
+       .skip 32 ASM_NL \
+       PREEMPT_HYPERCALL_ENTRY(x)
 
 NEXT_HYPERCALL(set_trap_table)
 NEXT_HYPERCALL(mmu_update)
@@ -138,6 +147,13 @@ NEXT_HYPERCALL(arch_4)
 NEXT_HYPERCALL(arch_5)
 NEXT_HYPERCALL(arch_6)
        .balign PAGE_SIZE
+
+#ifndef CONFIG_PREEMPT
+ENTRY(preemptible_hypercall_page)
+       .skip PAGE_SIZE
+#endif /* CONFIG_PREEMPT */
+
+#undef NEXT_HYPERCALL
 .popsection
 
        ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,       .asciz "linux")
-- 
2.1.1

