
[PATCH v5 2/3] xen/oprofile: use NMI continuation for sending virq to guest



Instead of calling send_guest_vcpu_virq() from NMI context, use the
NMI continuation framework for that purpose. This avoids taking locks
in NMI mode.
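
For reference, the handoff implemented below is: the NMI handler merely
stores the target vcpu in a per-CPU slot and calls
trigger_nmi_continuation(), and the continuation later consumes that slot
via xchg(), so the send_guest_vcpu_virq() call (which may take locks)
never runs in NMI context. The following minimal user-space sketch of the
pattern is purely illustrative; it is not Xen code, and the names
fake_nmi_handler(), fake_continuation() and the one-field struct vcpu are
made up for the example:

/*
 * Illustrative stand-alone C11 model of the NMI -> continuation handoff.
 * The "NMI" side only does a plain store into a slot (global here,
 * per-CPU in Xen); the continuation side takes ownership with an atomic
 * exchange, so a spurious or racing continuation run is harmless.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct vcpu { int id; };                     /* stand-in for Xen's struct vcpu */

static _Atomic(struct vcpu *) nmi_cont_vcpu; /* per-CPU in the real patch */

/* Runs in (simulated) NMI context: no locks, just stash and trigger. */
static void fake_nmi_handler(struct vcpu *curr)
{
    if (atomic_load_explicit(&nmi_cont_vcpu, memory_order_relaxed) == NULL) {
        atomic_store_explicit(&nmi_cont_vcpu, curr, memory_order_release);
        /* the real code calls trigger_nmi_continuation() here */
    }
}

/* Runs later outside NMI context, like nmi_oprofile_send_virq() below. */
static bool fake_continuation(void)
{
    struct vcpu *v = atomic_exchange(&nmi_cont_vcpu, NULL);

    if (v)
        printf("send VIRQ_XENOPROF to vcpu %d\n", v->id);

    return v != NULL;
}

int main(void)
{
    struct vcpu v0 = { .id = 0 };

    fake_nmi_handler(&v0);
    fake_continuation();    /* performs the deferred work exactly once */
    fake_continuation();    /* a second run finds NULL and does nothing */
    return 0;
}

The atomic exchange is what backs the "assume races could happen" item in
the changelog: whichever continuation run wins the exchange sends the virq
exactly once, and any other run simply sees NULL.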

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V5:
- use Linux coding style (Jan Beulich)
- assume races could happen (Jan Beulich)

V4:
- rework to less generic approach

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/arch/x86/oprofile/nmi_int.c | 19 +++++++++++++++++--
 xen/arch/x86/traps.c            |  4 ++++
 xen/include/asm-x86/xenoprof.h  |  7 +++++++
 3 files changed, 28 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/oprofile/nmi_int.c b/xen/arch/x86/oprofile/nmi_int.c
index 0f103d80a6..a13bd82915 100644
--- a/xen/arch/x86/oprofile/nmi_int.c
+++ b/xen/arch/x86/oprofile/nmi_int.c
@@ -38,6 +38,8 @@ static unsigned long saved_lvtpc[NR_CPUS];
 
 static char *cpu_type;
 
+static DEFINE_PER_CPU(struct vcpu *, nmi_cont_vcpu);
+
 static int passive_domain_msr_op_checks(unsigned int msr, int *typep, int *indexp)
 {
        struct vpmu_struct *vpmu = vcpu_vpmu(current);
@@ -83,14 +85,27 @@ void passive_domain_destroy(struct vcpu *v)
                model->free_msr(v);
 }
 
+bool nmi_oprofile_send_virq(void)
+{
+       struct vcpu *v = xchg(&this_cpu(nmi_cont_vcpu), NULL);
+
+       if (v)
+               send_guest_vcpu_virq(v, VIRQ_XENOPROF);
+
+       return v;
+}
+
 static int nmi_callback(const struct cpu_user_regs *regs, int cpu)
 {
        int xen_mode, ovf;
 
        ovf = model->check_ctrs(cpu, &cpu_msrs[cpu], regs);
        xen_mode = ring_0(regs);
-       if ( ovf && is_active(current->domain) && !xen_mode )
-               send_guest_vcpu_virq(current, VIRQ_XENOPROF);
+       if (ovf && is_active(current->domain) && !xen_mode &&
+           !this_cpu(nmi_cont_vcpu)) {
+               this_cpu(nmi_cont_vcpu) = current;
+               trigger_nmi_continuation();
+       }
 
        if ( ovf == 2 )
                current->arch.nmi_pending = true;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 5cbaa49031..240fd1b089 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -65,6 +65,7 @@
 #include <asm/debugger.h>
 #include <asm/msr.h>
 #include <asm/nmi.h>
+#include <asm/xenoprof.h>
 #include <asm/shared.h>
 #include <asm/x86_emulate.h>
 #include <asm/traps.h>
@@ -1805,6 +1806,9 @@ bool nmi_check_continuation(void)
 {
     bool ret = false;
 
+    if ( nmi_oprofile_send_virq() )
+        ret = true;
+
     return ret;
 }
 
diff --git a/xen/include/asm-x86/xenoprof.h b/xen/include/asm-x86/xenoprof.h
index 1026ba2e1f..cf6af8c5df 100644
--- a/xen/include/asm-x86/xenoprof.h
+++ b/xen/include/asm-x86/xenoprof.h
@@ -69,6 +69,8 @@ int passive_domain_do_rdmsr(unsigned int msr, uint64_t *msr_content);
 int passive_domain_do_wrmsr(unsigned int msr, uint64_t msr_content);
 void passive_domain_destroy(struct vcpu *v);
 
+bool nmi_oprofile_send_virq(void);
+
 #else
 
 static inline int passive_domain_do_rdmsr(unsigned int msr,
@@ -85,6 +87,11 @@ static inline int passive_domain_do_wrmsr(unsigned int msr,
 
 static inline void passive_domain_destroy(struct vcpu *v) {}
 
+static inline bool nmi_oprofile_send_virq(void)
+{
+    return false;
+}
+
 #endif /* CONFIG_XENOPROF */
 
 #endif /* __ASM_X86_XENOPROF_H__ */
-- 
2.26.2
