To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Architecture-independent, and tasklet-based, continue_hypercall_on_cpu().
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 15 Apr 2010 01:35:13 -0700
Delivery-date: Thu, 15 Apr 2010 01:37:43 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1271240945 -3600
# Node ID 07befd9cf6d3097c3b44d857c8498a6ed14772c7
# Parent  5057604eeefcb6479cc97c092a2399a115fae879
Architecture-independent, and tasklet-based, continue_hypercall_on_cpu().

Signed-off-by: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/domain.c                       |   76 ----------------------------
 xen/common/domain.c                         |   67 ++++++++++++++++++++++++
 xen/common/schedule.c                       |   46 +---------------
 xen/include/asm-ia64/linux-xen/asm/ptrace.h |    2 
 xen/include/asm-x86/domain.h                |    6 --
 xen/include/asm-x86/regs.h                  |    2 
 xen/include/xen/domain.h                    |    4 +
 xen/include/xen/sched.h                     |   10 +--
 8 files changed, 83 insertions(+), 130 deletions(-)
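
[Editorial note, not part of this changeset] For context, a minimal hypothetical sketch of how a hypercall handler might use continue_hypercall_on_cpu() to finish its work on another CPU. The names do_remote_part, do_the_work, my_args and the choice of CPU 0 are illustrative assumptions, not code from the Xen tree.

/* Hypothetical continuation: runs on the target CPU while the calling
 * vCPU is paused; its return value is written into the vCPU's return
 * register and becomes the hypercall result. */
static long do_remote_part(void *data)
{
    struct my_args *args = data;    /* hypothetical per-call state */

    do_the_work(args);              /* hypothetical worker routine */
    return 0;
}

/* Hypothetical hypercall handler deferring the remainder to CPU 0. */
static long do_my_hypercall(struct my_args *args)
{
    /*
     * If we are already on CPU 0 this calls do_remote_part() directly;
     * otherwise the 0 returned here is a dummy value that the tasklet
     * later overwrites via return_reg().
     */
    return continue_hypercall_on_cpu(0, do_remote_part, args);
}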

diff -r 5057604eeefc -r 07befd9cf6d3 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Wed Apr 14 10:44:29 2010 +0100
+++ b/xen/arch/x86/domain.c     Wed Apr 14 11:29:05 2010 +0100
@@ -1517,82 +1517,6 @@ void sync_vcpu_execstate(struct vcpu *v)
     flush_tlb_mask(&v->vcpu_dirty_cpumask);
 }
 
-struct migrate_info {
-    long (*func)(void *data);
-    void *data;
-    void (*saved_schedule_tail)(struct vcpu *);
-    cpumask_t saved_affinity;
-    unsigned int nest;
-};
-
-static void continue_hypercall_on_cpu_helper(struct vcpu *v)
-{
-    struct cpu_user_regs *regs = guest_cpu_user_regs();
-    struct migrate_info *info = v->arch.continue_info;
-    cpumask_t mask = info->saved_affinity;
-    void (*saved_schedule_tail)(struct vcpu *) = info->saved_schedule_tail;
-
-    regs->eax = info->func(info->data);
-
-    if ( info->nest-- == 0 )
-    {
-        xfree(info);
-        v->arch.schedule_tail = saved_schedule_tail;
-        v->arch.continue_info = NULL;
-        vcpu_unlock_affinity(v, &mask);
-    }
-
-    (*saved_schedule_tail)(v);
-}
-
-int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data)
-{
-    struct vcpu *v = current;
-    struct migrate_info *info;
-    cpumask_t mask = cpumask_of_cpu(cpu);
-    int rc;
-
-    if ( cpu == smp_processor_id() )
-        return func(data);
-
-    info = v->arch.continue_info;
-    if ( info == NULL )
-    {
-        info = xmalloc(struct migrate_info);
-        if ( info == NULL )
-            return -ENOMEM;
-
-        rc = vcpu_lock_affinity(v, &mask);
-        if ( rc )
-        {
-            xfree(info);
-            return rc;
-        }
-
-        info->saved_schedule_tail = v->arch.schedule_tail;
-        info->saved_affinity = mask;
-        info->nest = 0;
-
-        v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
-        v->arch.continue_info = info;
-    }
-    else
-    {
-        BUG_ON(info->nest != 0);
-        rc = vcpu_locked_change_affinity(v, &mask);
-        if ( rc )
-            return rc;
-        info->nest++;
-    }
-
-    info->func = func;
-    info->data = data;
-
-    /* Dummy return value will be overwritten by new schedule_tail. */
-    BUG_ON(!test_bit(SCHEDULE_SOFTIRQ, &softirq_pending(smp_processor_id())));
-    return 0;
-}
-
 #define next_arg(fmt, args) ({                                              \
     unsigned long __arg;                                                    \
     switch ( *(fmt)++ )                                                     \
diff -r 5057604eeefc -r 07befd9cf6d3 xen/common/domain.c
--- a/xen/common/domain.c       Wed Apr 14 10:44:29 2010 +0100
+++ b/xen/common/domain.c       Wed Apr 14 11:29:05 2010 +0100
@@ -898,6 +898,73 @@ long vm_assist(struct domain *p, unsigne
     return -ENOSYS;
 }
 
+struct migrate_info {
+    long (*func)(void *data);
+    void *data;
+    struct vcpu *vcpu;
+    unsigned int nest;
+};
+
+static DEFINE_PER_CPU(struct migrate_info *, continue_info);
+
+static void continue_hypercall_tasklet_handler(unsigned long _info)
+{
+    struct migrate_info *info = (struct migrate_info *)_info;
+    struct vcpu *v = info->vcpu;
+
+    vcpu_sleep_sync(v);
+
+    this_cpu(continue_info) = info;
+    return_reg(v) = info->func(info->data);
+    this_cpu(continue_info) = NULL;
+
+    if ( info->nest-- == 0 )
+    {
+        xfree(info);
+        vcpu_unpause(v);
+    }
+}
+
+int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data)
+{
+    struct vcpu *curr = current;
+    struct migrate_info *info;
+
+    if ( cpu == smp_processor_id() )
+        return func(data);
+
+    info = this_cpu(continue_info);
+    if ( info == NULL )
+    {
+        info = xmalloc(struct migrate_info);
+        if ( info == NULL )
+            return -ENOMEM;
+
+        info->vcpu = curr;
+        info->nest = 0;
+
+        tasklet_init(
+            &curr->continue_hypercall_tasklet,
+            continue_hypercall_tasklet_handler,
+            (unsigned long)info);
+
+        vcpu_pause_nosync(curr);
+    }
+    else
+    {
+        BUG_ON(info->nest != 0);
+        info->nest++;
+    }
+
+    info->func = func;
+    info->data = data;
+
+    tasklet_schedule_on_cpu(&curr->continue_hypercall_tasklet, cpu);
+
+    /* Dummy return value will be overwritten by tasklet. */
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
diff -r 5057604eeefc -r 07befd9cf6d3 xen/common/schedule.c
--- a/xen/common/schedule.c     Wed Apr 14 10:44:29 2010 +0100
+++ b/xen/common/schedule.c     Wed Apr 14 11:29:05 2010 +0100
@@ -408,26 +408,18 @@ void cpu_disable_scheduler(void)
     }
 }
 
-static int __vcpu_set_affinity(
-    struct vcpu *v, cpumask_t *affinity,
-    bool_t old_lock_status, bool_t new_lock_status)
+int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
 {
     cpumask_t online_affinity, old_affinity;
+
+    if ( v->domain->is_pinned )
+        return -EINVAL;
 
     cpus_and(online_affinity, *affinity, cpu_online_map);
     if ( cpus_empty(online_affinity) )
         return -EINVAL;
 
     vcpu_schedule_lock_irq(v);
-
-    if ( v->affinity_locked != old_lock_status )
-    {
-        BUG_ON(!v->affinity_locked);
-        vcpu_schedule_unlock_irq(v);
-        return -EBUSY;
-    }
-
-    v->affinity_locked = new_lock_status;
 
     old_affinity = v->cpu_affinity;
     v->cpu_affinity = *affinity;
@@ -444,36 +436,6 @@ static int __vcpu_set_affinity(
     }
 
     return 0;
-}
-
-int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
-{
-    if ( v->domain->is_pinned )
-        return -EINVAL;
-    return __vcpu_set_affinity(v, affinity, 0, 0);
-}
-
-int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity)
-{
-    return __vcpu_set_affinity(v, affinity, 0, 1);
-}
-
-int vcpu_locked_change_affinity(struct vcpu *v, cpumask_t *affinity)
-{
-    return __vcpu_set_affinity(v, affinity, 1, 1);
-}
-
-void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity)
-{
-    cpumask_t online_affinity;
-
-    /* Do not fail if no CPU in old affinity mask is online. */
-    cpus_and(online_affinity, *affinity, cpu_online_map);
-    if ( cpus_empty(online_affinity) )
-        *affinity = cpu_online_map;
-
-    if ( __vcpu_set_affinity(v, affinity, 1, 0) != 0 )
-        BUG();
 }
 
 /* Block the currently-executing domain until a pertinent event occurs. */
diff -r 5057604eeefc -r 07befd9cf6d3 xen/include/asm-ia64/linux-xen/asm/ptrace.h
--- a/xen/include/asm-ia64/linux-xen/asm/ptrace.h       Wed Apr 14 10:44:29 2010 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/ptrace.h       Wed Apr 14 11:29:05 2010 +0100
@@ -197,6 +197,8 @@ static inline struct cpu_user_regs *vcpu
 {
        return (struct cpu_user_regs *)((unsigned long)v + IA64_STK_OFFSET) - 1;
 }
+
+#define return_reg(v) (vcpu_regs(v)->r8)
 
 struct cpu_user_regs *guest_cpu_user_regs(void);
 
diff -r 5057604eeefc -r 07befd9cf6d3 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Wed Apr 14 10:44:29 2010 +0100
+++ b/xen/include/asm-x86/domain.h      Wed Apr 14 11:29:05 2010 +0100
@@ -381,9 +381,6 @@ struct arch_vcpu
     void (*ctxt_switch_from) (struct vcpu *);
     void (*ctxt_switch_to) (struct vcpu *);
 
-    /* Record information required to continue execution after migration */
-    void *continue_info;
-
     /* Bounce information for propagating an exception to guest OS. */
     struct trap_bounce trap_bounce;
 
@@ -450,9 +447,6 @@ struct arch_vcpu
 /* Shorthands to improve code legibility. */
 #define hvm_vmx         hvm_vcpu.u.vmx
 #define hvm_svm         hvm_vcpu.u.svm
-
-/* Continue the current hypercall via func(data) on specified cpu. */
-int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data);
 
 void vcpu_show_execution_state(struct vcpu *);
 void vcpu_show_registers(const struct vcpu *);
diff -r 5057604eeefc -r 07befd9cf6d3 xen/include/asm-x86/regs.h
--- a/xen/include/asm-x86/regs.h        Wed Apr 14 10:44:29 2010 +0100
+++ b/xen/include/asm-x86/regs.h        Wed Apr 14 11:29:05 2010 +0100
@@ -19,4 +19,6 @@
     (diff == 0);                                                              \
 })
 
+#define return_reg(v) ((v)->arch.guest_context.user_regs.eax)
+
 #endif /* __X86_REGS_H__ */
diff -r 5057604eeefc -r 07befd9cf6d3 xen/include/xen/domain.h
--- a/xen/include/xen/domain.h  Wed Apr 14 10:44:29 2010 +0100
+++ b/xen/include/xen/domain.h  Wed Apr 14 11:29:05 2010 +0100
@@ -3,6 +3,7 @@
 #define __XEN_DOMAIN_H__
 
 #include <public/xen.h>
+#include <asm/domain.h>
 
 typedef union {
     struct vcpu_guest_context *nat;
@@ -62,6 +63,9 @@ bool_t domctl_lock_acquire(void);
 bool_t domctl_lock_acquire(void);
 void domctl_lock_release(void);
 
+/* Continue the current hypercall via func(data) on specified cpu. */
+int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data);
+
 extern unsigned int xen_processor_pmbits;
 
 #endif /* __XEN_DOMAIN_H__ */
diff -r 5057604eeefc -r 07befd9cf6d3 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Wed Apr 14 10:44:29 2010 +0100
+++ b/xen/include/xen/sched.h   Wed Apr 14 11:29:05 2010 +0100
@@ -15,7 +15,7 @@
 #include <xen/timer.h>
 #include <xen/grant_table.h>
 #include <xen/rangeset.h>
-#include <asm/domain.h>
+#include <xen/domain.h>
 #include <xen/xenoprof.h>
 #include <xen/rcupdate.h>
 #include <xen/irq.h>
@@ -132,8 +132,6 @@ struct vcpu
     bool_t           defer_shutdown;
     /* VCPU is paused following shutdown request (d->is_shutting_down)? */
     bool_t           paused_for_shutdown;
-    /* VCPU affinity is temporarily locked from controller changes? */
-    bool_t           affinity_locked;
 
     /*
      * > 0: a single port is being polled;
@@ -156,6 +154,9 @@ struct vcpu
 
     /* Bitmask of CPUs which are holding onto this VCPU's state. */
     cpumask_t        vcpu_dirty_cpumask;
+
+    /* Tasklet for continue_hypercall_on_cpu(). */
+    struct tasklet   continue_hypercall_tasklet;
 
     struct arch_vcpu arch;
 };
@@ -581,9 +582,6 @@ void vcpu_force_reschedule(struct vcpu *
 void vcpu_force_reschedule(struct vcpu *v);
 void cpu_disable_scheduler(void);
 int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);
-int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity);
-int vcpu_locked_change_affinity(struct vcpu *v, cpumask_t *affinity);
-void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
 uint64_t get_cpu_idle_time(unsigned int cpu);
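
[Editorial note, not part of this changeset] Design note, hedged: the x86-only implementation removed above relied on temporarily locking the vCPU's affinity so the caller itself migrated to the target CPU; the common replacement pauses the calling vCPU and runs the continuation in a per-vCPU tasklet scheduled on the target CPU, storing the result through the per-architecture return_reg() macro (eax on x86, r8 on ia64). A port to another architecture therefore mainly needs a suitable return_reg(). A minimal sketch, assuming a hypothetical architecture whose guest return value lives in user_regs.r0:

/* Hypothetical example only: the definition a new architecture would add
 * so the common continue_hypercall_on_cpu() can store the hypercall
 * result; the register name r0 is an assumption for illustration. */
#define return_reg(v) ((v)->arch.guest_context.user_regs.r0)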

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
