# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID c445d4a0dd76b4859d058368ffab7c65f732acab
# Parent e3d7c21838661f1dd83c0ce49c3852a0c706f2cd
Define a new sched_op hypercall called sched_op_new, which differs from the
legacy hypercall in that it takes a pointer to a block of extra arguments
rather than an opaque unsigned long. The old hypercall still exists, for
backwards compatibility.
The new hypercall supports new sub-command SCHEDOP_poll, which can be used to
wait on a set of event-channel ports with an optional timeout. This is exported
in XenLinux as HYPERVISOR_poll, and used in the pcifront driver to wait on a
response from the pciback driver.
Can also be used for debuggers. :-)
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
Signed-off-by: John Levon <john.levon@xxxxxxx>
diff -r e3d7c2183866 -r c445d4a0dd76 linux-2.6-xen-sparse/arch/i386/kernel/time-xen.c
--- a/linux-2.6-xen-sparse/arch/i386/kernel/time-xen.c Tue Mar 14 16:35:38 2006
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/time-xen.c Tue Mar 14 18:33:45 2006
@@ -949,7 +949,7 @@
}
/* Convert jiffies to system time. */
-static inline u64 jiffies_to_st(unsigned long j)
+u64 jiffies_to_st(unsigned long j)
{
unsigned long seq;
long delta;
@@ -967,6 +967,7 @@
return st;
}
+EXPORT_SYMBOL(jiffies_to_st);
/*
* stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
diff -r e3d7c2183866 -r c445d4a0dd76 linux-2.6-xen-sparse/drivers/xen/pcifront/pci_op.c
--- a/linux-2.6-xen-sparse/drivers/xen/pcifront/pci_op.c Tue Mar 14 16:35:38 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/pcifront/pci_op.c Tue Mar 14 18:33:45 2006
@@ -40,9 +40,8 @@
{
int err = 0;
struct xen_pci_op *active_op = &pdev->sh_info->op;
- unsigned long irq_flags;
-
- unsigned int volatile ttl = (1U << 29);
+ unsigned long irq_flags, poll_end;
+ evtchn_port_t port = pdev->evtchn;
spin_lock_irqsave(&pdev->sh_info_lock, irq_flags);
@@ -51,14 +50,17 @@
/* Go */
wmb();
set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
- notify_remote_via_evtchn(pdev->evtchn);
-
- /* IRQs are disabled for the pci config. space reads/writes,
- * which means no event channel to notify us that the backend
- * is done so spin while waiting for the answer */
- while (test_bit
- (_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)) {
- if (!ttl) {
+ notify_remote_via_evtchn(port);
+
+ poll_end = jiffies + 5*HZ;
+ clear_evtchn(port);
+
+ while (test_bit(_XEN_PCIF_active,
+ (unsigned long *)&pdev->sh_info->flags)) {
+ if (HYPERVISOR_poll(&port, 1, poll_end))
+ BUG();
+ clear_evtchn(port);
+ if (time_after(jiffies, poll_end)) {
dev_err(&pdev->xdev->dev,
"pciback not responding!!!\n");
clear_bit(_XEN_PCIF_active,
@@ -66,7 +68,6 @@
err = XEN_PCI_ERR_dev_not_found;
goto out;
}
- ttl--;
}
memcpy(op, active_op, sizeof(struct xen_pci_op));
diff -r e3d7c2183866 -r c445d4a0dd76 linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypercall.h
--- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypercall.h Tue Mar 14 16:35:38 2006
+++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypercall.h Tue Mar 14 18:33:45 2006
@@ -33,6 +33,7 @@
#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/nmi.h>
+#include <linux/errno.h>
#define __STR(x) #x
#define STR(x) __STR(x)
@@ -167,6 +168,31 @@
int cmd, unsigned long arg)
{
return _hypercall2(int, sched_op, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_sched_op_new(
+ int cmd, void *arg)
+{
+ return _hypercall2(int, sched_op_new, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_poll(
+ evtchn_port_t *ports, unsigned int nr_ports, u64 timeout)
+{
+ struct sched_poll sched_poll = {
+ .ports = ports,
+ .nr_ports = nr_ports,
+ .timeout = jiffies_to_st(timeout)
+ };
+
+ int rc = HYPERVISOR_sched_op_new(SCHEDOP_poll, &sched_poll);
+
+ if (rc == -ENOSYS)
+ rc = HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+
+ return rc;
}
static inline long
diff -r e3d7c2183866 -r c445d4a0dd76 linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypervisor.h
--- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypervisor.h Tue Mar 14 16:35:38 2006
+++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypervisor.h Tue Mar 14 18:33:45 2006
@@ -97,6 +97,9 @@
void xen_destroy_contiguous_region(
unsigned long vstart, unsigned int order);
+/* Turn jiffies into Xen system time. */
+u64 jiffies_to_st(unsigned long jiffies);
+
#include <asm/hypercall.h>
#if defined(CONFIG_X86_64)
diff -r e3d7c2183866 -r c445d4a0dd76 linux-2.6-xen-sparse/include/asm-ia64/hypercall.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h Tue Mar 14 16:35:38 2006
+++ b/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h Tue Mar 14 18:33:45 2006
@@ -32,6 +32,7 @@
#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
+#include <linux/errno.h>
/* FIXME: temp place to hold these page related macros */
#include <asm/page.h>
@@ -165,6 +166,31 @@
return _hypercall2(int, sched_op, cmd, arg);
}
+static inline int
+HYPERVISOR_sched_op_new(
+ int cmd, void *arg)
+{
+ return _hypercall2(int, sched_op_new, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_poll(
+ evtchn_port_t *ports, unsigned int nr_ports, unsigned long timeout)
+{
+ struct sched_poll sched_poll = {
+ .ports = ports,
+ .nr_ports = nr_ports,
+ .timeout = jiffies_to_st(timeout)
+ };
+
+ int rc = HYPERVISOR_sched_op_new(SCHEDOP_poll, &sched_poll);
+
+ if (rc == -ENOSYS)
+ rc = HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+
+ return rc;
+}
+
static inline long
HYPERVISOR_set_timer_op(
u64 timeout)
diff -r e3d7c2183866 -r c445d4a0dd76 linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h Tue Mar 14 16:35:38 2006
+++ b/linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h Tue Mar 14 18:33:45 2006
@@ -44,6 +44,9 @@
void force_evtchn_callback(void);
+/* Turn jiffies into Xen system time. XXX Implement me. */
+#define jiffies_to_st(j) 0
+
#include <asm/hypercall.h>
// for drivers/xen/privcmd/privcmd.c
diff -r e3d7c2183866 -r c445d4a0dd76 linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/hypercall.h
--- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/hypercall.h Tue Mar 14 16:35:38 2006
+++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/hypercall.h Tue Mar 14 18:33:45 2006
@@ -37,6 +37,7 @@
#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/nmi.h>
+#include <linux/errno.h>
#define __STR(x) #x
#define STR(x) __STR(x)
@@ -172,6 +173,31 @@
int cmd, unsigned long arg)
{
return _hypercall2(int, sched_op, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_sched_op_new(
+ int cmd, void *arg)
+{
+ return _hypercall2(int, sched_op_new, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_poll(
+ evtchn_port_t *ports, unsigned int nr_ports, u64 timeout)
+{
+ struct sched_poll sched_poll = {
+ .ports = ports,
+ .nr_ports = nr_ports,
+ .timeout = jiffies_to_st(timeout)
+ };
+
+ int rc = HYPERVISOR_sched_op_new(SCHEDOP_poll, &sched_poll);
+
+ if (rc == -ENOSYS)
+ rc = HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+
+ return rc;
}
static inline long
diff -r e3d7c2183866 -r c445d4a0dd76 xen/arch/x86/x86_32/entry.S
--- a/xen/arch/x86/x86_32/entry.S Tue Mar 14 16:35:38 2006
+++ b/xen/arch/x86/x86_32/entry.S Tue Mar 14 18:33:45 2006
@@ -586,6 +586,13 @@
movl %eax,UREGS_eax(%ecx)
jmp do_sched_op
+do_arch_sched_op_new:
+ # Ensure we return success even if we return via schedule_tail()
+ xorl %eax,%eax
+ GET_GUEST_REGS(%ecx)
+ movl %eax,UREGS_eax(%ecx)
+ jmp do_sched_op_new
+
.data
ENTRY(exception_table)
@@ -640,6 +647,7 @@
.long do_mmuext_op
.long do_acm_op
.long do_nmi_op
+ .long do_arch_sched_op_new
.rept NR_hypercalls-((.-hypercall_table)/4)
.long do_ni_hypercall
.endr
@@ -674,6 +682,7 @@
.byte 4 /* do_mmuext_op */
.byte 1 /* do_acm_op */
.byte 2 /* do_nmi_op */
+ .byte 2 /* do_arch_sched_op_new */
.rept NR_hypercalls-(.-hypercall_args_table)
.byte 0 /* do_ni_hypercall */
.endr
diff -r e3d7c2183866 -r c445d4a0dd76 xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S Tue Mar 14 16:35:38 2006
+++ b/xen/arch/x86/x86_64/entry.S Tue Mar 14 18:33:45 2006
@@ -495,6 +495,13 @@
movq %rax,UREGS_rax(%r10)
jmp do_sched_op
+do_arch_sched_op_new:
+ # Ensure we return success even if we return via schedule_tail()
+ xorl %eax,%eax
+ GET_GUEST_REGS(%r10)
+ movq %rax,UREGS_rax(%r10)
+ jmp do_sched_op_new
+
.data
ENTRY(exception_table)
@@ -549,6 +556,7 @@
.quad do_mmuext_op
.quad do_acm_op
.quad do_nmi_op
+ .quad do_arch_sched_op_new
.rept NR_hypercalls-((.-hypercall_table)/8)
.quad do_ni_hypercall
.endr
@@ -583,6 +591,7 @@
.byte 4 /* do_mmuext_op */
.byte 1 /* do_acm_op */
.byte 2 /* do_nmi_op */
+ .byte 2 /* do_arch_sched_op_new */
.rept NR_hypercalls-(.-hypercall_args_table)
.byte 0 /* do_ni_hypercall */
.endr
diff -r e3d7c2183866 -r c445d4a0dd76 xen/common/event_channel.c
--- a/xen/common/event_channel.c Tue Mar 14 16:35:38 2006
+++ b/xen/common/event_channel.c Tue Mar 14 18:33:45 2006
@@ -438,6 +438,47 @@
return ret;
}
+void evtchn_set_pending(struct vcpu *v, int port)
+{
+ struct domain *d = v->domain;
+ shared_info_t *s = d->shared_info;
+
+ /*
+ * The following bit operations must happen in strict order.
+ * NB. On x86, the atomic bit operations also act as memory barriers.
+ * There is therefore sufficiently strict ordering for this architecture --
+ * others may require explicit memory barriers.
+ */
+
+ if ( test_and_set_bit(port, &s->evtchn_pending[0]) )
+ return;
+
+ if ( !test_bit (port, &s->evtchn_mask[0]) &&
+ !test_and_set_bit(port / BITS_PER_LONG,
+ &v->vcpu_info->evtchn_pending_sel) &&
+ !test_and_set_bit(0, &v->vcpu_info->evtchn_upcall_pending) )
+ {
+ evtchn_notify(v);
+ }
+ else if ( unlikely(test_bit(_VCPUF_blocked, &v->vcpu_flags) &&
+ v->vcpu_info->evtchn_upcall_mask) )
+ {
+ /*
+ * Blocked and masked will usually mean that the VCPU executed
+ * SCHEDOP_poll. Kick the VCPU in case this port is in its poll list.
+ */
+ vcpu_unblock(v);
+ }
+}
+
+void send_guest_virq(struct vcpu *v, int virq)
+{
+ int port = v->virq_to_evtchn[virq];
+
+ if ( likely(port != 0) )
+ evtchn_set_pending(v, port);
+}
+
void send_guest_pirq(struct domain *d, int pirq)
{
int port = d->pirq_to_evtchn[pirq];
diff -r e3d7c2183866 -r c445d4a0dd76 xen/common/schedule.c
--- a/xen/common/schedule.c Tue Mar 14 16:35:38 2006
+++ b/xen/common/schedule.c Tue Mar 14 18:33:45 2006
@@ -27,6 +27,7 @@
#include <xen/softirq.h>
#include <xen/trace.h>
#include <xen/mm.h>
+#include <xen/guest_access.h>
#include <public/sched.h>
#include <public/sched_ctl.h>
@@ -42,6 +43,7 @@
static void s_timer_fn(void *unused);
static void t_timer_fn(void *unused);
static void dom_timer_fn(void *data);
+static void poll_timer_fn(void *data);
/* This is global for now so that private implementations can reach it */
struct schedule_data schedule_data[NR_CPUS];
@@ -164,8 +166,9 @@
void sched_add_domain(struct vcpu *v)
{
- /* Initialise the per-domain timer. */
+ /* Initialise the per-domain timers. */
init_timer(&v->timer, dom_timer_fn, v, v->processor);
+ init_timer(&v->poll_timer, poll_timer_fn, v, v->processor);
if ( is_idle_vcpu(v) )
{
@@ -181,6 +184,8 @@
void sched_rem_domain(struct vcpu *v)
{
kill_timer(&v->timer);
+ kill_timer(&v->poll_timer);
+
SCHED_OP(rem_task, v);
TRACE_2D(TRC_SCHED_DOM_REM, v->domain->domain_id, v->vcpu_id);
}
@@ -270,6 +275,55 @@
return 0;
}
+static long do_poll(struct sched_poll *sched_poll)
+{
+ struct vcpu *v = current;
+ evtchn_port_t port;
+ long rc = 0;
+ unsigned int i;
+
+ /* Fairly arbitrary limit. */
+ if ( sched_poll->nr_ports > 128 )
+ return -EINVAL;
+
+ if ( !guest_handle_okay(sched_poll->ports, sched_poll->nr_ports) )
+ return -EFAULT;
+
+ /* Ensure that upcalls are disabled: tested by evtchn_set_pending(). */
+ if ( !v->vcpu_info->evtchn_upcall_mask )
+ return -EINVAL;
+
+ set_bit(_VCPUF_blocked, &v->vcpu_flags);
+
+ /* Check for events /after/ blocking: avoids wakeup waiting race. */
+ for ( i = 0; i < sched_poll->nr_ports; i++ )
+ {
+ rc = -EFAULT;
+ if ( __copy_from_guest_offset(&port, sched_poll->ports, i, 1) )
+ goto out;
+
+ rc = -EINVAL;
+ if ( port >= MAX_EVTCHNS )
+ goto out;
+
+ rc = 0;
+ if ( evtchn_pending(v->domain, port) )
+ goto out;
+ }
+
+ if ( sched_poll->timeout != 0 )
+ set_timer(&v->poll_timer, sched_poll->timeout);
+
+ TRACE_2D(TRC_SCHED_BLOCK, v->domain->domain_id, v->vcpu_id);
+ __enter_scheduler();
+
+ stop_timer(&v->poll_timer);
+
+ out:
+ clear_bit(_VCPUF_blocked, &v->vcpu_flags);
+ return rc;
+}
+
/* Voluntarily yield the processor for this allocation. */
static long do_yield(void)
{
@@ -301,6 +355,61 @@
TRACE_3D(TRC_SCHED_SHUTDOWN,
current->domain->domain_id, current->vcpu_id, arg);
domain_shutdown(current->domain, (u8)arg);
+ break;
+ }
+
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+long do_sched_op_new(int cmd, GUEST_HANDLE(void) arg)
+{
+ long ret = 0;
+
+ switch ( cmd )
+ {
+ case SCHEDOP_yield:
+ {
+ ret = do_yield();
+ break;
+ }
+
+ case SCHEDOP_block:
+ {
+ ret = do_block();
+ break;
+ }
+
+ case SCHEDOP_shutdown:
+ {
+ struct sched_shutdown sched_shutdown;
+
+ ret = -EFAULT;
+ if ( copy_from_guest(&sched_shutdown, arg, 1) )
+ break;
+
+ ret = 0;
+ TRACE_3D(TRC_SCHED_SHUTDOWN,
+ current->domain->domain_id, current->vcpu_id,
+ sched_shutdown.reason);
+ domain_shutdown(current->domain, (u8)sched_shutdown.reason);
+
+ break;
+ }
+
+ case SCHEDOP_poll:
+ {
+ struct sched_poll sched_poll;
+
+ ret = -EFAULT;
+ if ( copy_from_guest(&sched_poll, arg, 1) )
+ break;
+
+ ret = do_poll(&sched_poll);
+
break;
}
@@ -518,6 +627,13 @@
send_guest_virq(v, VIRQ_TIMER);
}
+/* SCHEDOP_poll timeout callback. */
+static void poll_timer_fn(void *data)
+{
+ struct vcpu *v = data;
+ vcpu_unblock(v);
+}
+
/* Initialise the data structures. */
void __init scheduler_init(void)
{
diff -r e3d7c2183866 -r c445d4a0dd76 xen/include/public/event_channel.h
--- a/xen/include/public/event_channel.h Tue Mar 14 16:35:38 2006
+++ b/xen/include/public/event_channel.h Tue Mar 14 18:33:45 2006
@@ -10,6 +10,7 @@
#define __XEN_PUBLIC_EVENT_CHANNEL_H__
typedef uint32_t evtchn_port_t;
+DEFINE_GUEST_HANDLE(evtchn_port_t);
/*
* EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
diff -r e3d7c2183866 -r c445d4a0dd76 xen/include/public/sched.h
--- a/xen/include/public/sched.h Tue Mar 14 16:35:38 2006
+++ b/xen/include/public/sched.h Tue Mar 14 18:33:45 2006
@@ -9,16 +9,32 @@
#ifndef __XEN_PUBLIC_SCHED_H__
#define __XEN_PUBLIC_SCHED_H__
+#include "event_channel.h"
+
/*
- * Prototype for this hypercall is:
- * int sched_op(int cmd, unsigned long arg)
+ * There are two forms of this hypercall.
+ *
+ * The first and preferred version is only available from Xen 3.0.2.
+ * The prototype for this hypercall is:
+ * long sched_op_new(int cmd, void *arg)
* @cmd == SCHEDOP_??? (scheduler operation).
- * @arg == Operation-specific extra argument(s).
+ * @arg == Operation-specific extra argument(s), as described below.
+ *
+ * The legacy version of this hypercall supports only the following commands:
+ * SCHEDOP_yield, SCHEDOP_block, and SCHEDOP_shutdown. The prototype for the
+ * legacy hypercall is:
+ * long sched_op(int cmd, unsigned long arg)
+ * @cmd == SCHEDOP_??? (scheduler operation).
+ * @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
+ * == SHUTDOWN_* code (SCHEDOP_shutdown)
+ *
+ * The sub-command descriptions below describe extra arguments for the
+ * sched_op_new() hypercall.
*/
/*
* Voluntarily yield the CPU.
- * @arg == 0.
+ * @arg == NULL.
*/
#define SCHEDOP_yield 0
@@ -27,18 +43,35 @@
* If called with event upcalls masked, this operation will atomically
* reenable event delivery and check for pending events before blocking the
* VCPU. This avoids a "wakeup waiting" race.
- * @arg == 0.
+ * @arg == NULL.
*/
#define SCHEDOP_block 1
/*
* Halt execution of this domain (all VCPUs) and notify the system controller.
- * @arg == SHUTDOWN_??? (reason for shutdown).
+ * @arg == pointer to sched_shutdown structure.
*/
#define SCHEDOP_shutdown 2
+typedef struct sched_shutdown {
+ unsigned int reason; /* SHUTDOWN_* */
+} sched_shutdown_t;
+DEFINE_GUEST_HANDLE(sched_shutdown_t);
/*
- * Reason codes for SCHEDOP_shutdown. These may be interpreted by controller
+ * Poll a set of event-channel ports. Return when one or more are pending. An
+ * optional timeout may be specified.
+ * @arg == pointer to sched_poll structure.
+ */
+#define SCHEDOP_poll 3
+typedef struct sched_poll {
+ GUEST_HANDLE(evtchn_port_t) ports;
+ unsigned int nr_ports;
+ uint64_t timeout;
+} sched_poll_t;
+DEFINE_GUEST_HANDLE(sched_poll_t);
+
+/*
+ * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
* software to determine the appropriate action. For the most part, Xen does
* not care about the shutdown code.
*/
diff -r e3d7c2183866 -r c445d4a0dd76 xen/include/public/xen.h
--- a/xen/include/public/xen.h Tue Mar 14 16:35:38 2006
+++ b/xen/include/public/xen.h Tue Mar 14 18:33:45 2006
@@ -59,6 +59,7 @@
#define __HYPERVISOR_mmuext_op 26
#define __HYPERVISOR_acm_op 27
#define __HYPERVISOR_nmi_op 28
+#define __HYPERVISOR_sched_op_new 29
/*
* VIRTUAL INTERRUPTS
diff -r e3d7c2183866 -r c445d4a0dd76 xen/include/xen/event.h
--- a/xen/include/xen/event.h Tue Mar 14 16:35:38 2006
+++ b/xen/include/xen/event.h Tue Mar 14 18:33:45 2006
@@ -15,41 +15,14 @@
#include <asm/bitops.h>
#include <asm/event.h>
-/*
- * EVENT-CHANNEL NOTIFICATIONS
- * NB. On x86, the atomic bit operations also act as memory barriers. There
- * is therefore sufficiently strict ordering for this architecture -- others
- * may require explicit memory barriers.
- */
-
-static inline void evtchn_set_pending(struct vcpu *v, int port)
-{
- struct domain *d = v->domain;
- shared_info_t *s = d->shared_info;
-
- /* These four operations must happen in strict order. */
- if ( !test_and_set_bit(port, &s->evtchn_pending[0]) &&
- !test_bit (port, &s->evtchn_mask[0]) &&
- !test_and_set_bit(port / BITS_PER_LONG,
- &v->vcpu_info->evtchn_pending_sel) &&
- !test_and_set_bit(0, &v->vcpu_info->evtchn_upcall_pending) )
- {
- evtchn_notify(v);
- }
-}
+extern void evtchn_set_pending(struct vcpu *v, int port);
/*
* send_guest_virq:
* @v: VCPU to which virtual IRQ should be sent
* @virq: Virtual IRQ number (VIRQ_*)
*/
-static inline void send_guest_virq(struct vcpu *v, int virq)
-{
- int port = v->virq_to_evtchn[virq];
-
- if ( likely(port != 0) )
- evtchn_set_pending(v, port);
-}
+extern void send_guest_virq(struct vcpu *v, int virq);
/*
* send_guest_pirq:
@@ -63,6 +36,9 @@
(!!(v)->vcpu_info->evtchn_upcall_pending & \
!(v)->vcpu_info->evtchn_upcall_mask)
+#define evtchn_pending(d, p) \
+ (test_bit((p), &(d)->shared_info->evtchn_pending[0]))
+
/* Send a notification from a local event-channel port. */
extern long evtchn_send(unsigned int lport);
diff -r e3d7c2183866 -r c445d4a0dd76 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Tue Mar 14 16:35:38 2006
+++ b/xen/include/xen/sched.h Tue Mar 14 18:33:45 2006
@@ -66,6 +66,8 @@
struct timer timer; /* one-shot timer for timeout values */
unsigned long sleep_tick; /* tick at which this vcpu started sleep */
+
+ struct timer poll_timer; /* timeout for SCHEDOP_poll */
void *sched_priv; /* scheduler-specific data */
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|