# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID ddc25d4ebf6023dba9c49597f3b39fe3f5dc9f0d
# Parent ea4829e3009209a62e1f3efbcd632cb40956f42c
[XEN] Replace direct common-code access of evtchn_upcall_mask
with local_event_delivery_* accessors.
Notes:
1. Still some (read-only, debug) use in keyhandler.c
2. Still accesses through current->vcpu_info.
Both above may need to be compiled only for architectures
that use event channels.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
xen/common/event_channel.c | 2 +-
xen/common/schedule.c | 10 +++++-----
xen/include/asm-ia64/event.h | 20 ++++++++++++++++++++
xen/include/asm-ia64/vmx_vcpu.h | 2 --
xen/include/asm-x86/event.h | 26 ++++++++++++++++++++++----
xen/include/xen/event.h | 3 ---
xen/include/xen/sched.h | 2 +-
7 files changed, 49 insertions(+), 16 deletions(-)
diff -r ea4829e30092 -r ddc25d4ebf60 xen/common/event_channel.c
--- a/xen/common/event_channel.c Sat Jun 10 11:05:11 2006 +0100
+++ b/xen/common/event_channel.c Sat Jun 10 11:07:11 2006 +0100
@@ -499,7 +499,7 @@ void evtchn_set_pending(struct vcpu *v,
evtchn_notify(v);
}
else if ( unlikely(test_bit(_VCPUF_blocked, &v->vcpu_flags) &&
- v->vcpu_info->evtchn_upcall_mask) )
+ !local_event_delivery_is_enabled()) )
{
/*
* Blocked and masked will usually mean that the VCPU executed
diff -r ea4829e30092 -r ddc25d4ebf60 xen/common/schedule.c
--- a/xen/common/schedule.c Sat Jun 10 11:05:11 2006 +0100
+++ b/xen/common/schedule.c Sat Jun 10 11:07:11 2006 +0100
@@ -199,11 +199,11 @@ static long do_block(void)
{
struct vcpu *v = current;
- v->vcpu_info->evtchn_upcall_mask = 0;
+ local_event_delivery_enable();
set_bit(_VCPUF_blocked, &v->vcpu_flags);
/* Check for events /after/ blocking: avoids wakeup waiting race. */
- if ( event_pending(v) )
+ if ( local_events_need_delivery() )
{
clear_bit(_VCPUF_blocked, &v->vcpu_flags);
}
@@ -230,8 +230,8 @@ static long do_poll(struct sched_poll *s
if ( !guest_handle_okay(sched_poll->ports, sched_poll->nr_ports) )
return -EFAULT;
- /* Ensure that upcalls are disabled: tested by evtchn_set_pending(). */
- if ( !v->vcpu_info->evtchn_upcall_mask )
+ /* Ensure that events are disabled: tested by evtchn_set_pending(). */
+ if ( local_event_delivery_is_enabled() )
return -EINVAL;
set_bit(_VCPUF_blocked, &v->vcpu_flags);
@@ -248,7 +248,7 @@ static long do_poll(struct sched_poll *s
goto out;
rc = 0;
- if ( evtchn_pending(v->domain, port) )
+ if ( test_bit(port, v->domain->shared_info->evtchn_pending) )
goto out;
}
diff -r ea4829e30092 -r ddc25d4ebf60 xen/include/asm-ia64/event.h
--- a/xen/include/asm-ia64/event.h Sat Jun 10 11:05:11 2006 +0100
+++ b/xen/include/asm-ia64/event.h Sat Jun 10 11:07:11 2006 +0100
@@ -37,6 +37,26 @@ static inline void evtchn_notify(struct
(!!(v)->vcpu_info->evtchn_upcall_pending & \
!(v)->vcpu_info->evtchn_upcall_mask)
+static inline int local_events_need_delivery(void)
+{
+ return event_pending(current);
+}
+
+static inline int local_event_delivery_is_enabled(void)
+{
+ return !current->vcpu_info->evtchn_upcall_mask;
+}
+
+static inline void local_event_delivery_disable(void)
+{
+ current->vcpu_info->evtchn_upcall_mask = 1;
+}
+
+static inline void local_event_delivery_enable(void)
+{
+ current->vcpu_info->evtchn_upcall_mask = 0;
+}
+
static inline int arch_virq_is_global(int virq)
{
int rc;
diff -r ea4829e30092 -r ddc25d4ebf60 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h Sat Jun 10 11:05:11 2006 +0100
+++ b/xen/include/asm-ia64/vmx_vcpu.h Sat Jun 10 11:07:11 2006 +0100
@@ -488,6 +488,4 @@ vcpu_get_vhpt(VCPU *vcpu)
return &vcpu->arch.vhpt;
}
-#define check_work_pending(v) \
- (event_pending((v)) || ((v)->arch.irq_new_pending))
#endif
diff -r ea4829e30092 -r ddc25d4ebf60 xen/include/asm-x86/event.h
--- a/xen/include/asm-x86/event.h Sat Jun 10 11:05:11 2006 +0100
+++ b/xen/include/asm-x86/event.h Sat Jun 10 11:07:11 2006 +0100
@@ -26,10 +26,28 @@ static inline void evtchn_notify(struct
smp_send_event_check_cpu(v->processor);
}
-/* Note: Bitwise operations result in fast code with no branches. */
-#define event_pending(v) \
- (!!(v)->vcpu_info->evtchn_upcall_pending & \
- !(v)->vcpu_info->evtchn_upcall_mask)
+static inline int local_events_need_delivery(void)
+{
+ struct vcpu *v = current;
+ /* Note: Bitwise operations result in fast code with no branches. */
+ return (!!v->vcpu_info->evtchn_upcall_pending &
+ !v->vcpu_info->evtchn_upcall_mask);
+}
+
+static inline int local_event_delivery_is_enabled(void)
+{
+ return !current->vcpu_info->evtchn_upcall_mask;
+}
+
+static inline void local_event_delivery_disable(void)
+{
+ current->vcpu_info->evtchn_upcall_mask = 1;
+}
+
+static inline void local_event_delivery_enable(void)
+{
+ current->vcpu_info->evtchn_upcall_mask = 0;
+}
/* No arch specific virq definition now. Default to global. */
static inline int arch_virq_is_global(int virq)
diff -r ea4829e30092 -r ddc25d4ebf60 xen/include/xen/event.h
--- a/xen/include/xen/event.h Sat Jun 10 11:05:11 2006 +0100
+++ b/xen/include/xen/event.h Sat Jun 10 11:07:11 2006 +0100
@@ -38,9 +38,6 @@ extern void send_guest_global_virq(struc
*/
extern void send_guest_pirq(struct domain *d, int pirq);
-#define evtchn_pending(d, p) \
- (test_bit((p), &(d)->shared_info->evtchn_pending[0]))
-
/* Send a notification from a local event-channel port. */
extern long evtchn_send(unsigned int lport);
diff -r ea4829e30092 -r ddc25d4ebf60 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Sat Jun 10 11:05:11 2006 +0100
+++ b/xen/include/xen/sched.h Sat Jun 10 11:07:11 2006 +0100
@@ -318,7 +318,7 @@ unsigned long hypercall_create_continuat
#define hypercall_preempt_check() (unlikely( \
softirq_pending(smp_processor_id()) | \
- event_pending(current) \
+ local_events_need_delivery() \
))
/* This domain_hash and domain_list are protected by the domlist_lock. */
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|