# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1223547471 -3600
# Node ID 0033c944318f266a0e367678bf9f46042ae03397
# Parent a11ad61bdb5b188a8116b533c87c31d6e9bd62d4
Rename evtchn_lock to event_lock, since it protects more than just
event-channel state now.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
xen/arch/x86/hvm/svm/intr.c | 6 -
xen/arch/x86/hvm/vmx/intr.c | 6 -
xen/arch/x86/irq.c | 16 ++--
xen/arch/x86/physdev.c | 12 +--
xen/common/event_channel.c | 88 ++++++++++++------------
xen/drivers/passthrough/io.c | 36 ++++-----
xen/drivers/passthrough/pci.c | 4 -
xen/drivers/passthrough/vtd/x86/vtd.c | 6 -
xen/include/asm-x86/domain.h | 2
xen/include/asm-x86/hvm/irq.h | 2
xen/include/xen/sched.h | 2
xen/xsm/acm/acm_simple_type_enforcement_hooks.c | 8 +-
12 files changed, 94 insertions(+), 94 deletions(-)
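
The change is mechanical: every use of d->evtchn_lock becomes d->event_lock, with no
change in locking behaviour. As a minimal sketch of the pattern repeated throughout,
restating the free_domain_pirqs() hunk from xen/arch/x86/irq.c below, the same
domain-wide lock now also serializes PIRQ bookkeeping:

    /* Illustrative only -- the hunk in xen/arch/x86/irq.c below is the
     * authoritative version; this restates it to show the new lock name. */
    void free_domain_pirqs(struct domain *d)
    {
        int i;

        spin_lock(&d->event_lock);          /* was: d->evtchn_lock */

        for ( i = 0; i < NR_PIRQS; i++ )
            if ( d->arch.pirq_vector[i] > 0 )
                unmap_domain_pirq(d, i);

        spin_unlock(&d->event_lock);
    }
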
diff -r a11ad61bdb5b -r 0033c944318f xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c Thu Oct 09 11:14:52 2008 +0100
+++ b/xen/arch/x86/hvm/svm/intr.c Thu Oct 09 11:17:51 2008 +0100
@@ -124,11 +124,11 @@ static void svm_dirq_assist(struct vcpu
if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
continue;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
{
hvm_pci_msi_assert(d, irq);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
continue;
}
@@ -151,7 +151,7 @@ static void svm_dirq_assist(struct vcpu
*/
set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
NOW() + PT_IRQ_TIME_OUT);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
}
diff -r a11ad61bdb5b -r 0033c944318f xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c Thu Oct 09 11:14:52 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c Thu Oct 09 11:17:51 2008 +0100
@@ -127,11 +127,11 @@ static void vmx_dirq_assist(struct vcpu
if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
continue;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
{
hvm_pci_msi_assert(d, irq);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
continue;
}
@@ -154,7 +154,7 @@ static void vmx_dirq_assist(struct vcpu
*/
set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
NOW() + PT_IRQ_TIME_OUT);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
}
diff -r a11ad61bdb5b -r 0033c944318f xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c Thu Oct 09 11:14:52 2008 +0100
+++ b/xen/arch/x86/irq.c Thu Oct 09 11:17:51 2008 +0100
@@ -514,7 +514,7 @@ int pirq_guest_bind(struct vcpu *v, int
int rc = 0;
cpumask_t cpumask = CPU_MASK_NONE;
- WARN_ON(!spin_is_locked(&v->domain->evtchn_lock));
+ WARN_ON(!spin_is_locked(&v->domain->event_lock));
BUG_ON(!local_irq_is_enabled());
retry:
@@ -684,7 +684,7 @@ void pirq_guest_unbind(struct domain *d,
irq_desc_t *desc;
int vector;
- WARN_ON(!spin_is_locked(&d->evtchn_lock));
+ WARN_ON(!spin_is_locked(&d->event_lock));
BUG_ON(!local_irq_is_enabled());
desc = domain_spin_lock_irq_desc(d, irq, NULL);
@@ -711,7 +711,7 @@ int pirq_guest_force_unbind(struct domai
irq_guest_action_t *action;
int i, bound = 0;
- WARN_ON(!spin_is_locked(&d->evtchn_lock));
+ WARN_ON(!spin_is_locked(&d->event_lock));
BUG_ON(!local_irq_is_enabled());
desc = domain_spin_lock_irq_desc(d, irq, NULL);
@@ -738,7 +738,7 @@ int get_free_pirq(struct domain *d, int
{
int i;
- ASSERT(spin_is_locked(&d->evtchn_lock));
+ ASSERT(spin_is_locked(&d->event_lock));
if ( type == MAP_PIRQ_TYPE_GSI )
{
@@ -768,7 +768,7 @@ int map_domain_pirq(
irq_desc_t *desc;
unsigned long flags;
- ASSERT(spin_is_locked(&d->evtchn_lock));
+ ASSERT(spin_is_locked(&d->event_lock));
if ( !IS_PRIV(current->domain) )
return -EPERM;
@@ -836,7 +836,7 @@ int unmap_domain_pirq(struct domain *d,
if ( !IS_PRIV(current->domain) )
return -EINVAL;
- ASSERT(spin_is_locked(&d->evtchn_lock));
+ ASSERT(spin_is_locked(&d->event_lock));
vector = d->arch.pirq_vector[pirq];
if ( vector <= 0 )
@@ -892,13 +892,13 @@ void free_domain_pirqs(struct domain *d)
{
int i;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
for ( i = 0; i < NR_PIRQS; i++ )
if ( d->arch.pirq_vector[i] > 0 )
unmap_domain_pirq(d, i);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
extern void dump_ioapic_irq_info(void);
diff -r a11ad61bdb5b -r 0033c944318f xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c Thu Oct 09 11:14:52 2008 +0100
+++ b/xen/arch/x86/physdev.c Thu Oct 09 11:17:51 2008 +0100
@@ -100,7 +100,7 @@ static int physdev_map_pirq(struct physd
}
/* Verify or get pirq. */
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( map->pirq < 0 )
{
if ( d->arch.vector_pirq[vector] )
@@ -145,7 +145,7 @@ static int physdev_map_pirq(struct physd
map->pirq = pirq;
done:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
free_irq_vector(vector);
free_domain:
@@ -169,9 +169,9 @@ static int physdev_unmap_pirq(struct phy
if ( d == NULL )
return -ESRCH;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
ret = unmap_domain_pirq(d, unmap->pirq);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
rcu_unlock_domain(d);
@@ -298,10 +298,10 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
irq_op.vector = assign_irq_vector(irq);
- spin_lock(&dom0->evtchn_lock);
+ spin_lock(&dom0->event_lock);
ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector,
MAP_PIRQ_TYPE_GSI, NULL);
- spin_unlock(&dom0->evtchn_lock);
+ spin_unlock(&dom0->event_lock);
if ( copy_to_guest(arg, &irq_op, 1) != 0 )
ret = -EFAULT;
diff -r a11ad61bdb5b -r 0033c944318f xen/common/event_channel.c
--- a/xen/common/event_channel.c Thu Oct 09 11:14:52 2008 +0100
+++ b/xen/common/event_channel.c Thu Oct 09 11:17:51 2008 +0100
@@ -133,7 +133,7 @@ static long evtchn_alloc_unbound(evtchn_
if ( rc )
return rc;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( (port = get_free_port(d)) < 0 )
ERROR_EXIT_DOM(port, d);
@@ -150,7 +150,7 @@ static long evtchn_alloc_unbound(evtchn_
alloc->port = port;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
rcu_unlock_domain(d);
return rc;
@@ -174,14 +174,14 @@ static long evtchn_bind_interdomain(evtc
/* Avoid deadlock by first acquiring lock of domain with smaller id. */
if ( ld < rd )
{
- spin_lock(&ld->evtchn_lock);
- spin_lock(&rd->evtchn_lock);
+ spin_lock(&ld->event_lock);
+ spin_lock(&rd->event_lock);
}
else
{
if ( ld != rd )
- spin_lock(&rd->evtchn_lock);
- spin_lock(&ld->evtchn_lock);
+ spin_lock(&rd->event_lock);
+ spin_lock(&ld->event_lock);
}
if ( (lport = get_free_port(ld)) < 0 )
@@ -216,9 +216,9 @@ static long evtchn_bind_interdomain(evtc
bind->local_port = lport;
out:
- spin_unlock(&ld->evtchn_lock);
+ spin_unlock(&ld->event_lock);
if ( ld != rd )
- spin_unlock(&rd->evtchn_lock);
+ spin_unlock(&rd->event_lock);
rcu_unlock_domain(rd);
@@ -244,7 +244,7 @@ static long evtchn_bind_virq(evtchn_bind
((v = d->vcpu[vcpu]) == NULL) )
return -ENOENT;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( v->virq_to_evtchn[virq] != 0 )
ERROR_EXIT(-EEXIST);
@@ -260,7 +260,7 @@ static long evtchn_bind_virq(evtchn_bind
v->virq_to_evtchn[virq] = bind->port = port;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return rc;
}
@@ -277,7 +277,7 @@ static long evtchn_bind_ipi(evtchn_bind_
(d->vcpu[vcpu] == NULL) )
return -ENOENT;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( (port = get_free_port(d)) < 0 )
ERROR_EXIT(port);
@@ -289,7 +289,7 @@ static long evtchn_bind_ipi(evtchn_bind_
bind->port = port;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return rc;
}
@@ -308,7 +308,7 @@ static long evtchn_bind_pirq(evtchn_bind
if ( !irq_access_permitted(d, pirq) )
return -EPERM;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( d->pirq_to_evtchn[pirq] != 0 )
ERROR_EXIT(-EEXIST);
@@ -333,7 +333,7 @@ static long evtchn_bind_pirq(evtchn_bind
bind->port = port;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return rc;
}
@@ -348,7 +348,7 @@ static long __evtchn_close(struct domain
long rc = 0;
again:
- spin_lock(&d1->evtchn_lock);
+ spin_lock(&d1->event_lock);
if ( !port_is_valid(d1, port1) )
{
@@ -404,12 +404,12 @@ static long __evtchn_close(struct domain
if ( d1 < d2 )
{
- spin_lock(&d2->evtchn_lock);
+ spin_lock(&d2->event_lock);
}
else if ( d1 != d2 )
{
- spin_unlock(&d1->evtchn_lock);
- spin_lock(&d2->evtchn_lock);
+ spin_unlock(&d1->event_lock);
+ spin_lock(&d2->event_lock);
goto again;
}
}
@@ -454,11 +454,11 @@ static long __evtchn_close(struct domain
if ( d2 != NULL )
{
if ( d1 != d2 )
- spin_unlock(&d2->evtchn_lock);
+ spin_unlock(&d2->event_lock);
put_domain(d2);
}
- spin_unlock(&d1->evtchn_lock);
+ spin_unlock(&d1->event_lock);
return rc;
}
@@ -476,11 +476,11 @@ int evtchn_send(struct domain *d, unsign
struct vcpu *rvcpu;
int rport, ret = 0;
- spin_lock(&ld->evtchn_lock);
+ spin_lock(&ld->event_lock);
if ( unlikely(!port_is_valid(ld, lport)) )
{
- spin_unlock(&ld->evtchn_lock);
+ spin_unlock(&ld->event_lock);
return -EINVAL;
}
@@ -489,7 +489,7 @@ int evtchn_send(struct domain *d, unsign
/* Guest cannot send via a Xen-attached event channel. */
if ( unlikely(lchn->consumer_is_xen) )
{
- spin_unlock(&ld->evtchn_lock);
+ spin_unlock(&ld->event_lock);
return -EINVAL;
}
@@ -527,7 +527,7 @@ int evtchn_send(struct domain *d, unsign
}
out:
- spin_unlock(&ld->evtchn_lock);
+ spin_unlock(&ld->event_lock);
return ret;
}
@@ -656,7 +656,7 @@ static long evtchn_status(evtchn_status_
if ( rc )
return rc;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( !port_is_valid(d, port) )
{
@@ -704,7 +704,7 @@ static long evtchn_status(evtchn_status_
status->vcpu = chn->notify_vcpu_id;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
rcu_unlock_domain(d);
return rc;
@@ -720,7 +720,7 @@ long evtchn_bind_vcpu(unsigned int port,
if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
return -ENOENT;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( !port_is_valid(d, port) )
{
@@ -756,7 +756,7 @@ long evtchn_bind_vcpu(unsigned int port,
}
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return rc;
}
@@ -768,11 +768,11 @@ static long evtchn_unmask(evtchn_unmask_
int port = unmask->port;
struct vcpu *v;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( unlikely(!port_is_valid(d, port)) )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -EINVAL;
}
@@ -790,7 +790,7 @@ static long evtchn_unmask(evtchn_unmask_
vcpu_mark_events_pending(v);
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return 0;
}
@@ -944,7 +944,7 @@ int alloc_unbound_xen_event_channel(
struct domain *d = local_vcpu->domain;
int port;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( (port = get_free_port(d)) < 0 )
goto out;
@@ -956,7 +956,7 @@ int alloc_unbound_xen_event_channel(
chn->u.unbound.remote_domid = remote_domid;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return port;
}
@@ -968,11 +968,11 @@ void free_xen_event_channel(
struct evtchn *chn;
struct domain *d = local_vcpu->domain;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( unlikely(d->is_dying) )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return;
}
@@ -981,7 +981,7 @@ void free_xen_event_channel(
BUG_ON(!chn->consumer_is_xen);
chn->consumer_is_xen = 0;
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
(void)__evtchn_close(d, port);
}
@@ -993,7 +993,7 @@ void notify_via_xen_event_channel(int lp
struct domain *ld = current->domain, *rd;
int rport;
- spin_lock(&ld->evtchn_lock);
+ spin_lock(&ld->event_lock);
ASSERT(port_is_valid(ld, lport));
lchn = evtchn_from_port(ld, lport);
@@ -1007,13 +1007,13 @@ void notify_via_xen_event_channel(int lp
evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
}
- spin_unlock(&ld->evtchn_lock);
+ spin_unlock(&ld->event_lock);
}
int evtchn_init(struct domain *d)
{
- spin_lock_init(&d->evtchn_lock);
+ spin_lock_init(&d->event_lock);
if ( get_free_port(d) != 0 )
return -EINVAL;
evtchn_from_port(d, 0)->state = ECS_RESERVED;
@@ -1027,7 +1027,7 @@ void evtchn_destroy(struct domain *d)
/* After this barrier no new event-channel allocations can occur. */
BUG_ON(!d->is_dying);
- spin_barrier(&d->evtchn_lock);
+ spin_barrier(&d->event_lock);
/* Close all existing event channels. */
for ( i = 0; port_is_valid(d, i); i++ )
@@ -1037,14 +1037,14 @@ void evtchn_destroy(struct domain *d)
}
/* Free all event-channel buckets. */
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
{
xsm_free_security_evtchn(d->evtchn[i]);
xfree(d->evtchn[i]);
d->evtchn[i] = NULL;
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
static void domain_dump_evtchn_info(struct domain *d)
@@ -1053,7 +1053,7 @@ static void domain_dump_evtchn_info(stru
printk("Domain %d polling vCPUs: %08lx\n", d->domain_id, d->poll_mask[0]);
- if ( !spin_trylock(&d->evtchn_lock) )
+ if ( !spin_trylock(&d->event_lock) )
return;
printk("Event channel information for domain %d:\n",
@@ -1094,7 +1094,7 @@ static void domain_dump_evtchn_info(stru
printk(" x=%d\n", chn->consumer_is_xen);
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
static void dump_evtchn_info(unsigned char key)
diff -r a11ad61bdb5b -r 0033c944318f xen/drivers/passthrough/io.c
--- a/xen/drivers/passthrough/io.c Thu Oct 09 11:14:52 2008 +0100
+++ b/xen/drivers/passthrough/io.c Thu Oct 09 11:17:51 2008 +0100
@@ -30,7 +30,7 @@ static void pt_irq_time_out(void *data)
struct dev_intx_gsi_link *digl;
uint32_t device, intx;
- spin_lock(&irq_map->dom->evtchn_lock);
+ spin_lock(&irq_map->dom->event_lock);
dpci = domain_get_irq_dpci(irq_map->dom);
ASSERT(dpci);
@@ -46,7 +46,7 @@ static void pt_irq_time_out(void *data)
clear_bit(machine_gsi, dpci->dirq_mask);
vector = domain_irq_to_vector(irq_map->dom, machine_gsi);
dpci->mirq[machine_gsi].pending = 0;
- spin_unlock(&irq_map->dom->evtchn_lock);
+ spin_unlock(&irq_map->dom->event_lock);
pirq_guest_eoi(irq_map->dom, machine_gsi);
}
@@ -62,7 +62,7 @@ int pt_irq_create_bind_vtd(
if ( pirq < 0 || pirq >= NR_PIRQS )
return -EINVAL;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci == NULL )
@@ -70,7 +70,7 @@ int pt_irq_create_bind_vtd(
hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
if ( hvm_irq_dpci == NULL )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -ENOMEM;
}
memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
@@ -81,7 +81,7 @@ int pt_irq_create_bind_vtd(
if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
{
xfree(hvm_irq_dpci);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -EINVAL;
}
@@ -101,7 +101,7 @@ int pt_irq_create_bind_vtd(
||hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] != pirq)
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -EBUSY;
}
}
@@ -117,7 +117,7 @@ int pt_irq_create_bind_vtd(
digl = xmalloc(struct dev_intx_gsi_link);
if ( !digl )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -ENOMEM;
}
@@ -149,7 +149,7 @@ int pt_irq_create_bind_vtd(
"VT-d irq bind: m_irq = %x device = %x intx = %x\n",
machine_gsi, device, intx);
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return 0;
}
@@ -172,13 +172,13 @@ int pt_irq_destroy_bind_vtd(
"pt_irq_destroy_bind_vtd: machine_gsi=%d "
"guest_gsi=%d, device=%d, intx=%d.\n",
machine_gsi, guest_gsi, device, intx);
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci == NULL )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -EINVAL;
}
@@ -213,7 +213,7 @@ int pt_irq_destroy_bind_vtd(
clear_bit(machine_gsi, hvm_irq_dpci->mapping);
}
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
gdprintk(XENLOG_INFO,
"XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
machine_gsi, device, intx);
@@ -254,7 +254,7 @@ void hvm_dpci_msi_eoi(struct domain *d,
if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
return;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
pirq = hvm_irq_dpci->msi_gvec_pirq[vector];
if ( ( pirq >= 0 ) && (pirq < NR_PIRQS) &&
@@ -265,7 +265,7 @@ void hvm_dpci_msi_eoi(struct domain *d,
desc = domain_spin_lock_irq_desc(d, pirq, NULL);
if (!desc)
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return;
}
@@ -275,7 +275,7 @@ void hvm_dpci_msi_eoi(struct domain *d,
pirq_guest_eoi(d, pirq);
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
@@ -293,14 +293,14 @@ void hvm_dpci_eoi(struct domain *d, unsi
return;
}
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
hvm_irq_dpci = domain_get_irq_dpci(d);
if((hvm_irq_dpci == NULL) ||
(guest_gsi >= NR_ISAIRQS &&
!hvm_irq_dpci->girq[guest_gsi].valid) )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return;
}
@@ -322,5 +322,5 @@ void hvm_dpci_eoi(struct domain *d, unsi
pirq_guest_eoi(d, machine_gsi);
}
}
- spin_unlock(&d->evtchn_lock);
-}
+ spin_unlock(&d->event_lock);
+}
diff -r a11ad61bdb5b -r 0033c944318f xen/drivers/passthrough/pci.c
--- a/xen/drivers/passthrough/pci.c Thu Oct 09 11:14:52 2008 +0100
+++ b/xen/drivers/passthrough/pci.c Thu Oct 09 11:17:51 2008 +0100
@@ -165,7 +165,7 @@ static void pci_clean_dpci_irqs(struct d
if ( !is_hvm_domain(d) && !need_iommu(d) )
return;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci != NULL )
{
@@ -189,7 +189,7 @@ static void pci_clean_dpci_irqs(struct d
d->arch.hvm_domain.irq.dpci = NULL;
xfree(hvm_irq_dpci);
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
void pci_release_devices(struct domain *d)
diff -r a11ad61bdb5b -r 0033c944318f xen/drivers/passthrough/vtd/x86/vtd.c
--- a/xen/drivers/passthrough/vtd/x86/vtd.c Thu Oct 09 11:14:52 2008 +0100
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c Thu Oct 09 11:17:51 2008 +0100
@@ -93,13 +93,13 @@ void hvm_dpci_isairq_eoi(struct domain *
if ( !vtd_enabled)
return;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
dpci = domain_get_irq_dpci(d);
if ( !dpci || !test_bit(isairq, dpci->isairq_map) )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return;
}
/* Multiple mirq may be mapped to one isa irq */
@@ -121,5 +121,5 @@ void hvm_dpci_isairq_eoi(struct domain *
}
}
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
diff -r a11ad61bdb5b -r 0033c944318f xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h Thu Oct 09 11:14:52 2008 +0100
+++ b/xen/include/asm-x86/domain.h Thu Oct 09 11:17:51 2008 +0100
@@ -235,7 +235,7 @@ struct arch_domain
/* Shadow translated domain: P2M mapping */
pagetable_t phys_table;
- /* NB. protected by d->evtchn_lock and by irq_desc[vector].lock */
+ /* NB. protected by d->event_lock and by irq_desc[vector].lock */
int vector_pirq[NR_VECTORS];
int pirq_vector[NR_PIRQS];
diff -r a11ad61bdb5b -r 0033c944318f xen/include/asm-x86/hvm/irq.h
--- a/xen/include/asm-x86/hvm/irq.h Thu Oct 09 11:14:52 2008 +0100
+++ b/xen/include/asm-x86/hvm/irq.h Thu Oct 09 11:17:51 2008 +0100
@@ -63,7 +63,7 @@ struct hvm_girq_dpci_mapping {
#define NR_ISAIRQS 16
#define NR_LINK 4
-/* Protected by domain's evtchn_lock */
+/* Protected by domain's event_lock */
struct hvm_irq_dpci {
/* Machine IRQ to guest device/intx mapping. */
DECLARE_BITMAP(mapping, NR_PIRQS);
diff -r a11ad61bdb5b -r 0033c944318f xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Thu Oct 09 11:14:52 2008 +0100
+++ b/xen/include/xen/sched.h Thu Oct 09 11:17:51 2008 +0100
@@ -188,7 +188,7 @@ struct domain
/* Event channel information. */
struct evtchn *evtchn[NR_EVTCHN_BUCKETS];
- spinlock_t evtchn_lock;
+ spinlock_t event_lock;
struct grant_table *grant_table;
diff -r a11ad61bdb5b -r 0033c944318f xen/xsm/acm/acm_simple_type_enforcement_hooks.c
--- a/xen/xsm/acm/acm_simple_type_enforcement_hooks.c Thu Oct 09 11:14:52 2008 +0100
+++ b/xen/xsm/acm/acm_simple_type_enforcement_hooks.c Thu Oct 09 11:17:51 2008 +0100
@@ -248,11 +248,11 @@ ste_init_state(struct acm_sized_buffer *
/* a) check for event channel conflicts */
for ( bucket = 0; bucket < NR_EVTCHN_BUCKETS; bucket++ )
{
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
ports = d->evtchn[bucket];
if ( ports == NULL)
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
break;
}
@@ -280,7 +280,7 @@ ste_init_state(struct acm_sized_buffer *
printkd("%s: Policy violation in event channel domain "
"%x -> domain %x.\n",
__func__, d->domain_id, rdomid);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
acm_array_append_tuple(errors,
ACM_EVTCHN_SHARING_VIOLATION,
@@ -288,7 +288,7 @@ ste_init_state(struct acm_sized_buffer *
goto out;
}
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
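
For reference, the only non-trivial locking among the renamed sites is the two-domain
case (evtchn_bind_interdomain() and __evtchn_close() above), which avoids deadlock by
always acquiring the two locks in a fixed order. A sketch of that idiom as a standalone
helper -- lock_two_event_locks() is hypothetical; the patch open-codes it at each site:

    /* Hypothetical helper restating the open-coded idiom above: take both
     * domains' event locks in a fixed (pointer) order, so two CPUs binding
     * the same pair of domains in opposite directions cannot deadlock, and
     * take only one lock when both ends are the same domain. */
    static void lock_two_event_locks(struct domain *ld, struct domain *rd)
    {
        if ( ld < rd )
        {
            spin_lock(&ld->event_lock);
            spin_lock(&rd->event_lock);
        }
        else
        {
            if ( ld != rd )
                spin_lock(&rd->event_lock);
            spin_lock(&ld->event_lock);
        }
    }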