# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1223547292 -3600
# Node ID a11ad61bdb5b188a8116b533c87c31d6e9bd62d4
# Parent b8f329d2c074a06b47f3be2b4e0bfe1ac5b232e5
Fix lock issue for hvm pass-through domain
This patch protects the hvm_irq_dpci structure with the domain's
evtchn_lock, so access to the domain's pirq-to-vector mapping is also
protected.
Signed-off-by: Jiang, Yunhong <yunhong.jiang@xxxxxxxxx>
---
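A minimal sketch of the locking rule the patch adopts (illustrative
only; the helper name example_dpci_access is not part of the patch).
All access to d->arch.hvm_domain.irq.dpci, obtained via
domain_get_irq_dpci(), and to the per-pirq mirq[]/mapping state must
now happen under d->evtchn_lock:

    static void example_dpci_access(struct domain *d, int pirq)
    {
        struct hvm_irq_dpci *dpci;

        spin_lock(&d->evtchn_lock);
        dpci = domain_get_irq_dpci(d);  /* only stable under the lock */
        if ( dpci != NULL && test_bit(pirq, dpci->mapping) )
            dpci->mirq[pirq].pending++; /* no separate dirq_lock needed */
        spin_unlock(&d->evtchn_lock);
    }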
 xen/arch/x86/hvm/svm/intr.c           |    5
 xen/arch/x86/hvm/vmsi.c               |    2
 xen/arch/x86/hvm/vmx/intr.c           |   15 +-
 xen/arch/x86/irq.c                    |    2
 xen/drivers/passthrough/io.c          |  182 ++++++++++++++++++++--------------
 xen/drivers/passthrough/pci.c         |   14 +-
 xen/drivers/passthrough/vtd/x86/vtd.c |   26 ++--
 xen/include/asm-x86/hvm/irq.h         |    6 -
 xen/include/xen/irq.h                 |    2
 9 files changed, 152 insertions(+), 102 deletions(-)
diff -r b8f329d2c074 -r a11ad61bdb5b xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/arch/x86/hvm/svm/intr.c Thu Oct 09 11:14:52 2008 +0100
@@ -124,9 +124,11 @@ static void svm_dirq_assist(struct vcpu
if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
continue;
+ spin_lock(&d->evtchn_lock);
if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
{
hvm_pci_msi_assert(d, irq);
+ spin_unlock(&d->evtchn_lock);
continue;
}
@@ -137,9 +139,7 @@ static void svm_dirq_assist(struct vcpu
device = digl->device;
intx = digl->intx;
hvm_pci_intx_assert(d, device, intx);
- spin_lock(&hvm_irq_dpci->dirq_lock);
hvm_irq_dpci->mirq[irq].pending++;
- spin_unlock(&hvm_irq_dpci->dirq_lock);
}
/*
@@ -151,6 +151,7 @@ static void svm_dirq_assist(struct vcpu
*/
set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
NOW() + PT_IRQ_TIME_OUT);
+ spin_unlock(&d->evtchn_lock);
}
}
diff -r b8f329d2c074 -r a11ad61bdb5b xen/arch/x86/hvm/vmsi.c
--- a/xen/arch/x86/hvm/vmsi.c Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/arch/x86/hvm/vmsi.c Thu Oct 09 11:14:52 2008 +0100
@@ -134,7 +134,7 @@ int vmsi_deliver(struct domain *d, int p
"vector=%x trig_mode=%x\n",
dest, dest_mode, delivery_mode, vector, trig_mode);
- if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI) )
+ if ( !test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags) )
{
gdprintk(XENLOG_WARNING, "pirq %x not msi \n", pirq);
return 0;
diff -r b8f329d2c074 -r a11ad61bdb5b xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c Thu Oct 09 11:14:52 2008 +0100
@@ -127,11 +127,13 @@ static void vmx_dirq_assist(struct vcpu
if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
continue;
- if ( test_bit(_HVM_IRQ_DPCI_MSI,
- &hvm_irq_dpci->mirq[irq].flags) )
- {
- hvm_pci_msi_assert(d, irq);
- continue;
- }
+ spin_lock(&d->evtchn_lock);
+ if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
+ {
+ hvm_pci_msi_assert(d, irq);
+ spin_unlock(&d->evtchn_lock);
+ continue;
+ }
stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
@@ -140,9 +142,7 @@ static void vmx_dirq_assist(struct vcpu
device = digl->device;
intx = digl->intx;
hvm_pci_intx_assert(d, device, intx);
- spin_lock(&hvm_irq_dpci->dirq_lock);
hvm_irq_dpci->mirq[irq].pending++;
- spin_unlock(&hvm_irq_dpci->dirq_lock);
}
/*
@@ -154,6 +154,7 @@ static void vmx_dirq_assist(struct vcpu
*/
set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
NOW() + PT_IRQ_TIME_OUT);
+ spin_unlock(&d->evtchn_lock);
}
}
diff -r b8f329d2c074 -r a11ad61bdb5b xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/arch/x86/irq.c Thu Oct 09 11:14:52 2008 +0100
@@ -285,7 +285,7 @@ static void __do_IRQ_guest(int vector)
* The descriptor is returned locked. This function is safe against changes
* to the per-domain irq-to-vector mapping.
*/
-static irq_desc_t *domain_spin_lock_irq_desc(
+irq_desc_t *domain_spin_lock_irq_desc(
struct domain *d, int irq, unsigned long *pflags)
{
unsigned int vector;
diff -r b8f329d2c074 -r a11ad61bdb5b xen/drivers/passthrough/io.c
--- a/xen/drivers/passthrough/io.c Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/drivers/passthrough/io.c Thu Oct 09 11:14:52 2008 +0100
@@ -26,10 +26,14 @@ static void pt_irq_time_out(void *data)
struct hvm_mirq_dpci_mapping *irq_map = data;
unsigned int guest_gsi, machine_gsi = 0;
int vector;
- struct hvm_irq_dpci *dpci = domain_get_irq_dpci(irq_map->dom);
+ struct hvm_irq_dpci *dpci = NULL;
struct dev_intx_gsi_link *digl;
uint32_t device, intx;
+ spin_lock(&irq_map->dom->evtchn_lock);
+
+ dpci = domain_get_irq_dpci(irq_map->dom);
+ ASSERT(dpci);
list_for_each_entry ( digl, &irq_map->digl_list, list )
{
guest_gsi = digl->gsi;
@@ -41,55 +45,65 @@ static void pt_irq_time_out(void *data)
clear_bit(machine_gsi, dpci->dirq_mask);
vector = domain_irq_to_vector(irq_map->dom, machine_gsi);
- stop_timer(&dpci->hvm_timer[vector]);
- spin_lock(&dpci->dirq_lock);
dpci->mirq[machine_gsi].pending = 0;
- spin_unlock(&dpci->dirq_lock);
+ spin_unlock(&irq_map->dom->evtchn_lock);
pirq_guest_eoi(irq_map->dom, machine_gsi);
}
int pt_irq_create_bind_vtd(
struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
- struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *hvm_irq_dpci = NULL;
uint32_t machine_gsi, guest_gsi;
uint32_t device, intx, link;
struct dev_intx_gsi_link *digl;
-
+ int pirq = pt_irq_bind->machine_irq;
+
+ if ( pirq < 0 || pirq >= NR_PIRQS )
+ return -EINVAL;
+
+ spin_lock(&d->evtchn_lock);
+
+ hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci == NULL )
{
hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
if ( hvm_irq_dpci == NULL )
+ {
+ spin_unlock(&d->evtchn_lock);
return -ENOMEM;
-
+ }
memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
- spin_lock_init(&hvm_irq_dpci->dirq_lock);
for ( int i = 0; i < NR_IRQS; i++ )
INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
-
- if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
- xfree(hvm_irq_dpci);
+ }
+
+ if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
+ {
+ xfree(hvm_irq_dpci);
+ spin_unlock(&d->evtchn_lock);
+ return -EINVAL;
}
if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
{
- int pirq = pt_irq_bind->machine_irq;
-
- if ( pirq < 0 || pirq >= NR_IRQS )
- return -EINVAL;
-
- if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_VALID ) )
- {
- hvm_irq_dpci->mirq[pirq].flags |= HVM_IRQ_DPCI_VALID |
- HVM_IRQ_DPCI_MSI ;
+
+ if ( !test_and_set_bit(pirq, hvm_irq_dpci->mapping))
+ {
+ set_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags);
+ hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
+ hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
+ hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
+ /* bind after hvm_irq_dpci is setup to avoid race with irq handler */
pirq_guest_bind(d->vcpu[0], pirq, 0);
}
-
- hvm_irq_dpci->mirq[pirq].flags |= HVM_IRQ_DPCI_VALID |HVM_IRQ_DPCI_MSI ;
- hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
- hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
- hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
-
+ else if (hvm_irq_dpci->mirq[pirq].gmsi.gvec != pt_irq_bind->u.msi.gvec
+ ||hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] != pirq)
+
+ {
+ spin_unlock(&d->evtchn_lock);
+ return -EBUSY;
+ }
}
else
{
@@ -102,7 +116,10 @@ int pt_irq_create_bind_vtd(
digl = xmalloc(struct dev_intx_gsi_link);
if ( !digl )
+ {
+ spin_unlock(&d->evtchn_lock);
return -ENOMEM;
+ }
digl->device = device;
digl->intx = intx;
@@ -117,11 +134,11 @@ int pt_irq_create_bind_vtd(
hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
/* Bind the same mirq once in the same domain */
- if ( !(hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
- {
- hvm_irq_dpci->mirq[machine_gsi].flags |= HVM_IRQ_DPCI_VALID;
+ if ( !test_and_set_bit(machine_gsi, hvm_irq_dpci->mapping))
+ {
hvm_irq_dpci->mirq[machine_gsi].dom = d;
+ /* Init timer before binding */
init_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)],
pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
/* Deal with gsi for legacy devices */
@@ -132,37 +149,45 @@ int pt_irq_create_bind_vtd(
"VT-d irq bind: m_irq = %x device = %x intx = %x\n",
machine_gsi, device, intx);
}
+ spin_unlock(&d->evtchn_lock);
return 0;
}
int pt_irq_destroy_bind_vtd(
struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
- struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *hvm_irq_dpci = NULL;
uint32_t machine_gsi, guest_gsi;
uint32_t device, intx, link;
struct list_head *digl_list, *tmp;
struct dev_intx_gsi_link *digl;
-
- if ( hvm_irq_dpci == NULL )
- return 0;
machine_gsi = pt_irq_bind->machine_irq;
device = pt_irq_bind->u.pci.device;
intx = pt_irq_bind->u.pci.intx;
guest_gsi = hvm_pci_intx_gsi(device, intx);
link = hvm_pci_intx_link(device, intx);
- hvm_irq_dpci->link_cnt[link]--;
gdprintk(XENLOG_INFO,
"pt_irq_destroy_bind_vtd: machine_gsi=%d "
"guest_gsi=%d, device=%d, intx=%d.\n",
machine_gsi, guest_gsi, device, intx);
+ spin_lock(&d->evtchn_lock);
+
+ hvm_irq_dpci = domain_get_irq_dpci(d);
+
+ if ( hvm_irq_dpci == NULL )
+ {
+ spin_unlock(&d->evtchn_lock);
+ return -EINVAL;
+ }
+
+ hvm_irq_dpci->link_cnt[link]--;
memset(&hvm_irq_dpci->girq[guest_gsi], 0,
sizeof(struct hvm_girq_dpci_mapping));
/* clear the mirq info */
- if ( (hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
+ if ( test_bit(machine_gsi, hvm_irq_dpci->mapping))
{
list_for_each_safe ( digl_list, tmp,
&hvm_irq_dpci->mirq[machine_gsi].digl_list )
@@ -185,9 +210,10 @@ int pt_irq_destroy_bind_vtd(
kill_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)]);
hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
hvm_irq_dpci->mirq[machine_gsi].flags = 0;
- }
- }
-
+ clear_bit(machine_gsi, hvm_irq_dpci->mapping);
+ }
+ }
+ spin_unlock(&d->evtchn_lock);
gdprintk(XENLOG_INFO,
"XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
machine_gsi, device, intx);
@@ -199,8 +225,9 @@ int hvm_do_IRQ_dpci(struct domain *d, un
{
struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
+ ASSERT(spin_is_locked(&irq_desc[domain_irq_to_vector(d, mirq)].lock));
if ( !iommu_enabled || (d == dom0) || !dpci ||
- !dpci->mirq[mirq].flags & HVM_IRQ_DPCI_VALID )
+ !test_bit(mirq, dpci->mapping))
return 0;
/*
@@ -218,69 +245,82 @@ int hvm_do_IRQ_dpci(struct domain *d, un
return 1;
}
-
void hvm_dpci_msi_eoi(struct domain *d, int vector)
{
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+ irq_desc_t *desc;
int pirq;
- unsigned long flags;
- irq_desc_t *desc;
if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
return;
+ spin_lock(&d->evtchn_lock);
pirq = hvm_irq_dpci->msi_gvec_pirq[vector];
if ( ( pirq >= 0 ) && (pirq < NR_PIRQS) &&
- (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_VALID) &&
- (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI) )
- {
- int vec;
- vec = domain_irq_to_vector(d, pirq);
- desc = &irq_desc[vec];
-
- spin_lock_irqsave(&desc->lock, flags);
- desc->status &= ~IRQ_INPROGRESS;
- spin_unlock_irqrestore(&desc->lock, flags);
-
- pirq_guest_eoi(d, pirq);
- }
+ test_bit(pirq, hvm_irq_dpci->mapping) &&
+ (test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags)))
+ {
+ BUG_ON(!local_irq_is_enabled());
+ desc = domain_spin_lock_irq_desc(d, pirq, NULL);
+ if (!desc)
+ {
+ spin_unlock(&d->evtchn_lock);
+ return;
+ }
+
+ desc->status &= ~IRQ_INPROGRESS;
+ spin_unlock_irq(&desc->lock);
+
+ pirq_guest_eoi(d, pirq);
+ }
+
+ spin_unlock(&d->evtchn_lock);
}
void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
union vioapic_redir_entry *ent)
{
- struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *hvm_irq_dpci = NULL;
uint32_t device, intx, machine_gsi;
- if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
+ if ( !iommu_enabled)
+ return;
+
+ if ( guest_gsi < NR_ISAIRQS )
+ {
+ hvm_dpci_isairq_eoi(d, guest_gsi);
+ return;
+ }
+
+ spin_lock(&d->evtchn_lock);
+ hvm_irq_dpci = domain_get_irq_dpci(d);
+
+ if((hvm_irq_dpci == NULL) ||
(guest_gsi >= NR_ISAIRQS &&
!hvm_irq_dpci->girq[guest_gsi].valid) )
+ {
+ spin_unlock(&d->evtchn_lock);
return;
-
- if ( guest_gsi < NR_ISAIRQS )
- {
- hvm_dpci_isairq_eoi(d, guest_gsi);
- return;
- }
-
- machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
+ }
+
device = hvm_irq_dpci->girq[guest_gsi].device;
intx = hvm_irq_dpci->girq[guest_gsi].intx;
hvm_pci_intx_deassert(d, device, intx);
- spin_lock(&hvm_irq_dpci->dirq_lock);
+ machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
if ( --hvm_irq_dpci->mirq[machine_gsi].pending == 0 )
{
- spin_unlock(&hvm_irq_dpci->dirq_lock);
-
if ( (ent == NULL) || !ent->fields.mask )
{
+ /*
+ * No need to get vector lock for timer
+ * since interrupt is still not EOIed
+ */
stop_timer(&hvm_irq_dpci->hvm_timer[
domain_irq_to_vector(d, machine_gsi)]);
pirq_guest_eoi(d, machine_gsi);
}
}
- else
- spin_unlock(&hvm_irq_dpci->dirq_lock);
-}
+ spin_unlock(&d->evtchn_lock);
+}
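The hvm_dpci_msi_eoi() change above also fixes the lock nesting: the
domain's evtchn_lock is taken first, and the per-vector desc->lock is
acquired inside it via domain_spin_lock_irq_desc(). A condensed sketch
of that ordering, using the same names as the hunk above:

    spin_lock(&d->evtchn_lock);                       /* outer lock */
    desc = domain_spin_lock_irq_desc(d, pirq, NULL);  /* takes desc->lock */
    if ( desc != NULL )
    {
        desc->status &= ~IRQ_INPROGRESS;
        spin_unlock_irq(&desc->lock);                 /* inner lock dropped */
        pirq_guest_eoi(d, pirq);
    }
    spin_unlock(&d->evtchn_lock);                     /* outer lock dropped */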
diff -r b8f329d2c074 -r a11ad61bdb5b xen/drivers/passthrough/pci.c
--- a/xen/drivers/passthrough/pci.c Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/drivers/passthrough/pci.c Thu Oct 09 11:14:52 2008 +0100
@@ -154,7 +154,7 @@ int pci_remove_device(u8 bus, u8 devfn)
static void pci_clean_dpci_irqs(struct domain *d)
{
- struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *hvm_irq_dpci = NULL;
uint32_t i;
struct list_head *digl_list, *tmp;
struct dev_intx_gsi_link *digl;
@@ -165,13 +165,14 @@ static void pci_clean_dpci_irqs(struct d
if ( !is_hvm_domain(d) && !need_iommu(d) )
return;
+ spin_lock(&d->evtchn_lock);
+ hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci != NULL )
{
- for ( i = 0; i < NR_IRQS; i++ )
- {
- if ( !(hvm_irq_dpci->mirq[i].flags & HVM_IRQ_DPCI_VALID) )
- continue;
-
+ for ( i = find_first_bit(hvm_irq_dpci->mapping, NR_PIRQS);
+ i < NR_PIRQS;
+ i = find_next_bit(hvm_irq_dpci->mapping, NR_PIRQS, i + 1) )
+ {
pirq_guest_unbind(d, i);
kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
@@ -188,6 +189,7 @@ static void pci_clean_dpci_irqs(struct d
d->arch.hvm_domain.irq.dpci = NULL;
xfree(hvm_irq_dpci);
}
+ spin_unlock(&d->evtchn_lock);
}
void pci_release_devices(struct domain *d)
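pci_clean_dpci_irqs() above now walks only the pirqs recorded in the
new mapping bitmap instead of scanning every entry for a VALID flag.
The idiom in isolation (a sketch; the caller must hold d->evtchn_lock):

    unsigned int i;

    for ( i = find_first_bit(dpci->mapping, NR_PIRQS);
          i < NR_PIRQS;
          i = find_next_bit(dpci->mapping, NR_PIRQS, i + 1) )
    {
        /* i is a machine pirq currently bound to this domain */
    }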
diff -r b8f329d2c074 -r a11ad61bdb5b xen/drivers/passthrough/vtd/x86/vtd.c
--- a/xen/drivers/passthrough/vtd/x86/vtd.c Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c Thu Oct 09 11:14:52 2008 +0100
@@ -85,37 +85,41 @@ void hvm_dpci_isairq_eoi(struct domain *
void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
- struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *dpci = NULL;
struct dev_intx_gsi_link *digl, *tmp;
int i;
ASSERT(isairq < NR_ISAIRQS);
- if ( !vtd_enabled || !dpci ||
- !test_bit(isairq, dpci->isairq_map) )
+ if ( !vtd_enabled)
return;
+ spin_lock(&d->evtchn_lock);
+
+ dpci = domain_get_irq_dpci(d);
+
+ if ( !dpci || !test_bit(isairq, dpci->isairq_map) )
+ {
+ spin_unlock(&d->evtchn_lock);
+ return;
+ }
/* Multiple mirq may be mapped to one isa irq */
- for ( i = 0; i < NR_IRQS; i++ )
+ for ( i = find_first_bit(dpci->mapping, NR_PIRQS);
+ i < NR_PIRQS;
+ i = find_next_bit(dpci->mapping, NR_PIRQS, i + 1) )
{
- if ( !dpci->mirq[i].flags & HVM_IRQ_DPCI_VALID )
- continue;
-
list_for_each_entry_safe ( digl, tmp,
&dpci->mirq[i].digl_list, list )
{
if ( hvm_irq->pci_link.route[digl->link] == isairq )
{
hvm_pci_intx_deassert(d, digl->device, digl->intx);
- spin_lock(&dpci->dirq_lock);
if ( --dpci->mirq[i].pending == 0 )
{
- spin_unlock(&dpci->dirq_lock);
stop_timer(&dpci->hvm_timer[domain_irq_to_vector(d, i)]);
pirq_guest_eoi(d, i);
}
- else
- spin_unlock(&dpci->dirq_lock);
}
}
}
+ spin_unlock(&d->evtchn_lock);
}
diff -r b8f329d2c074 -r a11ad61bdb5b xen/include/asm-x86/hvm/irq.h
--- a/xen/include/asm-x86/hvm/irq.h Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/include/asm-x86/hvm/irq.h Thu Oct 09 11:14:52 2008 +0100
@@ -25,6 +25,7 @@
#include <xen/types.h>
#include <xen/spinlock.h>
#include <asm/irq.h>
+#include <asm/pirq.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vioapic.h>
@@ -38,8 +39,6 @@ struct dev_intx_gsi_link {
uint8_t link;
};
-#define HVM_IRQ_DPCI_VALID 0x1
-#define HVM_IRQ_DPCI_MSI 0x2
#define _HVM_IRQ_DPCI_MSI 0x1
struct hvm_gmsi_info {
@@ -64,9 +63,10 @@ struct hvm_girq_dpci_mapping {
#define NR_ISAIRQS 16
#define NR_LINK 4
+/* Protected by domain's evtchn_lock */
struct hvm_irq_dpci {
- spinlock_t dirq_lock;
/* Machine IRQ to guest device/intx mapping. */
+ DECLARE_BITMAP(mapping, NR_PIRQS);
struct hvm_mirq_dpci_mapping mirq[NR_IRQS];
/* Guest IRQ to guest device/intx mapping. */
struct hvm_girq_dpci_mapping girq[NR_IRQS];
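With HVM_IRQ_DPCI_VALID folded into the mapping bitmap, "bound" becomes
an atomic test-and-set on the bitmap, and the MSI property is a bit
number (_HVM_IRQ_DPCI_MSI) manipulated with bitops. A sketch of the two
sides, mirroring the io.c hunks above:

    /* Writer: first binder wins; the bitmap replaces the VALID flag. */
    if ( !test_and_set_bit(pirq, dpci->mapping) )
        set_bit(_HVM_IRQ_DPCI_MSI, &dpci->mirq[pirq].flags);

    /* Reader: test the bit number instead of masking the flags word. */
    if ( test_bit(_HVM_IRQ_DPCI_MSI, &dpci->mirq[pirq].flags) )
        hvm_pci_msi_assert(d, pirq);   /* MSI delivery path */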
diff -r b8f329d2c074 -r a11ad61bdb5b xen/include/xen/irq.h
--- a/xen/include/xen/irq.h Thu Oct 09 11:08:13 2008 +0100
+++ b/xen/include/xen/irq.h Thu Oct 09 11:14:52 2008 +0100
@@ -78,6 +78,8 @@ extern int pirq_guest_unmask(struct doma
extern int pirq_guest_unmask(struct domain *d);
extern int pirq_guest_bind(struct vcpu *v, int irq, int will_share);
extern void pirq_guest_unbind(struct domain *d, int irq);
+extern irq_desc_t *domain_spin_lock_irq_desc(
+ struct domain *d, int irq, unsigned long *pflags);
static inline void set_native_irq_info(int irq, cpumask_t mask)
{
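domain_spin_lock_irq_desc() is now exported so the passthrough code can
look up and lock a domain irq's descriptor in one step. A minimal usage
sketch with a NULL pflags argument, matching hvm_dpci_msi_eoi() above:

    irq_desc_t *desc = domain_spin_lock_irq_desc(d, irq, NULL);

    if ( desc != NULL )                /* NULL if the lookup fails */
    {
        /* ... inspect or update desc with its lock held ... */
        spin_unlock_irq(&desc->lock);
    }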