# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1235127917 0
# Node ID 4771bceb188997073b5b93271f31064cd2610dbe
# Parent 84af3ded5b02ba0b3f8647e3bfa993725428633b
AMD IOMMU: clean up spinlock usage to satisfy check_lock().

check_lock() requires each spinlock to be acquired either always with
interrupts enabled or always with them disabled. The mapping_lock and
int_remap_table_lock are never taken from interrupt context, so use
plain spin_lock() instead of spin_lock_irqsave(), and drop the locking
entirely from the __init-only interrupt remap table setup/teardown.
Also remove an unbalanced spin_unlock(&pcidevs_lock) from
reassign_device() and take mapping_lock when deallocating the IO page
tables.
Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
---
xen/drivers/passthrough/amd/iommu_init.c | 3 --
xen/drivers/passthrough/amd/iommu_intr.c | 14 ------------
xen/drivers/passthrough/amd/iommu_map.c | 31 +++++++++++++---------------
xen/drivers/passthrough/amd/pci_amd_iommu.c | 23 +++++++++-----------
4 files changed, 26 insertions(+), 45 deletions(-)
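
For reference, check_lock() (in xen/common/spinlock.c) asserts that
every spinlock is used consistently: either always acquired with
interrupts enabled, or always with them disabled, since mixing the two
invites self-deadlock against a local interrupt handler. A simplified
sketch of the invariant it enforces (illustrative, not Xen's exact
code; the lock_debug layout here is an assumption):

/*
 * A lock is tagged on first acquisition as IRQ-safe (taken with
 * interrupts disabled) or not; any later acquisition in the opposite
 * state trips an assertion.
 */
struct lock_debug {
    int irq_safe; /* -1: not yet seen, 0: IRQs-on user, 1: IRQs-off */
};

static void check_lock(struct lock_debug *debug)
{
    int irq_safe = !local_irq_is_enabled();

    if ( unlikely(debug->irq_safe != irq_safe) )
    {
        int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
        BUG_ON(seen == !irq_safe); /* mixed IRQ-safe/unsafe usage */
    }
}

The locks touched below are only ever taken outside interrupt context,
so the plain spin_lock() variants keep check_lock() satisfied.
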
diff -r 84af3ded5b02 -r 4771bceb1889 xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c Thu Feb 19 11:07:33 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_init.c Fri Feb 20 11:05:17 2009 +0000
@@ -37,9 +37,6 @@ struct list_head amd_iommu_head;
struct list_head amd_iommu_head;
struct table_struct device_table;
-extern void *int_remap_table;
-extern spinlock_t int_remap_table_lock;
-
static int __init map_iommu_mmio_region(struct amd_iommu *iommu)
{
unsigned long mfn;
diff -r 84af3ded5b02 -r 4771bceb1889 xen/drivers/passthrough/amd/iommu_intr.c
--- a/xen/drivers/passthrough/amd/iommu_intr.c Thu Feb 19 11:07:33 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_intr.c Fri Feb 20 11:05:17 2009 +0000
@@ -23,7 +23,7 @@
#include <asm/hvm/svm/amd-iommu-proto.h>
#define INTREMAP_TABLE_ORDER 1
-DEFINE_SPINLOCK(int_remap_table_lock);
+static DEFINE_SPINLOCK(int_remap_table_lock);
void *int_remap_table = NULL;
static u8 *get_intremap_entry(u8 vector, u8 dm)
@@ -110,21 +110,13 @@ static void update_intremap_entry_from_i
int __init amd_iommu_setup_intremap_table(void)
{
- unsigned long flags;
-
- spin_lock_irqsave(&int_remap_table_lock, flags);
-
if ( int_remap_table == NULL )
{
int_remap_table = __alloc_amd_iommu_tables(INTREMAP_TABLE_ORDER);
if ( int_remap_table == NULL )
- {
- spin_unlock_irqrestore(&int_remap_table_lock, flags);
return -ENOMEM;
- }
memset(int_remap_table, 0, PAGE_SIZE * (1UL << INTREMAP_TABLE_ORDER));
}
- spin_unlock_irqrestore(&int_remap_table_lock, flags);
return 0;
}
@@ -210,15 +202,11 @@ void amd_iommu_msi_msg_update_ire(
int __init deallocate_intremap_table(void)
{
- unsigned long flags;
-
- spin_lock_irqsave(&int_remap_table_lock, flags);
if ( int_remap_table )
{
__free_amd_iommu_tables(int_remap_table, INTREMAP_TABLE_ORDER);
int_remap_table = NULL;
}
- spin_unlock_irqrestore(&int_remap_table_lock, flags);
return 0;
}
diff -r 84af3ded5b02 -r 4771bceb1889 xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c Thu Feb 19 11:07:33 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_map.c Fri Feb 20 11:05:17 2009 +0000
@@ -446,14 +446,13 @@ int amd_iommu_map_page(struct domain *d,
int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
{
u64 iommu_l2e;
- unsigned long flags;
struct hvm_iommu *hd = domain_hvm_iommu(d);
int iw = IOMMU_IO_WRITE_ENABLED;
int ir = IOMMU_IO_READ_ENABLED;
BUG_ON( !hd->root_table );
- spin_lock_irqsave(&hd->mapping_lock, flags);
+ spin_lock(&hd->mapping_lock);
if ( is_hvm_domain(d) && !hd->p2m_synchronized )
goto out;
@@ -461,14 +460,14 @@ int amd_iommu_map_page(struct domain *d,
iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
if ( iommu_l2e == 0 )
{
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
return -EFAULT;
}
set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
out:
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
return 0;
}
@@ -481,11 +480,11 @@ int amd_iommu_unmap_page(struct domain *
BUG_ON( !hd->root_table );
- spin_lock_irqsave(&hd->mapping_lock, flags);
+ spin_lock(&hd->mapping_lock);
if ( is_hvm_domain(d) && !hd->p2m_synchronized )
{
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
return 0;
}
@@ -493,14 +492,14 @@ int amd_iommu_unmap_page(struct domain *
if ( iommu_l2e == 0 )
{
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
return -EFAULT;
}
/* mark PTE as 'page not present' */
clear_iommu_l1e_present(iommu_l2e, gfn);
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
/* send INVALIDATE_IOMMU_PAGES command */
for_each_amd_iommu ( iommu )
@@ -520,12 +519,12 @@ int amd_iommu_reserve_domain_unity_map(
unsigned long size, int iw, int ir)
{
u64 iommu_l2e;
- unsigned long flags, npages, i;
+ unsigned long npages, i;
struct hvm_iommu *hd = domain_hvm_iommu(domain);
npages = region_to_pages(phys_addr, size);
- spin_lock_irqsave(&hd->mapping_lock, flags);
+ spin_lock(&hd->mapping_lock);
for ( i = 0; i < npages; ++i )
{
iommu_l2e = iommu_l2e_from_pfn(
@@ -533,7 +532,7 @@ int amd_iommu_reserve_domain_unity_map(
if ( iommu_l2e == 0 )
{
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
amd_iov_error("Invalid IO pagetable entry phys_addr = %lx\n",
phys_addr);
return -EFAULT;
@@ -544,13 +543,13 @@ int amd_iommu_reserve_domain_unity_map(
phys_addr += PAGE_SIZE;
}
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
return 0;
}
int amd_iommu_sync_p2m(struct domain *d)
{
- unsigned long mfn, gfn, flags;
+ unsigned long mfn, gfn;
u64 iommu_l2e;
struct page_info *page;
struct hvm_iommu *hd;
@@ -562,7 +561,7 @@ int amd_iommu_sync_p2m(struct domain *d)
hd = domain_hvm_iommu(d);
- spin_lock_irqsave(&hd->mapping_lock, flags);
+ spin_lock(&hd->mapping_lock);
if ( hd->p2m_synchronized )
goto out;
@@ -582,7 +581,7 @@ int amd_iommu_sync_p2m(struct domain *d)
if ( iommu_l2e == 0 )
{
spin_unlock(&d->page_alloc_lock);
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
return -EFAULT;
}
@@ -595,7 +594,7 @@ int amd_iommu_sync_p2m(struct domain *d)
hd->p2m_synchronized = 1;
out:
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
return 0;
}
diff -r 84af3ded5b02 -r 4771bceb1889 xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c Thu Feb 19 11:07:33 2009 +0000
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c Fri Feb 20 11:05:17 2009 +0000
@@ -172,22 +172,18 @@ static int allocate_domain_resources(str
static int allocate_domain_resources(struct hvm_iommu *hd)
{
/* allocate root table */
- unsigned long flags;
-
- spin_lock_irqsave(&hd->mapping_lock, flags);
+ spin_lock(&hd->mapping_lock);
if ( !hd->root_table )
{
hd->root_table = alloc_amd_iommu_pgtable();
if ( !hd->root_table )
- goto error_out;
- }
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
-
- return 0;
-
- error_out:
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
- return -ENOMEM;
+ {
+ spin_unlock(&hd->mapping_lock);
+ return -ENOMEM;
+ }
+ }
+ spin_unlock(&hd->mapping_lock);
+ return 0;
}
static int get_paging_mode(unsigned long entries)
@@ -298,7 +294,6 @@ static int reassign_device( struct domai
bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
source->domain_id, target->domain_id);
- spin_unlock(&pcidevs_lock);
return 0;
}
@@ -352,11 +347,13 @@ static void deallocate_iommu_page_tables
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
+ spin_lock(&hd->mapping_lock);
if ( hd->root_table )
{
deallocate_next_page_table(hd->root_table, hd->paging_mode);
hd->root_table = NULL;
}
+ spin_unlock(&hd->mapping_lock);
}
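
For illustration, the pattern this discipline rules out looks like the
following (hypothetical code, not part of the patch; example_lock and
both functions are made up):

static DEFINE_SPINLOCK(example_lock);

void process_context_path(void)
{
    spin_lock(&example_lock);   /* acquired with IRQs enabled */
    /* ... critical section ... */
    spin_unlock(&example_lock);
}

void irq_context_path(void)
{
    unsigned long flags;

    /* If this ever ran from an interrupt handler, it could spin
     * forever against process_context_path() on the same CPU.
     * check_lock() BUG()s on the inconsistent usage even if the
     * interrupt never fires at the wrong moment. */
    spin_lock_irqsave(&example_lock, flags);
    /* ... critical section ... */
    spin_unlock_irqrestore(&example_lock, flags);
}

Conversely, a lock that genuinely is taken from interrupt context must
use spin_lock_irqsave() at every call site.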