Hi, Isaku
This patch enhances VT-d support for ia64 — could you help
review it?
Xiantao
# HG changeset patch
# User xiantao@xxxxxxxxxxxxxxxxxxxxxx
# Date 1234244822 -28800
# Node ID 67f2e14613efc0a18924fd60f2561999b9f59a43
# Parent 4fd4dcf2f8916ab4656911a76e52fc6b1ad42c2f
[IA64] Enhance vt-d support for ia64
Signed-off-by: Xiantao Zhang <xiantao.zhang@xxxxxxxxx>
diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Wed Jan 28 12:22:58 2009 +0900
+++ b/xen/arch/ia64/xen/domain.c Tue Feb 10 13:47:02 2009 +0800
@@ -1999,6 +1999,7 @@ static void __init calc_dom0_size(void)
unsigned long p2m_pages;
unsigned long spare_hv_pages;
unsigned long max_dom0_size;
+ unsigned long iommu_pg_table_pages = 0;
/* Estimate maximum memory we can safely allocate for dom0
* by subtracting the p2m table allocation and a chunk of memory
@@ -2009,8 +2010,13 @@ static void __init calc_dom0_size(void)
domheap_pages = avail_domheap_pages();
p2m_pages = domheap_pages / PTRS_PER_PTE;
spare_hv_pages = 8192 + (domheap_pages / 4096);
- max_dom0_size = (domheap_pages - (p2m_pages + spare_hv_pages))
- * PAGE_SIZE;
+
+ if (iommu_enabled)
+ iommu_pg_table_pages = domheap_pages * 4 / 512;
+ /* There are 512 ptes in one 4K vtd page. */
+
+ max_dom0_size = (domheap_pages - (p2m_pages + spare_hv_pages) -
+ iommu_pg_table_pages) * PAGE_SIZE;
printk("Maximum permitted dom0 size: %luMB\n",
max_dom0_size / (1024*1024));
diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/drivers/passthrough/vtd/ia64/vtd.c
--- a/xen/drivers/passthrough/vtd/ia64/vtd.c Wed Jan 28 12:22:58 2009 +0900
+++ b/xen/drivers/passthrough/vtd/ia64/vtd.c Tue Feb 10 13:47:02 2009 +0800
@@ -111,3 +111,34 @@ void hvm_dpci_isairq_eoi(struct domain *
{
/* dummy */
}
+
+static int dom0_set_iommu_mapping(unsigned long start, unsigned long end,
+                                  void *arg)
+{
+    unsigned long tmp, pfn, j, page_addr;
+    struct domain *d = (struct domain *)arg;
+
+    /*
+     * Set up a 1:1 VT-d mapping for dom0, skipping pages owned by Xen
+     * itself.  (No tboot_in_range() check here, unlike x86 — confirm
+     * tboot really cannot reserve memory on ia64.)
+     */
+    for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE)
+    {
+        /* Original code used `continue` in a while loop without
+         * advancing page_addr — an infinite loop on any Xen page. */
+        if (xen_in_range(page_addr, page_addr + PAGE_SIZE))
+            continue;
+
+        pfn = page_addr >> PAGE_SHIFT;
+        tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K); /* 4K VT-d ptes per page */
+        for ( j = 0; j < tmp; j++ )
+            iommu_map_page(d, (pfn*tmp+j), (pfn*tmp+j));
+    }
+    return 0; /* 0 = keep walking; fn was int but had no return (UB) */
+}
+
+/* Build dom0's 1:1 VT-d mapping by walking the EFI memory map. */
+void iommu_dom0_do_mapping(struct domain *d)
+{
+    BUG_ON(d != dom0); /* only dom0 gets the identity mapping */
+    efi_memmap_walk(dom0_set_iommu_mapping, d);
diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c Wed Jan 28 12:22:58 2009 +0900
+++ b/xen/drivers/passthrough/vtd/iommu.c Tue Feb 10 13:47:02 2009 +0800
@@ -829,7 +829,6 @@ static void dma_msi_data_init(struct iom
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-#ifdef SUPPORT_MSI_REMAPPING
static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu)
{
u64 msi_address;
@@ -846,12 +845,6 @@ static void dma_msi_addr_init(struct iom
dmar_writel(iommu->reg, DMAR_FEUADDR_REG, (u32)(msi_address >> 32));
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-#else
-static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu)
-{
- /* ia64: TODO */
-}
-#endif
static void dma_msi_set_affinity(unsigned int vector, cpumask_t dest)
{
@@ -993,24 +986,7 @@ static int intel_iommu_domain_init(struc
if ( d->domain_id == 0 )
{
- extern int xen_in_range(paddr_t start, paddr_t end);
- extern int tboot_in_range(paddr_t start, paddr_t end);
-
- /*
- * Set up 1:1 page table for dom0 except the critical segments
- * like Xen and tboot.
- */
- for ( i = 0; i < max_page; i++ )
- {
- if ( xen_in_range(i << PAGE_SHIFT, (i + 1) << PAGE_SHIFT) ||
- tboot_in_range(i << PAGE_SHIFT, (i + 1) << PAGE_SHIFT) )
- continue;
-
- tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
- for ( j = 0; j < tmp; j++ )
- iommu_map_page(d, (i*tmp+j), (i*tmp+j));
- }
-
+ iommu_dom0_do_mapping(d);
setup_dom0_devices(d);
setup_dom0_rmrr(d);
diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/drivers/passthrough/vtd/x86/vtd.c
--- a/xen/drivers/passthrough/vtd/x86/vtd.c Wed Jan 28 12:22:58 2009 +0900
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c Tue Feb 10 13:47:02 2009 +0800
@@ -142,3 +142,25 @@ void hvm_dpci_isairq_eoi(struct domain *
}
spin_unlock(&d->event_lock);
}
+
+void iommu_dom0_do_mapping(struct domain *d)
+{
+    extern int xen_in_range(paddr_t start, paddr_t end);
+    extern int tboot_in_range(paddr_t start, paddr_t end);
+    unsigned long i, j, tmp; /* were locals of the old caller; without
+                              * these the new function does not compile */
+    BUG_ON(d != dom0);
+    /*
+     * Set up 1:1 page table for dom0 except the critical
+     * segments like Xen and tboot.
+     */
+    for ( i = 0; i < max_page; i++ )
+    {
+        if ( xen_in_range(i << PAGE_SHIFT, (i + 1) << PAGE_SHIFT) ||
+             tboot_in_range(i << PAGE_SHIFT, (i + 1) << PAGE_SHIFT) )
+            continue;
+        tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
+        for ( j = 0; j < tmp; j++ )
+            iommu_map_page(d, (i*tmp+j), (i*tmp+j));
+    }
+}
diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/include/asm-ia64/msi.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/msi.h Tue Feb 10 13:47:02 2009 +0800
@@ -0,0 +1,20 @@
+#ifndef __ASM_MSI_H
+#define __ASM_MSI_H
+
+/*
+ * MSI address/data field layout constants for ia64 (mirrors the x86
+ * asm/msi.h values — NOTE(review): verify shifts/masks against the
+ * ia64 interrupt-delivery documentation, e.g. HEADER_SHIFT is 12
+ * here vs 20 on x86).
+ */
+#define MSI_ADDRESS_HEADER 0xfee
+#define MSI_ADDRESS_HEADER_SHIFT 12
+#define MSI_ADDRESS_HEADER_MASK 0xfff000
+#define MSI_ADDRESS_DEST_ID_MASK 0xfff0000f
+#define MSI_TARGET_CPU_MASK 0xff
+#define MSI_TARGET_CPU_SHIFT 4
+#define MSI_DELIVERY_MODE 0
+#define MSI_LEVEL_MODE 1 /* Edge always assert */
+#define MSI_TRIGGER_MODE 0 /* MSI is edge sensitive */
+#define MSI_PHYSICAL_MODE 0
+#define MSI_LOGICAL_MODE 1
diff -r 4fd4dcf2f891 -r 67f2e14613ef xen/include/xen/iommu.h
--- a/xen/include/xen/iommu.h Wed Jan 28 12:22:58 2009 +0900
+++ b/xen/include/xen/iommu.h Tue Feb 10 13:47:02 2009 +0800
@@ -19,6 +19,8 @@
#ifndef _IOMMU_H_
#define _IOMMU_H_
+
+#include <asm/msi.h>
#include <xen/init.h>
#include <xen/spinlock.h>
@@ -113,4 +115,6 @@ void iommu_suspend(void);
void iommu_suspend(void);
void iommu_resume(void);
+void iommu_dom0_do_mapping(struct domain *d);
+
#endif /* _IOMMU_H_ */
vt-d.patch
Description: vt-d.patch
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel
|