# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1215776925 -3600
# Node ID 1e24033fb775b74c85df0dddd9bde632bd576151
# Parent a762b4aed1a8f0339e2822da728b9d4a71f32033
amd iommu: add interrupt remapping support
Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
---
xen/drivers/passthrough/amd/Makefile | 1
xen/drivers/passthrough/amd/iommu_intr.c | 205 ++++++++++++++++++++++++++
xen/drivers/passthrough/amd/iommu_map.c | 49 +++++-
xen/drivers/passthrough/amd/pci_amd_iommu.c | 15 +
xen/include/asm-x86/hvm/svm/amd-iommu-defs.h | 66 +++++---
xen/include/asm-x86/hvm/svm/amd-iommu-proto.h | 10 +
6 files changed, 320 insertions(+), 26 deletions(-)
diff -r a762b4aed1a8 -r 1e24033fb775 xen/drivers/passthrough/amd/Makefile
--- a/xen/drivers/passthrough/amd/Makefile Fri Jul 11 12:47:50 2008 +0100
+++ b/xen/drivers/passthrough/amd/Makefile Fri Jul 11 12:48:45 2008 +0100
@@ -3,3 +3,4 @@ obj-y += iommu_map.o
obj-y += iommu_map.o
obj-y += pci_amd_iommu.o
obj-y += iommu_acpi.o
+obj-y += iommu_intr.o
diff -r a762b4aed1a8 -r 1e24033fb775 xen/drivers/passthrough/amd/iommu_intr.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/drivers/passthrough/amd/iommu_intr.c Fri Jul 11 12:48:45 2008 +0100
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ * Author: Wei Wang <wei.wang2@xxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <xen/sched.h>
+#include <xen/hvm/iommu.h>
+#include <asm/amd-iommu.h>
+#include <asm/hvm/svm/amd-iommu-proto.h>
+
+DEFINE_SPINLOCK(int_remap_table_lock);
+void *int_remap_table = NULL;
+
+u8* get_intremap_entry(u8 vector, u8 dm)
+{
+ u8 *table;
+ int offset = 0;
+ table = (u8*)int_remap_table;
+
+ BUG_ON( !table );
+ offset = (dm << INT_REMAP_INDEX_DM_SHIFT) & INT_REMAP_INDEX_DM_MASK;
+ offset |= (vector << INT_REMAP_INDEX_VECTOR_SHIFT ) &
+ INT_REMAP_INDEX_VECTOR_MASK;
+
+ return (u8*) (table + offset);
+}
+
+static void update_intremap_entry(u32* entry, u8 vector, u8 int_type,
+ u8 dest_mode, u8 dest)
+{
+ set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
+ INT_REMAP_ENTRY_REMAPEN_MASK,
+ INT_REMAP_ENTRY_REMAPEN_SHIFT, entry);
+ set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, *entry,
+ INT_REMAP_ENTRY_SUPIOPF_MASK,
+ INT_REMAP_ENTRY_SUPIOPF_SHIFT, entry);
+ set_field_in_reg_u32(int_type, *entry,
+ INT_REMAP_ENTRY_INTTYPE_MASK,
+ INT_REMAP_ENTRY_INTTYPE_SHIFT, entry);
+ set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, *entry,
+ INT_REMAP_ENTRY_REQEOI_MASK,
+ INT_REMAP_ENTRY_REQEOI_SHIFT, entry);
+ set_field_in_reg_u32((u32)dest_mode, *entry,
+ INT_REMAP_ENTRY_DM_MASK,
+ INT_REMAP_ENTRY_DM_SHIFT, entry);
+ set_field_in_reg_u32((u32)dest, *entry,
+ INT_REMAP_ENTRY_DEST_MAST,
+ INT_REMAP_ENTRY_DEST_SHIFT, entry);
+ set_field_in_reg_u32((u32)vector, *entry,
+ INT_REMAP_ENTRY_VECTOR_MASK,
+ INT_REMAP_ENTRY_VECTOR_SHIFT, entry);
+}
+
+void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id)
+{
+ u32 cmd[4], entry;
+
+ cmd[3] = cmd[2] = 0;
+ set_field_in_reg_u32(device_id, 0,
+ IOMMU_INV_INT_TABLE_DEVICE_ID_MASK,
+ IOMMU_INV_INT_TABLE_DEVICE_ID_SHIFT, &entry);
+ cmd[0] = entry;
+ set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_INT_TABLE, 0,
+ IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
+ &entry);
+ cmd[1] = entry;
+ send_iommu_command(iommu, cmd);
+}
+
+static void update_intremap_entry_from_ioapic(
+ struct IO_APIC_route_entry *ioapic_rte,
+ unsigned int rte_upper, unsigned int value)
+{
+ unsigned long flags;
+ u32* entry;
+ u8 delivery_mode, dest, vector, dest_mode;
+ struct IO_APIC_route_entry *rte = ioapic_rte;
+
+ spin_lock_irqsave(&int_remap_table_lock, flags);
+
+ if ( rte_upper )
+ {
+ dest = (value >> 24) & 0xFF;
+ delivery_mode = rte->delivery_mode;
+ vector = rte->vector;
+ dest_mode = rte->dest_mode;
+ entry = (u32*)get_intremap_entry((u8)rte->vector,
+ (u8)rte->delivery_mode);
+ update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
+ }
+
+ spin_unlock_irqrestore(&int_remap_table_lock, flags);
+ return;
+}
+
+int amd_iommu_setup_intremap_table(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&int_remap_table_lock, flags);
+ if ( int_remap_table == NULL )
+ int_remap_table = (void *)alloc_xenheap_pages(1);
+ if ( !int_remap_table )
+ {
+ spin_unlock_irqrestore(&int_remap_table_lock, flags);
+ return -ENOMEM;
+ }
+ memset((u8*)int_remap_table, 0, PAGE_SIZE*2);
+ spin_unlock_irqrestore(&int_remap_table_lock, flags);
+
+ return 0;
+}
+
+void amd_iommu_ioapic_update_ire(
+ unsigned int apic, unsigned int reg, unsigned int value)
+{
+ struct IO_APIC_route_entry ioapic_rte = { 0 };
+ unsigned int rte_upper = (reg & 1) ? 1 : 0;
+ int saved_mask;
+
+ *IO_APIC_BASE(apic) = reg;
+ *(IO_APIC_BASE(apic)+4) = value;
+
+ if ( int_remap_table == NULL )
+ return;
+ if ( !rte_upper )
+ return;
+
+ reg--;
+ /* read both lower and upper 32-bits of rte entry */
+ *IO_APIC_BASE(apic) = reg;
+ *(((u32 *)&ioapic_rte) + 0) = *(IO_APIC_BASE(apic)+4);
+ *IO_APIC_BASE(apic) = reg + 1;
+ *(((u32 *)&ioapic_rte) + 1) = *(IO_APIC_BASE(apic)+4);
+
+ /* mask the interrupt while we change the intremap table */
+ saved_mask = ioapic_rte.mask;
+ ioapic_rte.mask = 1;
+ *IO_APIC_BASE(apic) = reg;
+ *(IO_APIC_BASE(apic)+4) = *(((int *)&ioapic_rte)+0);
+ ioapic_rte.mask = saved_mask;
+
+ update_intremap_entry_from_ioapic(&ioapic_rte, rte_upper, value);
+
+ /* unmask the interrupt after we have updated the intremap table */
+ *IO_APIC_BASE(apic) = reg;
+ *(IO_APIC_BASE(apic)+4) = *(((u32 *)&ioapic_rte)+0);
+}
+
+static void update_intremap_entry_from_msi_msg(
+ struct amd_iommu *iommu, struct pci_dev *pdev, struct msi_msg *msg)
+{
+ unsigned long flags;
+ u32* entry;
+ u16 dev_id;
+
+ u8 delivery_mode, dest, vector, dest_mode;
+
+ dev_id = (pdev->bus << 8) | pdev->devfn;
+
+ spin_lock_irqsave(&int_remap_table_lock, flags);
+ dest_mode = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
+ delivery_mode = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
+ vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK;
+ dest = (msg->address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff;
+
+ entry = (u32*)get_intremap_entry((u8)vector, (u8)delivery_mode);
+ update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
+ spin_unlock_irqrestore(&int_remap_table_lock, flags);
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ invalidate_interrupt_table(iommu, dev_id);
+ flush_command_buffer(iommu);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ return;
+}
+
+void amd_iommu_msi_msg_update_ire(
+ struct msi_desc *msi_desc, struct msi_msg *msg)
+{
+ struct pci_dev *pdev = msi_desc->dev;
+ struct amd_iommu *iommu = NULL;
+
+ iommu = find_iommu_for_device(pdev->bus, pdev->devfn);
+
+ if ( !iommu || !int_remap_table )
+ return;
+
+ update_intremap_entry_from_msi_msg(iommu, pdev, msg);
+}
diff -r a762b4aed1a8 -r 1e24033fb775 xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c Fri Jul 11 12:47:50 2008 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c Fri Jul 11 12:48:45 2008 +0100
@@ -235,13 +235,56 @@ static void amd_iommu_set_page_directory
pde[0] = entry;
}
-void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u16 domain_id,
- u8 sys_mgt, u8 dev_ex, u8 paging_mode)
+void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u64 intremap_ptr,
+ u16 domain_id, u8 sys_mgt, u8 dev_ex,
+ u8 paging_mode)
{
u64 addr_hi, addr_lo;
u32 entry;
- dte[7] = dte[6] = dte[5] = dte[4] = 0;
+ dte[7] = dte[6] = 0;
+
+ addr_lo = intremap_ptr & DMA_32BIT_MASK;
+ addr_hi = intremap_ptr >> 32;
+
+ set_field_in_reg_u32((u32)addr_hi, 0,
+ IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_MASK,
+ IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_SHIFT, &entry);
+ set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+ IOMMU_DEV_TABLE_INIT_PASSTHRU_MASK,
+ IOMMU_DEV_TABLE_INIT_PASSTHRU_SHIFT, &entry);
+ set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+ IOMMU_DEV_TABLE_EINT_PASSTHRU_MASK,
+ IOMMU_DEV_TABLE_EINT_PASSTHRU_SHIFT, &entry);
+ set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+ IOMMU_DEV_TABLE_NMI_PASSTHRU_MASK,
+ IOMMU_DEV_TABLE_NMI_PASSTHRU_SHIFT, &entry);
+ /* Fixed and arbitrated interrupts remapped */
+ set_field_in_reg_u32(2, entry,
+ IOMMU_DEV_TABLE_INT_CONTROL_MASK,
+ IOMMU_DEV_TABLE_INT_CONTROL_SHIFT, &entry);
+ set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+ IOMMU_DEV_TABLE_LINT0_ENABLE_MASK,
+ IOMMU_DEV_TABLE_LINT0_ENABLE_SHIFT, &entry);
+ set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+ IOMMU_DEV_TABLE_LINT1_ENABLE_MASK,
+ IOMMU_DEV_TABLE_LINT1_ENABLE_SHIFT, &entry);
+ dte[5] = entry;
+
+ set_field_in_reg_u32((u32)addr_lo >> 6, 0,
+ IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_MASK,
+ IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_SHIFT, &entry);
+ /* 2048 entries */
+ set_field_in_reg_u32(0xB, entry,
+ IOMMU_DEV_TABLE_INT_TABLE_LENGTH_MASK,
+ IOMMU_DEV_TABLE_INT_TABLE_LENGTH_SHIFT, &entry);
+ set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+ IOMMU_DEV_TABLE_INT_VALID_MASK,
+ IOMMU_DEV_TABLE_INT_VALID_SHIFT, &entry);
+ set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+ IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_MASK,
+ IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_SHIFT, &entry);
+ dte[4] = entry;
set_field_in_reg_u32(sys_mgt, 0,
IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK,
diff -r a762b4aed1a8 -r 1e24033fb775 xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c Fri Jul 11 12:47:50
2008 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c Fri Jul 11 12:48:45
2008 +0100
@@ -34,6 +34,7 @@ int nr_amd_iommus;
unsigned short ivrs_bdf_entries;
struct ivrs_mappings *ivrs_mappings;
+extern void *int_remap_table;
static void deallocate_domain_page_tables(struct hvm_iommu *hd)
{
@@ -256,12 +257,13 @@ static void amd_iommu_setup_domain_devic
{
void *dte;
u64 root_ptr;
+ u64 intremap_ptr;
unsigned long flags;
int req_id;
u8 sys_mgt, dev_ex;
struct hvm_iommu *hd = domain_hvm_iommu(domain);
- BUG_ON( !hd->root_table || !hd->paging_mode );
+ BUG_ON( !hd->root_table || !hd->paging_mode || !int_remap_table );
root_ptr = (u64)virt_to_maddr(hd->root_table);
/* get device-table entry */
@@ -269,6 +271,8 @@ static void amd_iommu_setup_domain_devic
dte = iommu->dev_table.buffer +
(req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
+ intremap_ptr = (u64)virt_to_maddr(int_remap_table);
+
if ( !amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
{
spin_lock_irqsave(&iommu->lock, flags);
@@ -276,11 +280,12 @@ static void amd_iommu_setup_domain_devic
/* bind DTE to domain page-tables */
sys_mgt = ivrs_mappings[req_id].dte_sys_mgt_enable;
dev_ex = ivrs_mappings[req_id].dte_allow_exclusion;
- amd_iommu_set_dev_table_entry((u32 *)dte, root_ptr,
+ amd_iommu_set_dev_table_entry((u32 *)dte, root_ptr, intremap_ptr,
hd->domain_id, sys_mgt, dev_ex,
hd->paging_mode);
invalidate_dev_table_entry(iommu, req_id);
+ invalidate_interrupt_table(iommu, req_id);
flush_command_buffer(iommu);
amd_iov_info("Enable DTE:0x%x, "
"root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
@@ -364,6 +369,12 @@ int amd_iov_detect(void)
}
memset(ivrs_mappings, 0,
ivrs_bdf_entries * sizeof(struct ivrs_mappings));
+
+ if ( amd_iommu_setup_intremap_table() != 0 )
+ {
+ amd_iov_error("Error allocating interrupt remapping table\n");
+ goto error_out;
+ }
if ( amd_iommu_init() != 0 )
{
diff -r a762b4aed1a8 -r 1e24033fb775
xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h Fri Jul 11 12:47:50
2008 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h Fri Jul 11 12:48:45
2008 +0100
@@ -195,28 +195,30 @@
#define IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT 8
/* DeviceTable Entry[159:128] */
-#define IOMMU_DEV_TABLE_INT_VALID_MASK 0x00000001
-#define IOMMU_DEV_TABLE_INT_VALID_SHIFT 0
-#define IOMMU_DEV_TABLE_INT_TABLE_LENGTH_MASK 0x0000001E
-#define IOMMU_DEV_TABLE_INT_TABLE_LENGTH_SHIFT 1
-#define IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_MASK 0xFFFFFFC0
-#define IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_SHIFT 6
+#define IOMMU_DEV_TABLE_INT_VALID_MASK 0x00000001
+#define IOMMU_DEV_TABLE_INT_VALID_SHIFT 0
+#define IOMMU_DEV_TABLE_INT_TABLE_LENGTH_MASK 0x0000001E
+#define IOMMU_DEV_TABLE_INT_TABLE_LENGTH_SHIFT 1
+#define IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_MASK 0x0000000020
+#define IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_SHIFT 5
+#define IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_MASK 0xFFFFFFC0
+#define IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_SHIFT 6
/* DeviceTable Entry[191:160] */
-#define IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_MASK 0x000FFFFF
-#define IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_SHIFT 0
-#define IOMMU_DEV_TABLE_INIT_PASSTHRU_MASK 0x01000000
-#define IOMMU_DEV_TABLE_INIT_PASSTHRU_SHIFT 24
-#define IOMMU_DEV_TABLE_EINT_PASSTHRU_MASK 0x02000000
-#define IOMMU_DEV_TABLE_EINT_PASSTHRU_SHIFT 25
-#define IOMMU_DEV_TABLE_NMI_PASSTHRU_MASK 0x04000000
-#define IOMMU_DEV_TABLE_NMI_PASSTHRU_SHIFT 26
-#define IOMMU_DEV_TABLE_INT_CONTROL_MASK 0x30000000
-#define IOMMU_DEV_TABLE_INT_CONTROL_SHIFT 28
-#define IOMMU_DEV_TABLE_LINT0_ENABLE_MASK 0x40000000
-#define IOMMU_DEV_TABLE_LINT0_ENABLE_SHIFT 30
-#define IOMMU_DEV_TABLE_LINT1_ENABLE_MASK 0x80000000
-#define IOMMU_DEV_TABLE_LINT1_ENABLE_SHIFT 31
+#define IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_MASK 0x000FFFFF
+#define IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_SHIFT 0
+#define IOMMU_DEV_TABLE_INIT_PASSTHRU_MASK 0x01000000
+#define IOMMU_DEV_TABLE_INIT_PASSTHRU_SHIFT 24
+#define IOMMU_DEV_TABLE_EINT_PASSTHRU_MASK 0x02000000
+#define IOMMU_DEV_TABLE_EINT_PASSTHRU_SHIFT 25
+#define IOMMU_DEV_TABLE_NMI_PASSTHRU_MASK 0x04000000
+#define IOMMU_DEV_TABLE_NMI_PASSTHRU_SHIFT 26
+#define IOMMU_DEV_TABLE_INT_CONTROL_MASK 0x30000000
+#define IOMMU_DEV_TABLE_INT_CONTROL_SHIFT 28
+#define IOMMU_DEV_TABLE_LINT0_ENABLE_MASK 0x40000000
+#define IOMMU_DEV_TABLE_LINT0_ENABLE_SHIFT 30
+#define IOMMU_DEV_TABLE_LINT1_ENABLE_MASK 0x80000000
+#define IOMMU_DEV_TABLE_LINT1_ENABLE_SHIFT 31
/* Command Buffer */
#define IOMMU_CMD_BUFFER_BASE_LOW_OFFSET 0x08
@@ -275,6 +277,10 @@
/* INVALIDATE_DEVTAB_ENTRY command */
#define IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK 0x0000FFFF
#define IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT 0
+
+/* INVALIDATE_INTERRUPT_TABLE command */
+#define IOMMU_INV_INT_TABLE_DEVICE_ID_MASK 0x0000FFFF
+#define IOMMU_INV_INT_TABLE_DEVICE_ID_SHIFT 0
/* Event Log */
#define IOMMU_EVENT_LOG_BASE_LOW_OFFSET 0x10
@@ -435,4 +441,24 @@
#define IOMMU_IO_READ_ENABLED 1
#define HACK_BIOS_SETTINGS 0
+/* interrupt remapping table */
+#define INT_REMAP_INDEX_DM_MASK 0x1C00
+#define INT_REMAP_INDEX_DM_SHIFT 10
+#define INT_REMAP_INDEX_VECTOR_MASK 0x3FC
+#define INT_REMAP_INDEX_VECTOR_SHIFT 2
+#define INT_REMAP_ENTRY_REMAPEN_MASK 0x00000001
+#define INT_REMAP_ENTRY_REMAPEN_SHIFT 0
+#define INT_REMAP_ENTRY_SUPIOPF_MASK 0x00000002
+#define INT_REMAP_ENTRY_SUPIOPF_SHIFT 1
+#define INT_REMAP_ENTRY_INTTYPE_MASK 0x0000001C
+#define INT_REMAP_ENTRY_INTTYPE_SHIFT 2
+#define INT_REMAP_ENTRY_REQEOI_MASK 0x00000020
+#define INT_REMAP_ENTRY_REQEOI_SHIFT 5
+#define INT_REMAP_ENTRY_DM_MASK 0x00000040
+#define INT_REMAP_ENTRY_DM_SHIFT 6
+#define INT_REMAP_ENTRY_DEST_MAST 0x0000FF00
+#define INT_REMAP_ENTRY_DEST_SHIFT 8
+#define INT_REMAP_ENTRY_VECTOR_MASK 0x00FF0000
+#define INT_REMAP_ENTRY_VECTOR_SHIFT 16
+
#endif /* _ASM_X86_64_AMD_IOMMU_DEFS_H */
diff -r a762b4aed1a8 -r 1e24033fb775
xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Fri Jul 11 12:47:50
2008 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Fri Jul 11 12:48:45
2008 +0100
@@ -70,7 +70,7 @@ int amd_iommu_sync_p2m(struct domain *d)
int amd_iommu_sync_p2m(struct domain *d);
/* device table functions */
-void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr,
+void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u64 intremap_ptr,
u16 domain_id, u8 sys_mgt, u8 dev_ex, u8 paging_mode);
int amd_iommu_is_dte_page_translation_valid(u32 *entry);
void invalidate_dev_table_entry(struct amd_iommu *iommu,
@@ -85,6 +85,14 @@ struct amd_iommu *find_iommu_for_device(
/* amd-iommu-acpi functions */
int __init parse_ivrs_table(struct acpi_table_header *table);
+
+/* interrupt remapping */
+int amd_iommu_setup_intremap_table(void);
+void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id);
+void amd_iommu_ioapic_update_ire(
+ unsigned int apic, unsigned int reg, unsigned int value);
+void amd_iommu_msi_msg_update_ire(
+ struct msi_desc *msi_desc, struct msi_msg *msg);
static inline u32 get_field_from_reg_u32(u32 reg_value, u32 mask, u32 shift)
{
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|