# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1204204909 0
# Node ID 0e22182446fae20e022a9c28a6ac6cda6cae4790
# Parent 36529ef3ef23180c52dc14d0571364a46c09a519
Add ACPI table support for AMD IOMMU
Configuration information for the AMD IOMMU control fields is described
by the I/O Virtualization Reporting Structure (IVRS) table. This patch
parses the IVRS table and updates the IOMMU control flags accordingly.
Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
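[ Editor's note, not part of the patch: throughout the new iommu_acpi.c
  the IVHD/IVMD flag bytes are decoded with the mask/shift helper added
  to amd-iommu-proto.h below. A minimal sketch of the pattern, using the
  mask/shift values defined in the new amd-iommu-acpi.h and a made-up
  flags byte:

      static inline u8 get_field_from_byte(u8 value, u8 mask, u8 shift)
      {
          return (value & mask) >> shift;
      }

      u8 flags = 0x26; /* hypothetical IVHD flags byte */
      u8 coherent = get_field_from_byte(flags,
          AMD_IOMMU_ACPI_COHERENT_MASK,    /* 0x20 */
          AMD_IOMMU_ACPI_COHERENT_SHIFT);  /* 5: yields 1 */
      u8 pass_pw = get_field_from_byte(flags,
          AMD_IOMMU_ACPI_PASS_PW_MASK,     /* 0x02 */
          AMD_IOMMU_ACPI_PASS_PW_SHIFT);   /* 1: yields 1 */
]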
---
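[ Editor's note: region_to_pages(), added to amd-iommu-proto.h at the end
  of this patch, counts the 4K pages touched by the region
  [addr, addr + size). A worked example with hypothetical values,
  assuming PAGE_SHIFT == 12:

      unsigned long addr = 0x1234, size = 0x2000; /* touches pages 1-3 */
      /* PAGE_ALIGN(0x1234 + 0x2000) = 0x4000  */
      /* 0x1234 & PAGE_MASK          = 0x1000  */
      /* (0x4000 - 0x1000) >> 12     = 3 pages */
]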
xen/drivers/acpi/tables.c | 1
xen/drivers/passthrough/amd/Makefile | 1
xen/drivers/passthrough/amd/iommu_acpi.c | 874 ++++++++++++++++++++++++++
xen/drivers/passthrough/amd/iommu_detect.c | 36 -
xen/drivers/passthrough/amd/iommu_init.c | 41 +
xen/drivers/passthrough/amd/iommu_map.c | 42 +
xen/drivers/passthrough/amd/pci_amd_iommu.c | 142 +++-
xen/include/asm-x86/amd-iommu.h | 32
xen/include/asm-x86/hvm/svm/amd-iommu-acpi.h | 176 +++++
xen/include/asm-x86/hvm/svm/amd-iommu-defs.h | 6
xen/include/asm-x86/hvm/svm/amd-iommu-proto.h | 24
xen/include/xen/acpi.h | 1
12 files changed, 1318 insertions(+), 58 deletions(-)
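[ Editor's note: reserve_iommu_exclusion_range() in the new iommu_acpi.c
  keeps a single exclusion window per IOMMU; overlapping or disjoint IVMD
  ranges are merged into one covering [base, limit] window rather than
  split. With hypothetical values:

      existing window: base = 0x100000, limit = 0x1fffff
      new IVMD range:  base = 0x180000, limit = 0x2fffff
      merged window:   [0x100000, 0x2fffff]
]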
diff -r 36529ef3ef23 -r 0e22182446fa xen/drivers/acpi/tables.c
--- a/xen/drivers/acpi/tables.c Thu Feb 28 13:19:38 2008 +0000
+++ b/xen/drivers/acpi/tables.c Thu Feb 28 13:21:49 2008 +0000
@@ -60,6 +60,7 @@ static char *acpi_table_signatures[ACPI_
[ACPI_HPET] = "HPET",
[ACPI_MCFG] = "MCFG",
[ACPI_DMAR] = "DMAR",
+ [ACPI_IVRS] = "IVRS",
};
static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
diff -r 36529ef3ef23 -r 0e22182446fa xen/drivers/passthrough/amd/Makefile
--- a/xen/drivers/passthrough/amd/Makefile Thu Feb 28 13:19:38 2008 +0000
+++ b/xen/drivers/passthrough/amd/Makefile Thu Feb 28 13:21:49 2008 +0000
@@ -2,3 +2,4 @@ obj-y += iommu_init.o
obj-y += iommu_init.o
obj-y += iommu_map.o
obj-y += pci_amd_iommu.o
+obj-y += iommu_acpi.o
diff -r 36529ef3ef23 -r 0e22182446fa xen/drivers/passthrough/amd/iommu_acpi.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c Thu Feb 28 13:21:49 2008 +0000
@@ -0,0 +1,874 @@
+/*
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ * Author: Leo Duran <leo.duran@xxxxxxx>
+ * Author: Wei Wang <wei.wang2@xxxxxxx> - adapted to xen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <xen/config.h>
+#include <xen/errno.h>
+#include <asm/amd-iommu.h>
+#include <asm/hvm/svm/amd-iommu-proto.h>
+#include <asm/hvm/svm/amd-iommu-acpi.h>
+
+extern unsigned long amd_iommu_page_entries;
+extern unsigned short ivrs_bdf_entries;
+extern struct ivrs_mappings *ivrs_mappings;
+
+static struct amd_iommu * __init find_iommu_from_bdf_cap(
+ u16 bdf, u8 cap_offset)
+{
+ struct amd_iommu *iommu;
+
+ for_each_amd_iommu( iommu )
+ if ( iommu->bdf == bdf && iommu->cap_offset == cap_offset )
+ return iommu;
+
+ return NULL;
+}
+
+static void __init reserve_iommu_exclusion_range(struct amd_iommu *iommu,
+ unsigned long base, unsigned long limit)
+{
+ /* need to extend exclusion range? */
+ if ( iommu->exclusion_enable )
+ {
+ if ( iommu->exclusion_base < base )
+ base = iommu->exclusion_base;
+ if ( iommu->exclusion_limit > limit )
+ limit = iommu->exclusion_limit;
+ }
+
+ iommu->exclusion_enable = IOMMU_CONTROL_ENABLED;
+ iommu->exclusion_base = base;
+ iommu->exclusion_limit = limit;
+}
+
+static void __init reserve_iommu_exclusion_range_all(struct amd_iommu *iommu,
+ unsigned long base, unsigned long limit)
+{
+ reserve_iommu_exclusion_range(iommu, base, limit);
+ iommu->exclusion_allow_all = IOMMU_CONTROL_ENABLED;
+}
+
+static void __init reserve_unity_map_for_device(u16 bdf, unsigned long base,
+ unsigned long length, u8 iw, u8 ir)
+{
+ unsigned long old_top, new_top;
+
+ /* need to extend unity-mapped range? */
+ if ( ivrs_mappings[bdf].unity_map_enable )
+ {
+ old_top = ivrs_mappings[bdf].addr_range_start +
+ ivrs_mappings[bdf].addr_range_length;
+ new_top = base + length;
+ if ( old_top > new_top )
+ new_top = old_top;
+ if ( ivrs_mappings[bdf].addr_range_start < base )
+ base = ivrs_mappings[bdf].addr_range_start;
+ length = new_top - base;
+ }
+
+ /* extend r/w permissions and keep the aggregate */
+ if ( iw )
+ ivrs_mappings[bdf].write_permission = IOMMU_CONTROL_ENABLED;
+ if ( ir )
+ ivrs_mappings[bdf].read_permission = IOMMU_CONTROL_ENABLED;
+ ivrs_mappings[bdf].unity_map_enable = IOMMU_CONTROL_ENABLED;
+ ivrs_mappings[bdf].addr_range_start = base;
+ ivrs_mappings[bdf].addr_range_length = length;
+}
+
+static int __init register_exclusion_range_for_all_devices(
+ unsigned long base, unsigned long limit, u8 iw, u8 ir)
+{
+ unsigned long range_top, iommu_top, length;
+ struct amd_iommu *iommu;
+ u16 bdf;
+
+ /* is part of exclusion range inside of IOMMU virtual address space? */
+ /* note: 'limit' parameter is assumed to be page-aligned */
+ range_top = limit + PAGE_SIZE;
+ iommu_top = max_page * PAGE_SIZE;
+ if ( base < iommu_top )
+ {
+ if ( range_top > iommu_top )
+ range_top = iommu_top;
+ length = range_top - base;
+ /* reserve r/w unity-mapped page entries for devices */
+ /* note: these entries are part of the exclusion range */
+ for ( bdf = 0; bdf < ivrs_bdf_entries; ++bdf )
+ reserve_unity_map_for_device(bdf, base, length, iw, ir);
+ /* push 'base' just outside of virtual address space */
+ base = iommu_top;
+ }
+ /* register IOMMU exclusion range settings */
+ if ( limit >= iommu_top )
+ {
+ for_each_amd_iommu( iommu )
+ reserve_iommu_exclusion_range_all(iommu, base, limit);
+ }
+
+ return 0;
+}
+
+static int __init register_exclusion_range_for_device(u16 bdf,
+ unsigned long base, unsigned long limit, u8 iw, u8 ir)
+{
+ unsigned long range_top, iommu_top, length;
+ struct amd_iommu *iommu;
+ u16 bus, devfn, req;
+
+ bus = bdf >> 8;
+ devfn = bdf & 0xFF;
+ iommu = find_iommu_for_device(bus, devfn);
+ if ( !iommu )
+ {
+ dprintk(XENLOG_ERR, "IVMD Error: No IOMMU for Dev_Id 0x%x!\n", bdf);
+ return -ENODEV;
+ }
+ req = ivrs_mappings[bdf].dte_requestor_id;
+
+ /* note: 'limit' parameter is assumed to be page-aligned */
+ range_top = limit + PAGE_SIZE;
+ iommu_top = max_page * PAGE_SIZE;
+ if ( base < iommu_top )
+ {
+ if ( range_top > iommu_top )
+ range_top = iommu_top;
+ length = range_top - base;
+ /* reserve unity-mapped page entries for device */
+ /* note: these entries are part of the exclusion range */
+ reserve_unity_map_for_device(bdf, base, length, iw, ir);
+ reserve_unity_map_for_device(req, base, length, iw, ir);
+
+ /* push 'base' just outside of virtual address space */
+ base = iommu_top;
+ }
+
+ /* register IOMMU exclusion range settings for device */
+ if ( limit >= iommu_top )
+ {
+ reserve_iommu_exclusion_range(iommu, base, limit);
+ ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_ENABLED;
+ ivrs_mappings[req].dte_allow_exclusion = IOMMU_CONTROL_ENABLED;
+ }
+
+ return 0;
+}
+
+static int __init register_exclusion_range_for_iommu_devices(
+ struct amd_iommu *iommu,
+ unsigned long base, unsigned long limit, u8 iw, u8 ir)
+{
+ unsigned long range_top, iommu_top, length;
+ u16 bus, devfn, bdf, req;
+
+ /* is part of exclusion range inside of IOMMU virtual address space? */
+ /* note: 'limit' parameter is assumed to be page-aligned */
+ range_top = limit + PAGE_SIZE;
+ iommu_top = max_page * PAGE_SIZE;
+ if ( base < iommu_top )
+ {
+ if ( range_top > iommu_top )
+ range_top = iommu_top;
+ length = range_top - base;
+ /* reserve r/w unity-mapped page entries for devices */
+ /* note: these entries are part of the exclusion range */
+ for ( bdf = 0; bdf < ivrs_bdf_entries; ++bdf )
+ {
+ bus = bdf >> 8;
+ devfn = bdf & 0xFF;
+ if ( iommu == find_iommu_for_device(bus, devfn) )
+ {
+ reserve_unity_map_for_device(bdf, base, length, iw, ir);
+ req = ivrs_mappings[bdf].dte_requestor_id;
+ reserve_unity_map_for_device(req, base, length, iw, ir);
+ }
+ }
+
+ /* push 'base' just outside of virtual address space */
+ base = iommu_top;
+ }
+
+ /* register IOMMU exclusion range settings */
+ if ( limit >= iommu_top )
+ reserve_iommu_exclusion_range_all(iommu, base, limit);
+ return 0;
+}
+
+static int __init parse_ivmd_device_select(
+ struct acpi_ivmd_block_header *ivmd_block,
+ unsigned long base, unsigned long limit, u8 iw, u8 ir)
+{
+ u16 bdf;
+
+ bdf = ivmd_block->header.dev_id;
+ if ( bdf >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVMD Error: Invalid Dev_Id 0x%x\n", bdf);
+ return -ENODEV;
+ }
+
+ return register_exclusion_range_for_device(bdf, base, limit, iw, ir);
+}
+
+static int __init parse_ivmd_device_range(
+ struct acpi_ivmd_block_header *ivmd_block,
+ unsigned long base, unsigned long limit, u8 iw, u8 ir)
+{
+ u16 first_bdf, last_bdf, bdf;
+ int error;
+
+ first_bdf = ivmd_block->header.dev_id;
+ if ( first_bdf >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVMD Error: "
+ "Invalid Range_First Dev_Id 0x%x\n", first_bdf);
+ return -ENODEV;
+ }
+
+ last_bdf = ivmd_block->last_dev_id;
+ if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
+ {
+ dprintk(XENLOG_ERR, "IVMD Error: "
+ "Invalid Range_Last Dev_Id 0x%x\n", last_bdf);
+ return -ENODEV;
+ }
+
+ dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
+ first_bdf, last_bdf);
+
+ for ( bdf = first_bdf, error = 0;
+ bdf <= last_bdf && !error; ++bdf )
+ {
+ error = register_exclusion_range_for_device(
+ bdf, base, limit, iw, ir);
+ }
+
+ return error;
+}
+
+static int __init parse_ivmd_device_iommu(
+ struct acpi_ivmd_block_header *ivmd_block,
+ unsigned long base, unsigned long limit, u8 iw, u8 ir)
+{
+ struct amd_iommu *iommu;
+
+ /* find target IOMMU */
+ iommu = find_iommu_from_bdf_cap(ivmd_block->header.dev_id,
+ ivmd_block->cap_offset);
+ if ( !iommu )
+ {
+ dprintk(XENLOG_ERR,
+ "IVMD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n",
+ ivmd_block->header.dev_id, ivmd_block->cap_offset);
+ return -ENODEV;
+ }
+
+ return register_exclusion_range_for_iommu_devices(
+ iommu, base, limit, iw, ir);
+}
+
+static int __init parse_ivmd_block(struct acpi_ivmd_block_header *ivmd_block)
+{
+ unsigned long start_addr, mem_length, base, limit;
+ u8 iw, ir;
+
+ if ( ivmd_block->header.length <
+ sizeof(struct acpi_ivmd_block_header) )
+ {
+ dprintk(XENLOG_ERR, "IVMD Error: Invalid Block Length!\n");
+ return -ENODEV;
+ }
+
+ start_addr = (unsigned long)ivmd_block->start_addr;
+ mem_length = (unsigned long)ivmd_block->mem_length;
+ base = start_addr & PAGE_MASK;
+ limit = (start_addr + mem_length - 1) & PAGE_MASK;
+
+ dprintk(XENLOG_INFO, "IVMD Block: Type 0x%x\n",
+ ivmd_block->header.type);
+ dprintk(XENLOG_INFO, " Start_Addr_Phys 0x%lx\n", start_addr);
+ dprintk(XENLOG_INFO, " Mem_Length 0x%lx\n", mem_length);
+
+ if ( get_field_from_byte(ivmd_block->header.flags,
+ AMD_IOMMU_ACPI_EXCLUSION_RANGE_MASK,
+ AMD_IOMMU_ACPI_EXCLUSION_RANGE_SHIFT) )
+ iw = ir = IOMMU_CONTROL_ENABLED;
+ else if ( get_field_from_byte(ivmd_block->header.flags,
+ AMD_IOMMU_ACPI_UNITY_MAPPING_MASK,
+ AMD_IOMMU_ACPI_UNITY_MAPPING_SHIFT) )
+ {
+ iw = get_field_from_byte(ivmd_block->header.flags,
+ AMD_IOMMU_ACPI_IW_PERMISSION_MASK,
+ AMD_IOMMU_ACPI_IW_PERMISSION_SHIFT);
+ ir = get_field_from_byte(ivmd_block->header.flags,
+ AMD_IOMMU_ACPI_IR_PERMISSION_MASK,
+ AMD_IOMMU_ACPI_IR_PERMISSION_SHIFT);
+ }
+ else
+ {
+ dprintk(XENLOG_ERR, "IVMD Error: Invalid Flag Field!\n");
+ return -ENODEV;
+ }
+
+ switch ( ivmd_block->header.type )
+ {
+ case AMD_IOMMU_ACPI_IVMD_ALL_TYPE:
+ return register_exclusion_range_for_all_devices(
+ base, limit, iw, ir);
+
+ case AMD_IOMMU_ACPI_IVMD_ONE_TYPE:
+ return parse_ivmd_device_select(ivmd_block,
+ base, limit, iw, ir);
+
+ case AMD_IOMMU_ACPI_IVMD_RANGE_TYPE:
+ return parse_ivmd_device_range(ivmd_block,
+ base, limit, iw, ir);
+
+ case AMD_IOMMU_ACPI_IVMD_IOMMU_TYPE:
+ return parse_ivmd_device_iommu(ivmd_block,
+ base, limit, iw, ir);
+
+ default:
+ dprintk(XENLOG_ERR, "IVMD Error: Invalid Block Type!\n");
+ return -ENODEV;
+ }
+}
+
+static u16 __init parse_ivhd_device_padding(u16 pad_length,
+ u16 header_length, u16 block_length)
+{
+ if ( header_length < (block_length + pad_length) )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: Invalid Device_Entry Length!\n");
+ return 0;
+ }
+
+ return pad_length;
+}
+
+static u16 __init parse_ivhd_device_select(
+ union acpi_ivhd_device *ivhd_device)
+{
+ u16 bdf;
+
+ bdf = ivhd_device->header.dev_id;
+ if ( bdf >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Device_Entry Dev_Id 0x%x\n", bdf);
+ return 0;
+ }
+
+ /* override flags for device */
+ ivrs_mappings[bdf].dte_sys_mgt_enable =
+ get_field_from_byte(ivhd_device->header.flags,
+ AMD_IOMMU_ACPI_SYS_MGT_MASK,
+ AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+
+ return sizeof(struct acpi_ivhd_device_header);
+}
+
+static u16 __init parse_ivhd_device_range(
+ union acpi_ivhd_device *ivhd_device,
+ u16 header_length, u16 block_length)
+{
+ u16 dev_length, first_bdf, last_bdf, bdf;
+ u8 sys_mgt;
+
+ dev_length = sizeof(struct acpi_ivhd_device_range);
+ if ( header_length < (block_length + dev_length) )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: Invalid Device_Entry Length!\n");
+ return 0;
+ }
+
+ if ( ivhd_device->range.trailer.type !=
+ AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Range: End_Type 0x%x\n",
+ ivhd_device->range.trailer.type);
+ return 0;
+ }
+
+ first_bdf = ivhd_device->header.dev_id;
+ if ( first_bdf >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
+ return 0;
+ }
+
+ last_bdf = ivhd_device->range.trailer.dev_id;
+ if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
+ return 0;
+ }
+
+ dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
+ first_bdf, last_bdf);
+
+ /* override flags for range of devices */
+ sys_mgt = get_field_from_byte(ivhd_device->header.flags,
+ AMD_IOMMU_ACPI_SYS_MGT_MASK,
+ AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+ for ( bdf = first_bdf; bdf <= last_bdf; ++bdf )
+ ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
+
+ return dev_length;
+}
+
+static u16 __init parse_ivhd_device_alias(
+ union acpi_ivhd_device *ivhd_device,
+ u16 header_length, u16 block_length)
+{
+ u16 dev_length, alias_id, bdf;
+
+ dev_length = sizeof(struct acpi_ivhd_device_alias);
+ if ( header_length < (block_length + dev_length) )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Device_Entry Length!\n");
+ return 0;
+ }
+
+ bdf = ivhd_device->header.dev_id;
+ if ( bdf >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Device_Entry Dev_Id 0x%x\n", bdf);
+ return 0;
+ }
+
+ alias_id = ivhd_device->alias.dev_id;
+ if ( alias_id >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Alias Dev_Id 0x%x\n", alias_id);
+ return 0;
+ }
+
+ dprintk(XENLOG_INFO, " Dev_Id Alias: 0x%x\n", alias_id);
+
+ /* override requestor_id and flags for device */
+ ivrs_mappings[bdf].dte_requestor_id = alias_id;
+ ivrs_mappings[bdf].dte_sys_mgt_enable =
+ get_field_from_byte(ivhd_device->header.flags,
+ AMD_IOMMU_ACPI_SYS_MGT_MASK,
+ AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+ ivrs_mappings[alias_id].dte_sys_mgt_enable =
+ ivrs_mappings[bdf].dte_sys_mgt_enable;
+
+ return dev_length;
+}
+
+static u16 __init parse_ivhd_device_alias_range(
+ union acpi_ivhd_device *ivhd_device,
+ u16 header_length, u16 block_length)
+{
+ u16 dev_length, first_bdf, last_bdf, alias_id, bdf;
+ u8 sys_mgt;
+
+ dev_length = sizeof(struct acpi_ivhd_device_alias_range);
+ if ( header_length < (block_length + dev_length) )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Device_Entry Length!\n");
+ return 0;
+ }
+
+ if ( ivhd_device->alias_range.trailer.type !=
+ AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Range: End_Type 0x%x\n",
+ ivhd_device->alias_range.trailer.type);
+ return 0;
+ }
+
+ first_bdf = ivhd_device->header.dev_id;
+ if ( first_bdf >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR,"IVHD Error: "
+ "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
+ return 0;
+ }
+
+ last_bdf = ivhd_device->alias_range.trailer.dev_id;
+ if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
+ return 0;
+ }
+
+ alias_id = ivhd_device->alias_range.alias.dev_id;
+ if ( alias_id >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Alias Dev_Id 0x%x\n", alias_id);
+ return 0;
+ }
+
+ dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
+ first_bdf, last_bdf);
+ dprintk(XENLOG_INFO, " Dev_Id Alias: 0x%x\n", alias_id);
+
+ /* override requestor_id and flags for range of devices */
+ sys_mgt = get_field_from_byte(ivhd_device->header.flags,
+ AMD_IOMMU_ACPI_SYS_MGT_MASK,
+ AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+ for ( bdf = first_bdf; bdf <= last_bdf; ++bdf )
+ {
+ ivrs_mappings[bdf].dte_requestor_id = alias_id;
+ ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
+ }
+ ivrs_mappings[alias_id].dte_sys_mgt_enable = sys_mgt;
+
+ return dev_length;
+}
+
+static u16 __init parse_ivhd_device_extended(
+ union acpi_ivhd_device *ivhd_device,
+ u16 header_length, u16 block_length)
+{
+ u16 dev_length, bdf;
+
+ dev_length = sizeof(struct acpi_ivhd_device_extended);
+ if ( header_length < (block_length + dev_length) )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Device_Entry Length!\n");
+ return 0;
+ }
+
+ bdf = ivhd_device->header.dev_id;
+ if ( bdf >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Device_Entry Dev_Id 0x%x\n", bdf);
+ return 0;
+ }
+
+ /* override flags for device */
+ ivrs_mappings[bdf].dte_sys_mgt_enable =
+ get_field_from_byte(ivhd_device->header.flags,
+ AMD_IOMMU_ACPI_SYS_MGT_MASK,
+ AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+
+ return dev_length;
+}
+
+static u16 __init parse_ivhd_device_extended_range(
+ union acpi_ivhd_device *ivhd_device,
+ u16 header_length, u16 block_length)
+{
+ u16 dev_length, first_bdf, last_bdf, bdf;
+ u8 sys_mgt;
+
+ dev_length = sizeof(struct acpi_ivhd_device_extended_range);
+ if ( header_length < (block_length + dev_length) )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Device_Entry Length!\n");
+ return 0;
+ }
+
+ if ( ivhd_device->extended_range.trailer.type !=
+ AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Range: End_Type 0x%x\n",
+ ivhd_device->extended_range.trailer.type);
+ return 0;
+ }
+
+ first_bdf = ivhd_device->header.dev_id;
+ if ( first_bdf >= ivrs_bdf_entries )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
+ return 0;
+ }
+
+ last_bdf = ivhd_device->extended_range.trailer.dev_id;
+ if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
+ return 0;
+ }
+
+ dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
+ first_bdf, last_bdf);
+
+ /* override flags for range of devices */
+ sys_mgt = get_field_from_byte(ivhd_device->header.flags,
+ AMD_IOMMU_ACPI_SYS_MGT_MASK,
+ AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+ for ( bdf = first_bdf; bdf <= last_bdf; ++bdf )
+ ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
+
+ return dev_length;
+}
+
+static int __init parse_ivhd_block(struct acpi_ivhd_block_header *ivhd_block)
+{
+ union acpi_ivhd_device *ivhd_device;
+ u16 block_length, dev_length;
+ struct amd_iommu *iommu;
+
+ if ( ivhd_block->header.length <
+ sizeof(struct acpi_ivhd_block_header) )
+ {
+ dprintk(XENLOG_ERR, "IVHD Error: Invalid Block Length!\n");
+ return -ENODEV;
+ }
+
+ iommu = find_iommu_from_bdf_cap(ivhd_block->header.dev_id,
+ ivhd_block->cap_offset);
+ if ( !iommu )
+ {
+ dprintk(XENLOG_ERR,
+ "IVHD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n",
+ ivhd_block->header.dev_id, ivhd_block->cap_offset);
+ return -ENODEV;
+ }
+
+ dprintk(XENLOG_INFO, "IVHD Block:\n");
+ dprintk(XENLOG_INFO, " Cap_Offset 0x%x\n",
+ ivhd_block->cap_offset);
+ dprintk(XENLOG_INFO, " MMIO_BAR_Phys 0x%lx\n",
+ (unsigned long)ivhd_block->mmio_base);
+ dprintk(XENLOG_INFO, " PCI_Segment 0x%x\n",
+ ivhd_block->pci_segment);
+ dprintk(XENLOG_INFO, " IOMMU_Info 0x%x\n",
+ ivhd_block->iommu_info);
+
+ /* override IOMMU support flags */
+ iommu->coherent = get_field_from_byte(ivhd_block->header.flags,
+ AMD_IOMMU_ACPI_COHERENT_MASK,
+ AMD_IOMMU_ACPI_COHERENT_SHIFT);
+ iommu->iotlb_support = get_field_from_byte(ivhd_block->header.flags,
+ AMD_IOMMU_ACPI_IOTLB_SUP_MASK,
+ AMD_IOMMU_ACPI_IOTLB_SUP_SHIFT);
+ iommu->isochronous = get_field_from_byte(ivhd_block->header.flags,
+ AMD_IOMMU_ACPI_ISOC_MASK,
+ AMD_IOMMU_ACPI_ISOC_SHIFT);
+ iommu->res_pass_pw = get_field_from_byte(ivhd_block->header.flags,
+ AMD_IOMMU_ACPI_RES_PASS_PW_MASK,
+ AMD_IOMMU_ACPI_RES_PASS_PW_SHIFT);
+ iommu->pass_pw = get_field_from_byte(ivhd_block->header.flags,
+ AMD_IOMMU_ACPI_PASS_PW_MASK,
+ AMD_IOMMU_ACPI_PASS_PW_SHIFT);
+ iommu->ht_tunnel_enable = get_field_from_byte(
+ ivhd_block->header.flags,
+ AMD_IOMMU_ACPI_HT_TUN_ENB_MASK,
+ AMD_IOMMU_ACPI_HT_TUN_ENB_SHIFT);
+
+ /* parse Device Entries */
+ block_length = sizeof(struct acpi_ivhd_block_header);
+ while ( ivhd_block->header.length >=
+ (block_length + sizeof(struct acpi_ivhd_device_header)) )
+ {
+ ivhd_device = (union acpi_ivhd_device *)
+ ((u8 *)ivhd_block + block_length);
+
+ dprintk(XENLOG_INFO, "IVHD Device Entry:\n");
+ dprintk(XENLOG_INFO, " Type 0x%x\n",
+ ivhd_device->header.type);
+ dprintk(XENLOG_INFO, " Dev_Id 0x%x\n",
+ ivhd_device->header.dev_id);
+ dprintk(XENLOG_INFO, " Flags 0x%x\n",
+ ivhd_device->header.flags);
+
+ switch ( ivhd_device->header.type )
+ {
+ case AMD_IOMMU_ACPI_IVHD_DEV_U32_PAD:
+ dev_length = parse_ivhd_device_padding(
+ sizeof(u32),
+ ivhd_block->header.length, block_length);
+ break;
+ case AMD_IOMMU_ACPI_IVHD_DEV_U64_PAD:
+ dev_length = parse_ivhd_device_padding(
+ sizeof(u64),
+ ivhd_block->header.length, block_length);
+ break;
+ case AMD_IOMMU_ACPI_IVHD_DEV_SELECT:
+ dev_length = parse_ivhd_device_select(ivhd_device);
+ break;
+ case AMD_IOMMU_ACPI_IVHD_DEV_RANGE_START:
+ dev_length = parse_ivhd_device_range(ivhd_device,
+ ivhd_block->header.length, block_length);
+ break;
+ case AMD_IOMMU_ACPI_IVHD_DEV_ALIAS_SELECT:
+ dev_length = parse_ivhd_device_alias(
+ ivhd_device,
+ ivhd_block->header.length, block_length);
+ break;
+ case AMD_IOMMU_ACPI_IVHD_DEV_ALIAS_RANGE:
+ dev_length = parse_ivhd_device_alias_range(
+ ivhd_device,
+ ivhd_block->header.length, block_length);
+ break;
+ case AMD_IOMMU_ACPI_IVHD_DEV_EXT_SELECT:
+ dev_length = parse_ivhd_device_extended(
+ ivhd_device,
+ ivhd_block->header.length, block_length);
+ break;
+ case AMD_IOMMU_ACPI_IVHD_DEV_EXT_RANGE:
+ dev_length = parse_ivhd_device_extended_range(
+ ivhd_device,
+ ivhd_block->header.length, block_length);
+ break;
+ default:
+ dprintk(XENLOG_ERR, "IVHD Error: "
+ "Invalid Device Type!\n");
+ dev_length = 0;
+ break;
+ }
+
+ block_length += dev_length;
+ if ( !dev_length )
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __init parse_ivrs_block(struct acpi_ivrs_block_header *ivrs_block)
+{
+ struct acpi_ivhd_block_header *ivhd_block;
+ struct acpi_ivmd_block_header *ivmd_block;
+
+ switch ( ivrs_block->type )
+ {
+ case AMD_IOMMU_ACPI_IVHD_TYPE:
+ ivhd_block = (struct acpi_ivhd_block_header *)ivrs_block;
+ return parse_ivhd_block(ivhd_block);
+
+ case AMD_IOMMU_ACPI_IVMD_ALL_TYPE:
+ case AMD_IOMMU_ACPI_IVMD_ONE_TYPE:
+ case AMD_IOMMU_ACPI_IVMD_RANGE_TYPE:
+ case AMD_IOMMU_ACPI_IVMD_IOMMU_TYPE:
+ ivmd_block = (struct acpi_ivmd_block_header *)ivrs_block;
+ return parse_ivmd_block(ivmd_block);
+
+ default:
+ dprintk(XENLOG_ERR, "IVRS Error: Invalid Block Type!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void __init dump_acpi_table_header(struct acpi_table_header *table)
+{
+ int i;
+
+ printk(XENLOG_INFO "AMD IOMMU: ACPI Table:\n");
+ printk(XENLOG_INFO " Signature ");
+ for ( i = 0; i < ACPI_NAME_SIZE; ++i )
+ printk("%c", table->signature[i]);
+ printk("\n");
+
+ printk(" Length 0x%x\n", table->length);
+ printk(" Revision 0x%x\n", table->revision);
+ printk(" CheckSum 0x%x\n", table->checksum);
+
+ printk(" OEM_Id ");
+ for ( i = 0; i < ACPI_OEM_ID_SIZE; ++i )
+ printk("%c", table->oem_id[i]);
+ printk("\n");
+
+ printk(" OEM_Table_Id ");
+ for ( i = 0; i < ACPI_OEM_TABLE_ID_SIZE; ++i )
+ printk("%c", table->oem_table_id[i]);
+ printk("\n");
+
+ printk(" OEM_Revision 0x%x\n", table->oem_revision);
+
+ printk(" Creator_Id ");
+ for ( i = 0; i < ACPI_NAME_SIZE; ++i )
+ printk("%c", table->asl_compiler_id[i]);
+ printk("\n");
+
+ printk(" Creator_Revision 0x%x\n",
+ table->asl_compiler_revision);
+}
+
+int __init parse_ivrs_table(unsigned long phys_addr,
+ unsigned long size)
+{
+ struct acpi_ivrs_block_header *ivrs_block;
+ unsigned long length, i;
+ u8 checksum, *raw_table;
+ int error = 0;
+ struct acpi_table_header *table =
+ (struct acpi_table_header *) __acpi_map_table(phys_addr, size);
+
+ BUG_ON(!table);
+
+#if 0
+ dump_acpi_table_header(table);
+#endif
+
+ /* validate checksum: sum of entire table == 0 */
+ checksum = 0;
+ raw_table = (u8 *)table;
+ for ( i = 0; i < table->length; ++i )
+ checksum += raw_table[i];
+ if ( checksum )
+ {
+ dprintk(XENLOG_ERR, "IVRS Error: "
+ "Invalid Checksum 0x%x\n", checksum);
+ return -ENODEV;
+ }
+
+ /* parse IVRS blocks */
+ length = sizeof(struct acpi_ivrs_table_header);
+ while ( error == 0 && table->length >
+ (length + sizeof(struct acpi_ivrs_block_header)) )
+ {
+ ivrs_block = (struct acpi_ivrs_block_header *)
+ ((u8 *)table + length);
+
+ dprintk(XENLOG_INFO, "IVRS Block:\n");
+ dprintk(XENLOG_INFO, " Type 0x%x\n", ivrs_block->type);
+ dprintk(XENLOG_INFO, " Flags 0x%x\n", ivrs_block->flags);
+ dprintk(XENLOG_INFO, " Length 0x%x\n", ivrs_block->length);
+ dprintk(XENLOG_INFO, " Dev_Id 0x%x\n", ivrs_block->dev_id);
+
+ if ( table->length >= (length + ivrs_block->length) )
+ error = parse_ivrs_block(ivrs_block);
+ else
+ {
+ dprintk(XENLOG_ERR, "IVRS Error: "
+ "Table Length Exceeded: 0x%x -> 0x%lx\n",
+ table->length,
+ (length + ivrs_block->length));
+ return -ENODEV;
+ }
+ length += ivrs_block->length;
+ }
+
+ return error;
+}
diff -r 36529ef3ef23 -r 0e22182446fa xen/drivers/passthrough/amd/iommu_detect.c
--- a/xen/drivers/passthrough/amd/iommu_detect.c Thu Feb 28 13:19:38 2008 +0000
+++ b/xen/drivers/passthrough/amd/iommu_detect.c Thu Feb 28 13:21:49 2008 +0000
@@ -86,30 +86,24 @@ int __init get_iommu_capabilities(u8 bus
int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
struct amd_iommu *iommu)
{
- u32 cap_header, cap_range;
+ u32 cap_header, cap_range, misc_info;
u64 mmio_bar;
-#if HACK_BIOS_SETTINGS
- /* remove it when BIOS available */
- write_pci_config(bus, dev, func,
- cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET, 0x00000000);
- write_pci_config(bus, dev, func,
- cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET, 0x40000001);
- /* remove it when BIOS available */
-#endif
-
mmio_bar = (u64)read_pci_config(bus, dev, func,
- cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
+ cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
mmio_bar |= read_pci_config(bus, dev, func,
- cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET) &
- PCI_CAP_MMIO_BAR_LOW_MASK;
- iommu->mmio_base_phys = (unsigned long)mmio_bar;
-
- if ( (mmio_bar == 0) || ( (mmio_bar & 0x3FFF) != 0 ) ) {
+ cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET);
+ iommu->mmio_base_phys = mmio_bar & (u64)~0x3FFF;
+
+ if ( (mmio_bar & 0x1) == 0 || iommu->mmio_base_phys == 0 )
+ {
dprintk(XENLOG_ERR ,
"AMD IOMMU: Invalid MMIO_BAR = 0x%"PRIx64"\n", mmio_bar);
return -ENODEV;
}
+
+ iommu->bdf = (bus << 8) | PCI_DEVFN(dev, func);
+ iommu->cap_offset = cap_ptr;
cap_header = read_pci_config(bus, dev, func, cap_ptr);
iommu->revision = get_field_from_reg_u32(cap_header,
@@ -119,12 +113,15 @@ int __init get_iommu_capabilities(u8 bus
iommu->ht_tunnel_support = get_field_from_reg_u32(cap_header,
PCI_CAP_HT_TUNNEL_MASK,
PCI_CAP_HT_TUNNEL_SHIFT);
- iommu->not_present_cached = get_field_from_reg_u32(cap_header,
+ iommu->pte_not_present_cached = get_field_from_reg_u32(cap_header,
PCI_CAP_NP_CACHE_MASK,
PCI_CAP_NP_CACHE_SHIFT);
cap_range = read_pci_config(bus, dev, func,
cap_ptr + PCI_CAP_RANGE_OFFSET);
+ iommu->unit_id = get_field_from_reg_u32(cap_range,
+ PCI_CAP_UNIT_ID_MASK,
+ PCI_CAP_UNIT_ID_SHIFT);
iommu->root_bus = get_field_from_reg_u32(cap_range,
PCI_CAP_BUS_NUMBER_MASK,
PCI_CAP_BUS_NUMBER_SHIFT);
@@ -135,6 +132,11 @@ int __init get_iommu_capabilities(u8 bus
PCI_CAP_LAST_DEVICE_MASK,
PCI_CAP_LAST_DEVICE_SHIFT);
+ misc_info = read_pci_config(bus, dev, func,
+ cap_ptr + PCI_MISC_INFO_OFFSET);
+ iommu->msi_number = get_field_from_reg_u32(misc_info,
+ PCI_CAP_MSI_NUMBER_MASK,
+ PCI_CAP_MSI_NUMBER_SHIFT);
return 0;
}
diff -r 36529ef3ef23 -r 0e22182446fa xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c Thu Feb 28 13:19:38 2008 +0000
+++ b/xen/drivers/passthrough/amd/iommu_init.c Thu Feb 28 13:21:49 2008 +0000
@@ -137,8 +137,49 @@ static void __init set_iommu_command_buf
writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
}
+static void __init register_iommu_exclusion_range(struct amd_iommu *iommu)
+{
+ u64 addr_lo, addr_hi;
+ u32 entry;
+
+ addr_lo = iommu->exclusion_limit & DMA_32BIT_MASK;
+ addr_hi = iommu->exclusion_limit >> 32;
+
+ set_field_in_reg_u32((u32)addr_hi, 0,
+ IOMMU_EXCLUSION_LIMIT_HIGH_MASK,
+ IOMMU_EXCLUSION_LIMIT_HIGH_SHIFT, &entry);
+ writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_LIMIT_HIGH_OFFSET);
+
+ set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
+ IOMMU_EXCLUSION_LIMIT_LOW_MASK,
+ IOMMU_EXCLUSION_LIMIT_LOW_SHIFT, &entry);
+ writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_LIMIT_LOW_OFFSET);
+
+ addr_lo = iommu->exclusion_base & DMA_32BIT_MASK;
+ addr_hi = iommu->exclusion_base >> 32;
+
+ set_field_in_reg_u32((u32)addr_hi, 0,
+ IOMMU_EXCLUSION_BASE_HIGH_MASK,
+ IOMMU_EXCLUSION_BASE_HIGH_SHIFT, &entry);
+ writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_HIGH_OFFSET);
+
+ set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
+ IOMMU_EXCLUSION_BASE_LOW_MASK,
+ IOMMU_EXCLUSION_BASE_LOW_SHIFT, &entry);
+
+ set_field_in_reg_u32(iommu->exclusion_allow_all, entry,
+ IOMMU_EXCLUSION_ALLOW_ALL_MASK,
+ IOMMU_EXCLUSION_ALLOW_ALL_SHIFT, &entry);
+
+ set_field_in_reg_u32(iommu->exclusion_enable, entry,
+ IOMMU_EXCLUSION_RANGE_ENABLE_MASK,
+ IOMMU_EXCLUSION_RANGE_ENABLE_SHIFT, &entry);
+ writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_LOW_OFFSET);
+}
+
void __init enable_iommu(struct amd_iommu *iommu)
{
+ register_iommu_exclusion_range(iommu);
set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
printk("AMD IOMMU %d: Enabled\n", nr_amd_iommus);
diff -r 36529ef3ef23 -r 0e22182446fa xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c Thu Feb 28 13:19:38 2008 +0000
+++ b/xen/drivers/passthrough/amd/iommu_map.c Thu Feb 28 13:21:49 2008 +0000
@@ -234,16 +234,19 @@ static void amd_iommu_set_page_directory
}
void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u16 domain_id,
- u8 paging_mode)
+ u8 sys_mgt, u8 dev_ex, u8 paging_mode)
{
u64 addr_hi, addr_lo;
u32 entry;
- dte[6] = dte[5] = dte[4] = 0;
-
- set_field_in_reg_u32(IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED, 0,
+ dte[7] = dte[6] = dte[5] = dte[4] = 0;
+
+ set_field_in_reg_u32(sys_mgt, 0,
IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK,
IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT, &entry);
+ set_field_in_reg_u32(dev_ex, entry,
+ IOMMU_DEV_TABLE_ALLOW_EXCLUSION_MASK,
+ IOMMU_DEV_TABLE_ALLOW_EXCLUSION_SHIFT, &entry);
dte[3] = entry;
set_field_in_reg_u32(domain_id, 0,
@@ -448,3 +451,34 @@ int amd_iommu_unmap_page(struct domain *
return 0;
}
+
+int amd_iommu_reserve_domain_unity_map(
+ struct domain *domain,
+ unsigned long phys_addr,
+ unsigned long size, int iw, int ir)
+{
+ unsigned long flags, npages, i;
+ void *pte;
+ struct hvm_iommu *hd = domain_hvm_iommu(domain);
+
+ npages = region_to_pages(phys_addr, size);
+
+ spin_lock_irqsave(&hd->mapping_lock, flags);
+ for ( i = 0; i < npages; ++i )
+ {
+ pte = get_pte_from_page_tables(hd->root_table,
+ hd->paging_mode, phys_addr>>PAGE_SHIFT);
+ if ( pte == 0 )
+ {
+ dprintk(XENLOG_ERR,
+ "AMD IOMMU: Invalid IO pagetable entry phys_addr = %lx\n",
phys_addr);
+ spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ return -EFAULT;
+ }
+ set_page_table_entry_present((u32 *)pte,
+ phys_addr, iw, ir);
+ phys_addr += PAGE_SIZE;
+ }
+ spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ return 0;
+}
diff -r 36529ef3ef23 -r 0e22182446fa xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c Thu Feb 28 13:19:38 2008 +0000
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c Thu Feb 28 13:21:49 2008 +0000
@@ -20,6 +20,7 @@
#include <asm/amd-iommu.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
+#include <asm/hvm/svm/amd-iommu-acpi.h>
#include <xen/sched.h>
#include <asm/mm.h>
#include "../pci-direct.h"
@@ -30,6 +31,9 @@ static long amd_iommu_cmd_buffer_entries
static long amd_iommu_cmd_buffer_entries = IOMMU_CMD_BUFFER_DEFAULT_ENTRIES;
int nr_amd_iommus = 0;
+unsigned short ivrs_bdf_entries = 0;
+struct ivrs_mappings *ivrs_mappings = NULL;
+
/* will set if amd-iommu HW is found */
int amd_iommu_enabled = 0;
@@ -82,13 +86,12 @@ static void __init detect_cleanup(void)
deallocate_iommu_resources(iommu);
xfree(iommu);
}
-}
-
-static int requestor_id_from_bdf(int bdf)
-{
- /* HACK - HACK */
- /* account for possible 'aliasing' by parent device */
- return bdf;
+
+ if ( ivrs_mappings )
+ {
+ xfree(ivrs_mappings);
+ ivrs_mappings = NULL;
+ }
}
static int __init allocate_iommu_table_struct(struct table_struct *table,
@@ -179,10 +182,21 @@ static int __init amd_iommu_init(void)
{
struct amd_iommu *iommu;
unsigned long flags;
+ u16 bdf;
for_each_amd_iommu ( iommu )
{
spin_lock_irqsave(&iommu->lock, flags);
+
+ /* assign default IOMMU values */
+ iommu->coherent = IOMMU_CONTROL_ENABLED;
+ iommu->isochronous = IOMMU_CONTROL_ENABLED;
+ iommu->res_pass_pw = IOMMU_CONTROL_ENABLED;
+ iommu->pass_pw = IOMMU_CONTROL_ENABLED;
+ iommu->ht_tunnel_enable = iommu->ht_tunnel_support ?
+ IOMMU_CONTROL_ENABLED : IOMMU_CONTROL_DISABLED;
+ iommu->exclusion_enable = IOMMU_CONTROL_DISABLED;
+ iommu->exclusion_allow_all = IOMMU_CONTROL_DISABLED;
/* register IOMMU data strucures in MMIO space */
if ( map_iommu_mmio_region(iommu) != 0 )
@@ -190,10 +204,30 @@ static int __init amd_iommu_init(void)
register_iommu_dev_table_in_mmio_space(iommu);
register_iommu_cmd_buffer_in_mmio_space(iommu);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ }
+
+ /* assign default values for device entries */
+ for ( bdf = 0; bdf < ivrs_bdf_entries; ++bdf )
+ {
+ ivrs_mappings[bdf].dte_requestor_id = bdf;
+ ivrs_mappings[bdf].dte_sys_mgt_enable =
+ IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED;
+ ivrs_mappings[bdf].dte_allow_exclusion =
+ IOMMU_CONTROL_DISABLED;
+ ivrs_mappings[bdf].unity_map_enable =
+ IOMMU_CONTROL_DISABLED;
+ }
+
+ if ( acpi_table_parse(ACPI_IVRS, parse_ivrs_table) != 0 )
+ dprintk(XENLOG_INFO, "AMD IOMMU: Did not find IVRS table!\n");
+
+ for_each_amd_iommu ( iommu )
+ {
+ spin_lock_irqsave(&iommu->lock, flags);
/* enable IOMMU translation services */
enable_iommu(iommu);
nr_amd_iommus++;
-
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -229,31 +263,38 @@ struct amd_iommu *find_iommu_for_device(
}
void amd_iommu_setup_domain_device(
- struct domain *domain, struct amd_iommu *iommu, int requestor_id)
+ struct domain *domain, struct amd_iommu *iommu, int bdf)
{
void *dte;
u64 root_ptr;
unsigned long flags;
+ int req_id;
+ u8 sys_mgt, dev_ex;
struct hvm_iommu *hd = domain_hvm_iommu(domain);
- BUG_ON( !hd->root_table||!hd->paging_mode );
+ BUG_ON( !hd->root_table || !hd->paging_mode );
root_ptr = (u64)virt_to_maddr(hd->root_table);
+ /* get device-table entry */
+ req_id = ivrs_mappings[bdf].dte_requestor_id;
dte = iommu->dev_table.buffer +
- (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
+ (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
if ( !amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
{
spin_lock_irqsave(&iommu->lock, flags);
- amd_iommu_set_dev_table_entry(
- (u32 *)dte,
- root_ptr, hd->domain_id, hd->paging_mode);
- invalidate_dev_table_entry(iommu, requestor_id);
+ /* bind DTE to domain page-tables */
+ sys_mgt = ivrs_mappings[req_id].dte_sys_mgt_enable;
+ dev_ex = ivrs_mappings[req_id].dte_allow_exclusion;
+ amd_iommu_set_dev_table_entry((u32 *)dte, root_ptr,
+ req_id, sys_mgt, dev_ex, hd->paging_mode);
+
+ invalidate_dev_table_entry(iommu, req_id);
flush_command_buffer(iommu);
dprintk(XENLOG_INFO, "AMD IOMMU: Set DTE req_id:%x, "
"root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
- requestor_id, root_ptr, hd->domain_id, hd->paging_mode);
+ req_id, root_ptr, hd->domain_id, hd->paging_mode);
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -266,7 +307,7 @@ void __init amd_iommu_setup_dom0_devices
struct pci_dev *pdev;
int bus, dev, func;
u32 l;
- int req_id, bdf;
+ int bdf;
for ( bus = 0; bus < 256; bus++ )
{
@@ -286,11 +327,12 @@ void __init amd_iommu_setup_dom0_devices
list_add_tail(&pdev->list, &hd->pdev_list);
bdf = (bus << 8) | pdev->devfn;
- req_id = requestor_id_from_bdf(bdf);
- iommu = find_iommu_for_device(bus, pdev->devfn);
+ /* supported device? */
+ iommu = (bdf < ivrs_bdf_entries) ?
+ find_iommu_for_device(bus, pdev->devfn) : NULL;
if ( iommu )
- amd_iommu_setup_domain_device(dom0, iommu, req_id);
+ amd_iommu_setup_domain_device(dom0, iommu, bdf);
}
}
}
@@ -299,6 +341,8 @@ int amd_iommu_detect(void)
int amd_iommu_detect(void)
{
unsigned long i;
+ int last_bus;
+ struct amd_iommu *iommu;
if ( !enable_amd_iommu )
{
@@ -318,6 +362,28 @@ int amd_iommu_detect(void)
{
printk("AMD IOMMU: Not found!\n");
return 0;
+ }
+ else
+ {
+ /* allocate 'ivrs mappings' table */
+ /* note: the table has entries to accommodate all IOMMUs */
+ last_bus = 0;
+ for_each_amd_iommu ( iommu )
+ if ( iommu->last_downstream_bus > last_bus )
+ last_bus = iommu->last_downstream_bus;
+
+ ivrs_bdf_entries = (last_bus + 1) *
+ IOMMU_DEV_TABLE_ENTRIES_PER_BUS;
+ ivrs_mappings = xmalloc_array(struct ivrs_mappings, ivrs_bdf_entries);
+
+ if ( !ivrs_mappings )
+ {
+ dprintk(XENLOG_ERR, "AMD IOMMU:"
+ " Error allocating IVRS DevMappings table\n");
+ goto error_out;
+ }
+ memset(ivrs_mappings, 0,
+ ivrs_bdf_entries * sizeof(struct ivrs_mappings));
}
if ( amd_iommu_init() != 0 )
@@ -407,23 +473,25 @@ int amd_iommu_domain_init(struct domain
}
static void amd_iommu_disable_domain_device(
- struct domain *domain, struct amd_iommu *iommu, u16 requestor_id)
+ struct domain *domain, struct amd_iommu *iommu, int bdf)
{
void *dte;
unsigned long flags;
-
+ int req_id;
+
+ req_id = ivrs_mappings[bdf].dte_requestor_id;
dte = iommu->dev_table.buffer +
- (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
+ (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
if ( amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
{
spin_lock_irqsave(&iommu->lock, flags);
memset (dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE);
- invalidate_dev_table_entry(iommu, requestor_id);
+ invalidate_dev_table_entry(iommu, req_id);
flush_command_buffer(iommu);
dprintk(XENLOG_INFO , "AMD IOMMU: disable DTE 0x%x,"
" domain_id:%d, paging_mode:%d\n",
- requestor_id, domain_hvm_iommu(domain)->domain_id,
+ req_id, domain_hvm_iommu(domain)->domain_id,
domain_hvm_iommu(domain)->paging_mode);
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -438,7 +506,7 @@ static int reassign_device( struct domai
struct hvm_iommu *target_hd = domain_hvm_iommu(target);
struct pci_dev *pdev;
struct amd_iommu *iommu;
- int req_id, bdf;
+ int bdf;
unsigned long flags;
for_each_pdev( source, pdev )
@@ -450,12 +518,13 @@ static int reassign_device( struct domai
pdev->devfn = devfn;
bdf = (bus << 8) | devfn;
- req_id = requestor_id_from_bdf(bdf);
- iommu = find_iommu_for_device(bus, devfn);
+ /* supported device? */
+ iommu = (bdf < ivrs_bdf_entries) ?
+ find_iommu_for_device(bus, pdev->devfn) : NULL;
if ( iommu )
{
- amd_iommu_disable_domain_device(source, iommu, req_id);
+ amd_iommu_disable_domain_device(source, iommu, bdf);
/* Move pci device from the source domain to target domain. */
spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
@@ -463,7 +532,7 @@ static int reassign_device( struct domai
spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
- amd_iommu_setup_domain_device(target, iommu, req_id);
+ amd_iommu_setup_domain_device(target, iommu, bdf);
gdprintk(XENLOG_INFO ,
"AMD IOMMU: reassign %x:%x.%x domain %d -> domain %d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
@@ -484,6 +553,19 @@ static int reassign_device( struct domai
int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
{
+ int bdf = (bus << 8) | devfn;
+ int req_id;
+ req_id = ivrs_mappings[bdf].dte_requestor_id;
+
+ if ( ivrs_mappings[req_id].unity_map_enable )
+ {
+ amd_iommu_reserve_domain_unity_map(d,
+ ivrs_mappings[req_id].addr_range_start,
+ ivrs_mappings[req_id].addr_range_length,
+ ivrs_mappings[req_id].write_permission,
+ ivrs_mappings[req_id].read_permission);
+ }
+
pdev_flr(bus, devfn);
return reassign_device(dom0, d, bus, devfn);
}
diff -r 36529ef3ef23 -r 0e22182446fa xen/include/asm-x86/amd-iommu.h
--- a/xen/include/asm-x86/amd-iommu.h Thu Feb 28 13:19:38 2008 +0000
+++ b/xen/include/asm-x86/amd-iommu.h Thu Feb 28 13:21:49 2008 +0000
@@ -43,14 +43,25 @@ struct amd_iommu {
struct list_head list;
spinlock_t lock; /* protect iommu */
- int iotlb_support;
- int ht_tunnel_support;
- int not_present_cached;
+ u16 bdf;
+ u8 cap_offset;
u8 revision;
+ u8 unit_id;
+ u8 msi_number;
u8 root_bus;
u8 first_devfn;
u8 last_devfn;
+
+ u8 pte_not_present_cached;
+ u8 ht_tunnel_support;
+ u8 iotlb_support;
+
+ u8 isochronous;
+ u8 coherent;
+ u8 res_pass_pw;
+ u8 pass_pw;
+ u8 ht_tunnel_enable;
int last_downstream_bus;
int downstream_bus_present[PCI_MAX_BUS_COUNT];
@@ -61,10 +72,23 @@ struct amd_iommu {
struct table_struct dev_table;
struct table_struct cmd_buffer;
u32 cmd_buffer_tail;
+ struct table_struct event_log;
+ u32 event_log_head;
- int exclusion_enabled;
+ int exclusion_enable;
+ int exclusion_allow_all;
unsigned long exclusion_base;
unsigned long exclusion_limit;
};
+struct ivrs_mappings {
+ u16 dte_requestor_id;
+ u8 dte_sys_mgt_enable;
+ u8 dte_allow_exclusion;
+ u8 unity_map_enable;
+ u8 write_permission;
+ u8 read_permission;
+ unsigned long addr_range_start;
+ unsigned long addr_range_length;
+};
#endif /* _ASM_X86_64_AMD_IOMMU_H */
diff -r 36529ef3ef23 -r 0e22182446fa xen/include/asm-x86/hvm/svm/amd-iommu-acpi.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-acpi.h Thu Feb 28 13:21:49 2008 +0000
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ * Author: Leo Duran <leo.duran@xxxxxxx>
+ * Author: Wei Wang <wei.wang2@xxxxxxx> - adapted to xen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_X86_64_AMD_IOMMU_ACPI_H
+#define _ASM_X86_64_AMD_IOMMU_ACPI_H
+
+#include <xen/acpi.h>
+
+/* I/O Virtualization Reporting Structure */
+#define AMD_IOMMU_ACPI_IVRS_SIG "IVRS"
+#define AMD_IOMMU_ACPI_IVHD_TYPE 0x10
+#define AMD_IOMMU_ACPI_IVMD_ALL_TYPE 0x20
+#define AMD_IOMMU_ACPI_IVMD_ONE_TYPE 0x21
+#define AMD_IOMMU_ACPI_IVMD_RANGE_TYPE 0x22
+#define AMD_IOMMU_ACPI_IVMD_IOMMU_TYPE 0x23
+
+/* 4-byte Device Entries */
+#define AMD_IOMMU_ACPI_IVHD_DEV_U32_PAD 0
+#define AMD_IOMMU_ACPI_IVHD_DEV_SELECT 2
+#define AMD_IOMMU_ACPI_IVHD_DEV_RANGE_START 3
+#define AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END 4
+
+/* 8-byte Device Entries */
+#define AMD_IOMMU_ACPI_IVHD_DEV_U64_PAD 64
+#define AMD_IOMMU_ACPI_IVHD_DEV_ALIAS_SELECT 66
+#define AMD_IOMMU_ACPI_IVHD_DEV_ALIAS_RANGE 67
+#define AMD_IOMMU_ACPI_IVHD_DEV_EXT_SELECT 70
+#define AMD_IOMMU_ACPI_IVHD_DEV_EXT_RANGE 71
+
+/* IVHD IOMMU Flags */
+#define AMD_IOMMU_ACPI_COHERENT_MASK 0x20
+#define AMD_IOMMU_ACPI_COHERENT_SHIFT 5
+#define AMD_IOMMU_ACPI_IOTLB_SUP_MASK 0x10
+#define AMD_IOMMU_ACPI_IOTLB_SUP_SHIFT 4
+#define AMD_IOMMU_ACPI_ISOC_MASK 0x08
+#define AMD_IOMMU_ACPI_ISOC_SHIFT 3
+#define AMD_IOMMU_ACPI_RES_PASS_PW_MASK 0x04
+#define AMD_IOMMU_ACPI_RES_PASS_PW_SHIFT 2
+#define AMD_IOMMU_ACPI_PASS_PW_MASK 0x02
+#define AMD_IOMMU_ACPI_PASS_PW_SHIFT 1
+#define AMD_IOMMU_ACPI_HT_TUN_ENB_MASK 0x01
+#define AMD_IOMMU_ACPI_HT_TUN_ENB_SHIFT 0
+
+/* IVHD Device Flags */
+#define AMD_IOMMU_ACPI_LINT1_PASS_MASK 0x80
+#define AMD_IOMMU_ACPI_LINT1_PASS_SHIFT 7
+#define AMD_IOMMU_ACPI_LINT0_PASS_MASK 0x40
+#define AMD_IOMMU_ACPI_LINT0_PASS_SHIFT 6
+#define AMD_IOMMU_ACPI_SYS_MGT_MASK 0x30
+#define AMD_IOMMU_ACPI_SYS_MGT_SHIFT 4
+#define AMD_IOMMU_ACPI_NMI_PASS_MASK 0x04
+#define AMD_IOMMU_ACPI_NMI_PASS_SHIFT 2
+#define AMD_IOMMU_ACPI_EINT_PASS_MASK 0x02
+#define AMD_IOMMU_ACPI_EINT_PASS_SHIFT 1
+#define AMD_IOMMU_ACPI_INIT_PASS_MASK 0x01
+#define AMD_IOMMU_ACPI_INIT_PASS_SHIFT 0
+
+/* IVHD Device Extended Flags */
+#define AMD_IOMMU_ACPI_ATS_DISABLED_MASK 0x80000000
+#define AMD_IOMMU_ACPI_ATS_DISABLED_SHIFT 31
+
+/* IVMD Device Flags */
+#define AMD_IOMMU_ACPI_EXCLUSION_RANGE_MASK 0x08
+#define AMD_IOMMU_ACPI_EXCLUSION_RANGE_SHIFT 3
+#define AMD_IOMMU_ACPI_IW_PERMISSION_MASK 0x04
+#define AMD_IOMMU_ACPI_IW_PERMISSION_SHIFT 2
+#define AMD_IOMMU_ACPI_IR_PERMISSION_MASK 0x02
+#define AMD_IOMMU_ACPI_IR_PERMISSION_SHIFT 1
+#define AMD_IOMMU_ACPI_UNITY_MAPPING_MASK 0x01
+#define AMD_IOMMU_ACPI_UNITY_MAPPING_SHIFT 0
+
+#define ACPI_OEM_ID_SIZE 6
+#define ACPI_OEM_TABLE_ID_SIZE 8
+
+#pragma pack(1)
+struct acpi_ivrs_table_header {
+ struct acpi_table_header acpi_header;
+ u32 io_info;
+ u8 reserved[8];
+};
+
+struct acpi_ivrs_block_header {
+ u8 type;
+ u8 flags;
+ u16 length;
+ u16 dev_id;
+};
+
+struct acpi_ivhd_block_header {
+ struct acpi_ivrs_block_header header;
+ u16 cap_offset;
+ u64 mmio_base;
+ u16 pci_segment;
+ u16 iommu_info;
+ u8 reserved[4];
+};
+
+struct acpi_ivhd_device_header {
+ u8 type;
+ u16 dev_id;
+ u8 flags;
+};
+
+struct acpi_ivhd_device_trailer {
+ u8 type;
+ u16 dev_id;
+ u8 reserved;
+};
+
+struct acpi_ivhd_device_range {
+ struct acpi_ivhd_device_header header;
+ struct acpi_ivhd_device_trailer trailer;
+};
+
+struct acpi_ivhd_device_alias {
+ struct acpi_ivhd_device_header header;
+ u8 reserved1;
+ u16 dev_id;
+ u8 reserved2;
+};
+
+struct acpi_ivhd_device_alias_range {
+ struct acpi_ivhd_device_alias alias;
+ struct acpi_ivhd_device_trailer trailer;
+};
+
+struct acpi_ivhd_device_extended {
+ struct acpi_ivhd_device_header header;
+ u32 ext_flags;
+};
+
+struct acpi_ivhd_device_extended_range {
+ struct acpi_ivhd_device_extended extended;
+ struct acpi_ivhd_device_trailer trailer;
+};
+
+union acpi_ivhd_device {
+ struct acpi_ivhd_device_header header;
+ struct acpi_ivhd_device_range range;
+ struct acpi_ivhd_device_alias alias;
+ struct acpi_ivhd_device_alias_range alias_range;
+ struct acpi_ivhd_device_extended extended;
+ struct acpi_ivhd_device_extended_range extended_range;
+};
+
+struct acpi_ivmd_block_header {
+ struct acpi_ivrs_block_header header;
+ union {
+ u16 last_dev_id;
+ u16 cap_offset;
+ u16 reserved1;
+ };
+ u64 reserved2;
+ u64 start_addr;
+ u64 mem_length;
+};
+#pragma pack()
+
+#endif /* _ASM_X86_64_AMD_IOMMU_ACPI_H */
diff -r 36529ef3ef23 -r 0e22182446fa xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h Thu Feb 28 13:19:38 2008 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h Thu Feb 28 13:21:49 2008 +0000
@@ -117,6 +117,12 @@
#define PCI_CAP_FIRST_DEVICE_SHIFT 16
#define PCI_CAP_LAST_DEVICE_MASK 0xFF000000
#define PCI_CAP_LAST_DEVICE_SHIFT 24
+
+#define PCI_CAP_UNIT_ID_MASK 0x0000001F
+#define PCI_CAP_UNIT_ID_SHIFT 0
+#define PCI_MISC_INFO_OFFSET 0x10
+#define PCI_CAP_MSI_NUMBER_MASK 0x0000001F
+#define PCI_CAP_MSI_NUMBER_SHIFT 0
/* Device Table */
#define IOMMU_DEV_TABLE_BASE_LOW_OFFSET 0x00
diff -r 36529ef3ef23 -r 0e22182446fa xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Thu Feb 28 13:19:38 2008 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Thu Feb 28 13:21:49 2008 +0000
@@ -21,6 +21,7 @@
#ifndef _ASM_X86_64_AMD_IOMMU_PROTO_H
#define _ASM_X86_64_AMD_IOMMU_PROTO_H
+#include <xen/sched.h>
#include <asm/amd-iommu.h>
#define for_each_amd_iommu(amd_iommu) \
@@ -54,10 +55,12 @@ int amd_iommu_map_page(struct domain *d,
int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
+int amd_iommu_reserve_domain_unity_map(struct domain *domain,
+ unsigned long phys_addr, unsigned long size, int iw, int ir);
/* device table functions */
-void amd_iommu_set_dev_table_entry(u32 *dte,
- u64 root_ptr, u16 domain_id, u8 paging_mode);
+void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr,
+ u16 domain_id, u8 sys_mgt, u8 dev_ex, u8 paging_mode);
int amd_iommu_is_dte_page_translation_valid(u32 *entry);
void invalidate_dev_table_entry(struct amd_iommu *iommu,
u16 devic_id);
@@ -69,10 +72,13 @@ void flush_command_buffer(struct amd_iom
/* iommu domain funtions */
int amd_iommu_domain_init(struct domain *domain);
void amd_iommu_setup_domain_device(struct domain *domain,
- struct amd_iommu *iommu, int requestor_id);
+ struct amd_iommu *iommu, int bdf);
/* find iommu for bdf */
struct amd_iommu *find_iommu_for_device(int bus, int devfn);
+
+/* amd-iommu-acpi functions */
+int __init parse_ivrs_table(unsigned long phys_addr, unsigned long size);
static inline u32 get_field_from_reg_u32(u32 reg_value, u32 mask, u32 shift)
{
@@ -91,4 +97,16 @@ static inline u32 set_field_in_reg_u32(u
return reg_value;
}
+static inline u8 get_field_from_byte(u8 value, u8 mask, u8 shift)
+{
+ u8 field;
+ field = (value & mask) >> shift;
+ return field;
+}
+
+static inline unsigned long region_to_pages(unsigned long addr, unsigned long size)
+{
+ return (PAGE_ALIGN(addr + size) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
+}
+
#endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */
diff -r 36529ef3ef23 -r 0e22182446fa xen/include/xen/acpi.h
--- a/xen/include/xen/acpi.h Thu Feb 28 13:19:38 2008 +0000
+++ b/xen/include/xen/acpi.h Thu Feb 28 13:21:49 2008 +0000
@@ -368,6 +368,7 @@ enum acpi_table_id {
ACPI_HPET,
ACPI_MCFG,
ACPI_DMAR,
+ ACPI_IVRS,
ACPI_TABLE_COUNT
};