# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1201186163 0
# Node ID b96a1adbcac05731af3b06cb73eacc628230d950
# Parent 430b2159c4b723c7c6e45df102861be558159093
Revert VT-d patches pulled from xen-unstable since the 3.2.0 release.
These are 3.2-testing changesets 16721 and 16723, or in xen-unstable:
xen-unstable changeset: 16775:cc5bb500df5feda0755b865134c47f3fe9cec46d
xen-unstable changeset: 16753:2633dc4f55d4010d7d64e9c6a1cf0b28707c7950
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
xen/arch/x86/hvm/vmx/vtd/extern.h | 55 ---
xen/arch/x86/hvm/vmx/vtd/qinval.c | 456 ------------------------------
xen/arch/x86/hvm/vmx/vtd/vtd.h | 54 ---
xen/arch/x86/hvm/vmx/vtd/Makefile | 1
xen/arch/x86/hvm/vmx/vtd/dmar.c | 223 +++++---------
xen/arch/x86/hvm/vmx/vtd/dmar.h | 31 --
xen/arch/x86/hvm/vmx/vtd/intel-iommu.c | 165 +++-------
xen/arch/x86/hvm/vmx/vtd/utils.c | 9
xen/include/asm-x86/hvm/vmx/intel-iommu.h | 93 +-----
xen/include/asm-x86/iommu.h | 4
10 files changed, 171 insertions(+), 920 deletions(-)
diff -r 430b2159c4b7 -r b96a1adbcac0 xen/arch/x86/hvm/vmx/vtd/Makefile
--- a/xen/arch/x86/hvm/vmx/vtd/Makefile Tue Jan 22 11:25:21 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vtd/Makefile Thu Jan 24 14:49:23 2008 +0000
@@ -2,4 +2,3 @@ obj-y += dmar.o
obj-y += dmar.o
obj-y += utils.o
obj-y += io.o
-obj-y += qinval.o
diff -r 430b2159c4b7 -r b96a1adbcac0 xen/arch/x86/hvm/vmx/vtd/dmar.c
--- a/xen/arch/x86/hvm/vmx/vtd/dmar.c Tue Jan 22 11:25:21 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vtd/dmar.c Thu Jan 24 14:49:23 2008 +0000
@@ -43,6 +43,7 @@ LIST_HEAD(acpi_drhd_units);
LIST_HEAD(acpi_drhd_units);
LIST_HEAD(acpi_rmrr_units);
LIST_HEAD(acpi_atsr_units);
+LIST_HEAD(acpi_ioapic_units);
u8 dmar_host_address_width;
@@ -65,47 +66,6 @@ static int __init acpi_register_rmrr_uni
return 0;
}
-static int acpi_ioapic_device_match(
- struct list_head *ioapic_list, unsigned int apic_id)
-{
- struct acpi_ioapic_unit *ioapic;
- list_for_each_entry( ioapic, ioapic_list, list ) {
- if (ioapic->apic_id == apic_id)
- return 1;
- }
- return 0;
-}
-
-struct acpi_drhd_unit * ioapic_to_drhd(unsigned int apic_id)
-{
- struct acpi_drhd_unit *drhd;
- list_for_each_entry( drhd, &acpi_drhd_units, list ) {
- if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) ) {
- dprintk(XENLOG_INFO VTDPREFIX,
- "ioapic_to_drhd: drhd->address = %lx\n",
- drhd->address);
- return drhd;
- }
- }
- return NULL;
-}
-
-struct iommu * ioapic_to_iommu(unsigned int apic_id)
-{
- struct acpi_drhd_unit *drhd;
-
- list_for_each_entry( drhd, &acpi_drhd_units, list ) {
- if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) ) {
- dprintk(XENLOG_INFO VTDPREFIX,
- "ioapic_to_iommu: drhd->address = %lx\n",
- drhd->address);
- return drhd->iommu;
- }
- }
- dprintk(XENLOG_WARNING VTDPREFIX, "returning NULL\n");
- return NULL;
-}
-
static int acpi_pci_device_match(struct pci_dev *devices, int cnt,
struct pci_dev *dev)
{
@@ -151,18 +111,18 @@ struct acpi_drhd_unit * acpi_find_matche
if ( acpi_pci_device_match(drhd->devices,
drhd->devices_cnt, dev) )
{
- dprintk(XENLOG_INFO VTDPREFIX,
- "acpi_find_matched_drhd_unit: drhd->address = %lx\n",
- drhd->address);
+ gdprintk(XENLOG_INFO VTDPREFIX,
+ "acpi_find_matched_drhd_unit: drhd->address = %lx\n",
+ drhd->address);
return drhd;
}
}
if ( include_all_drhd )
{
- dprintk(XENLOG_INFO VTDPREFIX,
- "acpi_find_matched_drhd_unit:include_all_drhd->addr = %lx\n",
- include_all_drhd->address);
+ gdprintk(XENLOG_INFO VTDPREFIX,
+ "acpi_find_matched_drhd_unit:include_all_drhd->addr = %lx\n",
+ include_all_drhd->address);
return include_all_drhd;
}
@@ -200,8 +160,8 @@ struct acpi_atsr_unit * acpi_find_matche
if ( all_ports_atsru )
{
- dprintk(XENLOG_INFO VTDPREFIX,
- "acpi_find_matched_atsr_unit: all_ports_atsru\n");
+ gdprintk(XENLOG_INFO VTDPREFIX,
+ "acpi_find_matched_atsr_unit: all_ports_atsru\n");
return all_ports_atsru;
}
@@ -220,10 +180,9 @@ static int scope_device_count(void *star
while ( start < end )
{
scope = start;
- if ( (scope->length < MIN_SCOPE_LEN) ||
- (scope->dev_type >= ACPI_DEV_ENTRY_COUNT) )
- {
- dprintk(XENLOG_WARNING VTDPREFIX, "Invalid device scope\n");
+ if ( scope->length < MIN_SCOPE_LEN )
+ {
+ printk(KERN_WARNING PREFIX "Invalid device scope\n");
return -EINVAL;
}
@@ -240,16 +199,16 @@ static int scope_device_count(void *star
if ( scope->dev_type == ACPI_DEV_ENDPOINT )
{
- dprintk(XENLOG_INFO VTDPREFIX,
- "found endpoint: bdf = %x:%x:%x\n",
- bus, path->dev, path->fn);
+ printk(KERN_INFO PREFIX
+ "found endpoint: bdf = %x:%x:%x\n",
+ bus, path->dev, path->fn);
count++;
}
else if ( scope->dev_type == ACPI_DEV_P2PBRIDGE )
{
- dprintk(XENLOG_INFO VTDPREFIX,
- "found bridge: bdf = %x:%x:%x\n",
- bus, path->dev, path->fn);
+ printk(KERN_INFO PREFIX
+ "found bridge: bdf = %x:%x:%x\n",
+ bus, path->dev, path->fn);
sec_bus = read_pci_config_byte(
bus, path->dev, path->fn, PCI_SECONDARY_BUS);
sub_bus = read_pci_config_byte(
@@ -278,16 +237,16 @@ static int scope_device_count(void *star
}
else if ( scope->dev_type == ACPI_DEV_IOAPIC )
{
- dprintk(XENLOG_INFO VTDPREFIX,
- "found IOAPIC: bdf = %x:%x:%x\n",
- bus, path->dev, path->fn);
+ printk(KERN_INFO PREFIX
+ "found IOAPIC: bdf = %x:%x:%x\n",
+ bus, path->dev, path->fn);
count++;
}
else
{
- dprintk(XENLOG_INFO VTDPREFIX,
- "found MSI HPET: bdf = %x:%x:%x\n",
- bus, path->dev, path->fn);
+ printk(KERN_INFO PREFIX
+ "found MSI HPET: bdf = %x:%x:%x\n",
+ bus, path->dev, path->fn);
count++;
}
@@ -297,8 +256,8 @@ static int scope_device_count(void *star
return count;
}
-static int __init acpi_parse_dev_scope(
- void *start, void *end, void *acpi_entry, int type)
+static int __init acpi_parse_dev_scope(void *start, void *end, int *cnt,
+ struct pci_dev **devices)
{
struct acpi_dev_scope *scope;
u8 bus, sub_bus, sec_bus;
@@ -309,33 +268,10 @@ static int __init acpi_parse_dev_scope(
u8 dev, func;
u32 l;
- int *cnt = NULL;
- struct pci_dev **devices = NULL;
- struct acpi_drhd_unit *dmaru = (struct acpi_drhd_unit *) acpi_entry;
- struct acpi_rmrr_unit *rmrru = (struct acpi_rmrr_unit *) acpi_entry;
- struct acpi_atsr_unit *atsru = (struct acpi_atsr_unit *) acpi_entry;
-
- switch (type) {
- case DMAR_TYPE:
- cnt = &(dmaru->devices_cnt);
- devices = &(dmaru->devices);
- break;
- case RMRR_TYPE:
- cnt = &(rmrru->devices_cnt);
- devices = &(rmrru->devices);
- break;
- case ATSR_TYPE:
- cnt = &(atsru->devices_cnt);
- devices = &(atsru->devices);
- break;
- default:
- dprintk(XENLOG_ERR VTDPREFIX, "invalid vt-d acpi entry type\n");
- }
-
*cnt = scope_device_count(start, end);
if ( *cnt == 0 )
{
- dprintk(XENLOG_INFO VTDPREFIX, "acpi_parse_dev_scope: no device\n");
+ printk(KERN_INFO PREFIX "acpi_parse_dev_scope: no device\n");
return 0;
}
@@ -362,18 +298,18 @@ static int __init acpi_parse_dev_scope(
if ( scope->dev_type == ACPI_DEV_ENDPOINT )
{
- dprintk(XENLOG_INFO VTDPREFIX,
- "found endpoint: bdf = %x:%x:%x\n",
- bus, path->dev, path->fn);
+ printk(KERN_INFO PREFIX
+ "found endpoint: bdf = %x:%x:%x\n",
+ bus, path->dev, path->fn);
pdev->bus = bus;
pdev->devfn = PCI_DEVFN(path->dev, path->fn);
pdev++;
}
else if ( scope->dev_type == ACPI_DEV_P2PBRIDGE )
{
- dprintk(XENLOG_INFO VTDPREFIX,
- "found bridge: bus = %x dev = %x func = %x\n",
- bus, path->dev, path->fn);
+ printk(KERN_INFO PREFIX
+ "found bridge: bus = %x dev = %x func = %x\n",
+ bus, path->dev, path->fn);
sec_bus = read_pci_config_byte(
bus, path->dev, path->fn, PCI_SECONDARY_BUS);
sub_bus = read_pci_config_byte(
@@ -412,15 +348,16 @@ static int __init acpi_parse_dev_scope(
acpi_ioapic_unit->ioapic.bdf.bus = bus;
acpi_ioapic_unit->ioapic.bdf.dev = path->dev;
acpi_ioapic_unit->ioapic.bdf.func = path->fn;
- list_add(&acpi_ioapic_unit->list, &dmaru->ioapic_list);
- dprintk(XENLOG_INFO VTDPREFIX,
- "found IOAPIC: bus = %x dev = %x func = %x\n",
- bus, path->dev, path->fn);
+ list_add(&acpi_ioapic_unit->list, &acpi_ioapic_units);
+ printk(KERN_INFO PREFIX
+ "found IOAPIC: bus = %x dev = %x func = %x\n",
+ bus, path->dev, path->fn);
}
else
- dprintk(XENLOG_INFO VTDPREFIX,
- "found MSI HPET: bus = %x dev = %x func = %x\n",
- bus, path->dev, path->fn);
+ printk(KERN_INFO PREFIX
+ "found MSI HPET: bus = %x dev = %x func = %x\n",
+ bus, path->dev, path->fn);
+
start += scope->length;
}
@@ -434,7 +371,6 @@ acpi_parse_one_drhd(struct acpi_dmar_ent
struct acpi_drhd_unit *dmaru;
int ret = 0;
static int include_all;
- void *dev_scope_start, *dev_scope_end;
dmaru = xmalloc(struct acpi_drhd_unit);
if ( !dmaru )
@@ -443,22 +379,21 @@ acpi_parse_one_drhd(struct acpi_dmar_ent
dmaru->address = drhd->address;
dmaru->include_all = drhd->flags & 1; /* BIT0: INCLUDE_ALL */
- INIT_LIST_HEAD(&dmaru->ioapic_list);
- dprintk(XENLOG_INFO VTDPREFIX, "dmaru->address = %lx\n", dmaru->address);
-
- dev_scope_start = (void *)(drhd + 1);
- dev_scope_end = ((void *)drhd) + header->length;
- ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
- dmaru, DMAR_TYPE);
-
- if ( dmaru->include_all )
- {
- dprintk(XENLOG_INFO VTDPREFIX, "found INCLUDE_ALL\n");
+ printk(KERN_INFO PREFIX "dmaru->address = %lx\n", dmaru->address);
+
+ if ( !dmaru->include_all )
+ ret = acpi_parse_dev_scope(
+ (void *)(drhd + 1),
+ ((void *)drhd) + header->length,
+ &dmaru->devices_cnt, &dmaru->devices);
+ else
+ {
+ printk(KERN_INFO PREFIX "found INCLUDE_ALL\n");
/* Only allow one INCLUDE_ALL */
if ( include_all )
{
- dprintk(XENLOG_WARNING VTDPREFIX,
- "Only one INCLUDE_ALL device scope is allowed\n");
+ printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
+ "device scope is allowed\n");
ret = -EINVAL;
}
include_all = 1;
@@ -476,7 +411,6 @@ acpi_parse_one_rmrr(struct acpi_dmar_ent
{
struct acpi_table_rmrr *rmrr = (struct acpi_table_rmrr *)header;
struct acpi_rmrr_unit *rmrru;
- void *dev_scope_start, *dev_scope_end;
int ret = 0;
rmrru = xmalloc(struct acpi_rmrr_unit);
@@ -486,10 +420,15 @@ acpi_parse_one_rmrr(struct acpi_dmar_ent
rmrru->base_address = rmrr->base_address;
rmrru->end_address = rmrr->end_address;
- dev_scope_start = (void *)(rmrr + 1);
- dev_scope_end = ((void *)rmrr) + header->length;
- ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
- rmrru, RMRR_TYPE);
+ printk(KERN_INFO PREFIX
+ "acpi_parse_one_rmrr: base=%"PRIx64" end=%"PRIx64"\n",
+ rmrr->base_address, rmrr->end_address);
+
+ ret = acpi_parse_dev_scope(
+ (void *)(rmrr + 1),
+ ((void*)rmrr) + header->length,
+ &rmrru->devices_cnt, &rmrru->devices);
+
if ( ret || (rmrru->devices_cnt == 0) )
xfree(rmrru);
else
@@ -504,7 +443,6 @@ acpi_parse_one_atsr(struct acpi_dmar_ent
struct acpi_atsr_unit *atsru;
int ret = 0;
static int all_ports;
- void *dev_scope_start, *dev_scope_end;
atsru = xmalloc(struct acpi_atsr_unit);
if ( !atsru )
@@ -513,19 +451,18 @@ acpi_parse_one_atsr(struct acpi_dmar_ent
atsru->all_ports = atsr->flags & 1; /* BIT0: ALL_PORTS */
if ( !atsru->all_ports )
- {
- dev_scope_start = (void *)(atsr + 1);
- dev_scope_end = ((void *)atsr) + header->length;
- ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
- atsru, ATSR_TYPE);
- }
- else {
- dprintk(XENLOG_INFO VTDPREFIX, "found ALL_PORTS\n");
+ ret = acpi_parse_dev_scope(
+ (void *)(atsr + 1),
+ ((void *)atsr) + header->length,
+ &atsru->devices_cnt, &atsru->devices);
+ else
+ {
+ printk(KERN_INFO PREFIX "found ALL_PORTS\n");
/* Only allow one ALL_PORTS */
if ( all_ports )
{
- dprintk(XENLOG_WARNING VTDPREFIX,
- "Only one ALL_PORTS device scope is allowed\n");
+ printk(KERN_WARNING PREFIX "Only one ALL_PORTS "
+ "device scope is allowed\n");
ret = -EINVAL;
}
all_ports = 1;
@@ -551,19 +488,19 @@ static int __init acpi_parse_dmar(unsign
dmar = (struct acpi_table_dmar *)__acpi_map_table(phys_addr, size);
if ( !dmar )
{
- dprintk(XENLOG_WARNING VTDPREFIX, "Unable to map DMAR\n");
+ printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
return -ENODEV;
}
if ( !dmar->haw )
{
- dprintk(XENLOG_WARNING VTDPREFIX, "Zero: Invalid DMAR haw\n");
+ printk(KERN_WARNING PREFIX "Zero: Invalid DMAR haw\n");
return -EINVAL;
}
dmar_host_address_width = dmar->haw;
- dprintk(XENLOG_INFO VTDPREFIX, "Host address width %d\n",
- dmar_host_address_width);
+ printk(KERN_INFO PREFIX "Host address width %d\n",
+ dmar_host_address_width);
entry_header = (struct acpi_dmar_entry_header *)(dmar + 1);
while ( ((unsigned long)entry_header) <
@@ -572,19 +509,19 @@ static int __init acpi_parse_dmar(unsign
switch ( entry_header->type )
{
case ACPI_DMAR_DRHD:
- dprintk(XENLOG_INFO VTDPREFIX, "found ACPI_DMAR_DRHD\n");
+ printk(KERN_INFO PREFIX "found ACPI_DMAR_DRHD\n");
ret = acpi_parse_one_drhd(entry_header);
break;
case ACPI_DMAR_RMRR:
- dprintk(XENLOG_INFO VTDPREFIX, "found ACPI_DMAR_RMRR\n");
+ printk(KERN_INFO PREFIX "found ACPI_DMAR_RMRR\n");
ret = acpi_parse_one_rmrr(entry_header);
break;
case ACPI_DMAR_ATSR:
- dprintk(XENLOG_INFO VTDPREFIX, "found ACPI_DMAR_ATSR\n");
+ printk(KERN_INFO PREFIX "found ACPI_DMAR_ATSR\n");
ret = acpi_parse_one_atsr(entry_header);
break;
default:
- dprintk(XENLOG_WARNING VTDPREFIX, "Unknown DMAR structure type\n");
+ printk(KERN_WARNING PREFIX "Unknown DMAR structure type\n");
ret = -EINVAL;
break;
}
@@ -614,7 +551,7 @@ int acpi_dmar_init(void)
if ( list_empty(&acpi_drhd_units) )
{
- dprintk(XENLOG_ERR VTDPREFIX, "No DMAR devices found\n");
+ printk(KERN_ERR PREFIX "No DMAR devices found\n");
vtd_enabled = 0;
return -ENODEV;
}
diff -r 430b2159c4b7 -r b96a1adbcac0 xen/arch/x86/hvm/vmx/vtd/dmar.h
--- a/xen/arch/x86/hvm/vmx/vtd/dmar.h Tue Jan 22 11:25:21 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vtd/dmar.h Thu Jan 24 14:49:23 2008 +0000
@@ -26,20 +26,6 @@
extern u8 dmar_host_address_width;
-/* This one is for interrupt remapping */
-struct acpi_ioapic_unit {
- struct list_head list;
- int apic_id;
- union {
- u16 info;
- struct {
- u16 func: 3,
- dev: 5,
- bus: 8;
- }bdf;
- }ioapic;
-};
-
struct acpi_drhd_unit {
struct list_head list;
unsigned long address; /* register base address of the unit */
@@ -47,7 +33,6 @@ struct acpi_drhd_unit {
int devices_cnt;
u8 include_all:1;
struct iommu *iommu;
- struct list_head ioapic_list;
};
struct acpi_rmrr_unit {
@@ -88,9 +73,19 @@ struct acpi_drhd_unit * acpi_find_matche
struct acpi_drhd_unit * acpi_find_matched_drhd_unit(struct pci_dev *dev);
struct acpi_rmrr_unit * acpi_find_matched_rmrr_unit(struct pci_dev *dev);
-#define DMAR_TYPE 1
-#define RMRR_TYPE 2
-#define ATSR_TYPE 3
+/* This one is for interrupt remapping */
+struct acpi_ioapic_unit {
+ struct list_head list;
+ int apic_id;
+ union {
+ u16 info;
+ struct {
+ u16 bus: 8,
+ dev: 5,
+ func: 3;
+ }bdf;
+ }ioapic;
+};
#define DMAR_OPERATION_TIMEOUT (HZ*60) /* 1m */
#define time_after(a,b) \
diff -r 430b2159c4b7 -r b96a1adbcac0 xen/arch/x86/hvm/vmx/vtd/extern.h
--- a/xen/arch/x86/hvm/vmx/vtd/extern.h Tue Jan 22 11:25:21 2008 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Copyright (C) Allen Kay <allen.m.kay@xxxxxxxxx>
- * Copyright (C) Weidong Han <weidong.han@xxxxxxxxx>
- */
-
-#ifndef _VTD_EXTERN_H_
-#define _VTD_EXTERN_H_
-
-#include "dmar.h"
-
-extern int iommu_setup_done;
-extern int vtd2_thurley_enabled;
-extern int vtd2_qinval_enabled;
-
-extern spinlock_t ioapic_lock;
-extern struct qi_ctrl *qi_ctrl;
-extern struct ir_ctrl *ir_ctrl;
-
-void print_iommu_regs(struct acpi_drhd_unit *drhd);
-void print_vtd_entries(struct domain *d, struct iommu *iommu,
- int bus, int devfn, unsigned long gmfn);
-
-int qinval_setup(struct iommu *iommu);
-int queue_invalidate_context(struct iommu *iommu,
- u16 did, u16 source_id, u8 function_mask, u8 granu);
-int queue_invalidate_iotlb(struct iommu *iommu,
- u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr);
-int queue_invalidate_iec(struct iommu *iommu,
- u8 granu, u8 im, u16 iidx);
-int invalidate_sync(struct iommu *iommu);
-int iommu_flush_iec_global(struct iommu *iommu);
-int iommu_flush_iec_index(struct iommu *iommu, u8 im, u16 iidx);
-void gsi_remapping(unsigned int gsi);
-void print_iommu_regs(struct acpi_drhd_unit *drhd);
-int vtd_hw_check(void);
-struct iommu * ioapic_to_iommu(unsigned int apic_id);
-struct acpi_drhd_unit * ioapic_to_drhd(unsigned int apic_id);
-void clear_fault_bits(struct iommu *iommu);
-
-#endif // _VTD_EXTERN_H_
diff -r 430b2159c4b7 -r b96a1adbcac0 xen/arch/x86/hvm/vmx/vtd/intel-iommu.c
--- a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c Tue Jan 22 11:25:21 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c Thu Jan 24 14:49:23 2008 +0000
@@ -34,9 +34,12 @@
#include "pci-direct.h"
#include "pci_regs.h"
#include "msi.h"
-#include "extern.h"
#define domain_iommu_domid(d) ((d)->arch.hvm_domain.hvm_iommu.iommu_domid)
+
+extern void print_iommu_regs(struct acpi_drhd_unit *drhd);
+extern void print_vtd_entries(struct domain *d, int bus, int devfn,
+ unsigned long gmfn);
static spinlock_t domid_bitmap_lock; /* protect domain id bitmap */
static int domid_bitmap_size; /* domain id bitmap size in bit */
@@ -301,12 +304,11 @@ static void iommu_flush_write_buffer(str
}
/* return value determine if we need a write buffer flush */
-static int flush_context_reg(
- void *_iommu,
+static int __iommu_flush_context(
+ struct iommu *iommu,
u16 did, u16 source_id, u8 function_mask, u64 type,
int non_present_entry_flush)
{
- struct iommu *iommu = (struct iommu *) _iommu;
u64 val = 0;
unsigned long flag;
unsigned long start_time;
@@ -365,16 +367,14 @@ static int inline iommu_flush_context_gl
static int inline iommu_flush_context_global(
struct iommu *iommu, int non_present_entry_flush)
{
- struct iommu_flush *flush = iommu_get_flush(iommu);
- return flush->context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
+ return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
non_present_entry_flush);
}
static int inline iommu_flush_context_domain(
struct iommu *iommu, u16 did, int non_present_entry_flush)
{
- struct iommu_flush *flush = iommu_get_flush(iommu);
- return flush->context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
+ return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
non_present_entry_flush);
}
@@ -382,18 +382,16 @@ static int inline iommu_flush_context_de
struct iommu *iommu, u16 did, u16 source_id,
u8 function_mask, int non_present_entry_flush)
{
- struct iommu_flush *flush = iommu_get_flush(iommu);
- return flush->context(iommu, did, source_id, function_mask,
+ return __iommu_flush_context(iommu, did, source_id, function_mask,
DMA_CCMD_DEVICE_INVL,
non_present_entry_flush);
}
/* return value determine if we need a write buffer flush */
-static int flush_iotlb_reg(void *_iommu, u16 did,
+static int __iommu_flush_iotlb(struct iommu *iommu, u16 did,
u64 addr, unsigned int size_order, u64 type,
int non_present_entry_flush)
{
- struct iommu *iommu = (struct iommu *) _iommu;
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0, val_iva = 0;
unsigned long flag;
@@ -469,16 +467,14 @@ static int inline iommu_flush_iotlb_glob
static int inline iommu_flush_iotlb_global(struct iommu *iommu,
int non_present_entry_flush)
{
- struct iommu_flush *flush = iommu_get_flush(iommu);
- return flush->iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
+ return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
non_present_entry_flush);
}
static int inline iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
int non_present_entry_flush)
{
- struct iommu_flush *flush = iommu_get_flush(iommu);
- return flush->iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
+ return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
non_present_entry_flush);
}
@@ -502,7 +498,6 @@ static int inline iommu_flush_iotlb_psi(
u64 addr, unsigned int pages, int non_present_entry_flush)
{
unsigned int align;
- struct iommu_flush *flush = iommu_get_flush(iommu);
BUG_ON(addr & (~PAGE_MASK_4K));
BUG_ON(pages == 0);
@@ -525,7 +520,7 @@ static int inline iommu_flush_iotlb_psi(
addr >>= PAGE_SHIFT_4K + align;
addr <<= PAGE_SHIFT_4K + align;
- return flush->iotlb(iommu, did, addr, align,
+ return __iommu_flush_iotlb(iommu, did, addr, align,
DMA_TLB_PSI_FLUSH, non_present_entry_flush);
}
@@ -706,7 +701,7 @@ static int iommu_enable_translation(stru
unsigned long flags;
dprintk(XENLOG_INFO VTDPREFIX,
- "iommu_enable_translation: iommu->reg = %p\n", iommu->reg);
+ "iommu_enable_translation: enabling vt-d translation\n");
spin_lock_irqsave(&iommu->register_lock, flags);
iommu->gcmd |= DMA_GCMD_TE;
dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
@@ -751,47 +746,14 @@ static int iommu_page_fault_do_one(struc
u8 fault_reason, u16 source_id, u32 addr)
{
dprintk(XENLOG_WARNING VTDPREFIX,
- "iommu_fault:%s: %x:%x.%x addr %x REASON %x iommu->reg = %p\n",
- (type ? "DMA Read" : "DMA Write"), (source_id >> 8),
- PCI_SLOT(source_id & 0xFF), PCI_FUNC(source_id & 0xFF), addr,
- fault_reason, iommu->reg);
-
- if (fault_reason < 0x20)
- print_vtd_entries(current->domain, iommu, (source_id >> 8),
- (source_id & 0xff), (addr >> PAGE_SHIFT));
-
+ "iommu_page_fault:%s: DEVICE %x:%x.%x addr %x REASON %x\n",
+ (type ? "DMA Read" : "DMA Write"),
+ (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+ PCI_FUNC(source_id & 0xFF), addr, fault_reason);
+
+ print_vtd_entries(current->domain, (source_id >> 8),(source_id & 0xff),
+ (addr >> PAGE_SHIFT));
return 0;
-}
-
-static void iommu_fault_status(u32 fault_status)
-{
- if (fault_status & DMA_FSTS_PFO)
- dprintk(XENLOG_ERR VTDPREFIX,
- "iommu_fault_status: Fault Overflow\n");
- else
- if (fault_status & DMA_FSTS_PPF)
- dprintk(XENLOG_ERR VTDPREFIX,
- "iommu_fault_status: Primary Pending Fault\n");
- else
- if (fault_status & DMA_FSTS_AFO)
- dprintk(XENLOG_ERR VTDPREFIX,
- "iommu_fault_status: Advanced Fault Overflow\n");
- else
- if (fault_status & DMA_FSTS_APF)
- dprintk(XENLOG_ERR VTDPREFIX,
- "iommu_fault_status: Advanced Pending Fault\n");
- else
- if (fault_status & DMA_FSTS_IQE)
- dprintk(XENLOG_ERR VTDPREFIX,
- "iommu_fault_status: Invalidation Queue Error\n");
- else
- if (fault_status & DMA_FSTS_ICE)
- dprintk(XENLOG_ERR VTDPREFIX,
- "iommu_fault_status: Invalidation Completion Error\n");
- else
- if (fault_status & DMA_FSTS_ITE)
- dprintk(XENLOG_ERR VTDPREFIX,
- "iommu_fault_status: Invalidation Time-out Error\n");
}
#define PRIMARY_FAULT_REG_LEN (16)
@@ -809,8 +771,6 @@ static void iommu_page_fault(int vector,
spin_lock_irqsave(&iommu->register_lock, flags);
fault_status = dmar_readl(iommu->reg, DMAR_FSTS_REG);
spin_unlock_irqrestore(&iommu->register_lock, flags);
-
- iommu_fault_status(fault_status);
/* FIXME: ignore advanced fault log */
if ( !(fault_status & DMA_FSTS_PPF) )
@@ -976,8 +936,6 @@ struct iommu *iommu_alloc(void *hw_data)
{
struct acpi_drhd_unit *drhd = (struct acpi_drhd_unit *) hw_data;
struct iommu *iommu;
- struct qi_ctrl *qi_ctrl;
- struct ir_ctrl *ir_ctrl;
if ( nr_iommus > MAX_IOMMUS )
{
@@ -993,10 +951,9 @@ struct iommu *iommu_alloc(void *hw_data)
set_fixmap_nocache(FIX_IOMMU_REGS_BASE_0 + nr_iommus, drhd->address);
iommu->reg = (void *) fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus);
-
- printk("iommu_alloc: iommu->reg = %p drhd->address = %lx\n",
- iommu->reg, drhd->address);
-
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "iommu_alloc: iommu->reg = %p drhd->address = %lx\n",
+ iommu->reg, drhd->address);
nr_iommus++;
if ( !iommu->reg )
@@ -1008,18 +965,8 @@ struct iommu *iommu_alloc(void *hw_data)
iommu->cap = dmar_readq(iommu->reg, DMAR_CAP_REG);
iommu->ecap = dmar_readq(iommu->reg, DMAR_ECAP_REG);
- printk("iommu_alloc: cap = %"PRIx64"\n",iommu->cap);
- printk("iommu_alloc: ecap = %"PRIx64"\n", iommu->ecap);
-
spin_lock_init(&iommu->lock);
spin_lock_init(&iommu->register_lock);
-
- qi_ctrl = iommu_qi_ctrl(iommu);
- spin_lock_init(&qi_ctrl->qinval_lock);
- spin_lock_init(&qi_ctrl->qinval_poll_lock);
-
- ir_ctrl = iommu_ir_ctrl(iommu);
- spin_lock_init(&ir_ctrl->iremap_lock);
drhd->iommu = iommu;
return iommu;
@@ -1124,10 +1071,8 @@ static int domain_context_mapping_one(
if ( ecap_pass_thru(iommu->ecap) )
context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
-#ifdef CONTEXT_PASSTHRU
else
{
-#endif
if ( !hd->pgd )
{
struct dma_pte *pgd = (struct dma_pte *)alloc_xenheap_page();
@@ -1142,9 +1087,7 @@ static int domain_context_mapping_one(
context_set_address_root(*context, virt_to_maddr(hd->pgd));
context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
-#ifdef CONTEXT_PASSTHRU
- }
-#endif
+ }
context_set_fault_enable(*context);
context_set_present(*context);
@@ -1519,6 +1462,7 @@ void iommu_domain_teardown(struct domain
if ( pgd[0].val != 0 )
free_xenheap_page((void*)maddr_to_virt(
dma_pte_addr(pgd[0])));
+
free_xenheap_page((void *)hd->pgd);
}
break;
@@ -1559,11 +1503,9 @@ int iommu_map_page(struct domain *d, pad
drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
iommu = drhd->iommu;
-#ifdef CONTEXT_PASSTHRU
/* do nothing if dom0 and iommu supports pass thru */
if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
return 0;
-#endif
pg = addr_to_dma_page(d, gfn << PAGE_SHIFT_4K);
if ( !pg )
@@ -1596,11 +1538,9 @@ int iommu_unmap_page(struct domain *d, d
drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
iommu = drhd->iommu;
-#ifdef CONTEXT_PASSTHRU
/* do nothing if dom0 and iommu supports pass thru */
if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
return 0;
-#endif
dma_pte_clear_one(d, gfn << PAGE_SHIFT_4K);
@@ -1771,7 +1711,7 @@ void __init setup_dom0_devices(void)
pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
}
-void clear_fault_bits(struct iommu *iommu)
+void clear_fault_bit(struct iommu *iommu)
{
u64 val;
@@ -1782,15 +1722,13 @@ void clear_fault_bits(struct iommu *iomm
iommu->reg,
cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+8,
val);
- dmar_writel(iommu->reg, DMAR_FSTS_REG, DMA_FSTS_FAULTS);
+ dmar_writel(iommu->reg, DMAR_FSTS_REG, DMA_FSTS_PFO);
}
static int init_vtd_hw(void)
{
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
- struct iommu_flush *flush = NULL;
- int vector;
int ret;
for_each_drhd_unit ( drhd )
@@ -1802,37 +1740,29 @@ static int init_vtd_hw(void)
gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: set root entry failed\n");
return -EIO;
}
-
+ }
+
+ return 0;
+}
+
+static int enable_vtd_translation(void)
+{
+ struct acpi_drhd_unit *drhd;
+ struct iommu *iommu;
+ int vector = 0;
+
+ for_each_drhd_unit ( drhd )
+ {
+ iommu = drhd->iommu;
vector = iommu_set_interrupt(iommu);
dma_msi_data_init(iommu, vector);
dma_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map)));
iommu->vector = vector;
- clear_fault_bits(iommu);
- dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
-
- /* initialize flush functions */
- flush = iommu_get_flush(iommu);
- flush->context = flush_context_reg;
- flush->iotlb = flush_iotlb_reg;
-
- if ( qinval_setup(iommu) != 0);
- dprintk(XENLOG_ERR VTDPREFIX,
- "Queued Invalidation hardware not found\n");
- }
- return 0;
-}
-
-static int enable_vtd_translation(void)
-{
- struct acpi_drhd_unit *drhd;
- struct iommu *iommu;
-
- for_each_drhd_unit ( drhd )
- {
- iommu = drhd->iommu;
+ clear_fault_bit(iommu);
if ( iommu_enable_translation(iommu) )
return -EIO;
}
+
return 0;
}
@@ -1862,6 +1792,9 @@ int iommu_setup(void)
spin_lock_init(&domid_bitmap_lock);
INIT_LIST_HEAD(&hd->pdev_list);
+
+ /* start from scratch */
+ iommu_flush_all();
/* setup clflush size */
x86_clflush_size = ((cpuid_ebx(1) >> 8) & 0xff) * 8;
@@ -1882,12 +1815,12 @@ int iommu_setup(void)
for ( i = 0; i < max_page; i++ )
iommu_map_page(dom0, i, i);
- enable_vtd_translation();
if ( init_vtd_hw() )
goto error;
setup_dom0_devices();
setup_dom0_rmrr();
- iommu_flush_all();
+ if ( enable_vtd_translation() )
+ goto error;
return 0;
diff -r 430b2159c4b7 -r b96a1adbcac0 xen/arch/x86/hvm/vmx/vtd/qinval.c
--- a/xen/arch/x86/hvm/vmx/vtd/qinval.c Tue Jan 22 11:25:21 2008 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,456 +0,0 @@
-/*
- * Copyright (c) 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Copyright (C) Allen Kay <allen.m.kay@xxxxxxxxx>
- * Copyright (C) Xiaohui Xin <xiaohui.xin@xxxxxxxxx>
- */
-
-
-#include <xen/init.h>
-#include <xen/irq.h>
-#include <xen/spinlock.h>
-#include <xen/sched.h>
-#include <xen/xmalloc.h>
-#include <xen/domain_page.h>
-#include <asm/delay.h>
-#include <asm/string.h>
-#include <asm/iommu.h>
-#include <asm/hvm/vmx/intel-iommu.h>
-#include "dmar.h"
-#include "vtd.h"
-#include "pci-direct.h"
-#include "pci_regs.h"
-#include "msi.h"
-#include "extern.h"
-
-static void print_qi_regs(struct iommu *iommu)
-{
- u64 val;
-
- val = dmar_readq(iommu->reg, DMAR_IQA_REG);
- printk("DMAR_IAQ_REG = %"PRIx64"\n", val);
-
- val = dmar_readq(iommu->reg, DMAR_IQH_REG);
- printk("DMAR_IAH_REG = %"PRIx64"\n", val);
-
- val = dmar_readq(iommu->reg, DMAR_IQT_REG);
- printk("DMAR_IAT_REG = %"PRIx64"\n", val);
-}
-
-static int qinval_next_index(struct iommu *iommu)
-{
- u64 val;
- val = dmar_readq(iommu->reg, DMAR_IQT_REG);
- return (val >> 4);
-}
-
-static int qinval_update_qtail(struct iommu *iommu, int index)
-{
- u64 val;
-
- /* Need an ASSERT to insure that we have got register lock */
- val = (index < (QINVAL_ENTRY_NR-1)) ? (index + 1) : 0;
- dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << 4));
- return 0;
-}
-
-static int gen_cc_inv_dsc(struct iommu *iommu, int index,
- u16 did, u16 source_id, u8 function_mask, u8 granu)
-{
- u64 *ptr64;
- unsigned long flags;
- struct qinval_entry * qinval_entry = NULL;
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
-
- spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
- qinval_entry = &qi_ctrl->qinval[index];
- qinval_entry->q.cc_inv_dsc.lo.type = TYPE_INVAL_CONTEXT;
- qinval_entry->q.cc_inv_dsc.lo.granu = granu;
- qinval_entry->q.cc_inv_dsc.lo.res_1 = 0;
- qinval_entry->q.cc_inv_dsc.lo.did = did;
- qinval_entry->q.cc_inv_dsc.lo.sid = source_id;
- qinval_entry->q.cc_inv_dsc.lo.fm = function_mask;
- qinval_entry->q.cc_inv_dsc.lo.res_2 = 0;
- qinval_entry->q.cc_inv_dsc.hi.res = 0;
- spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
-
- ptr64 = (u64 *)qinval_entry;
- return 0;
-}
-
-int queue_invalidate_context(struct iommu *iommu,
- u16 did, u16 source_id, u8 function_mask, u8 granu)
-{
- int ret = -1;
- unsigned long flags;
- int index = -1;
-
- spin_lock_irqsave(&iommu->register_lock, flags);
- index = qinval_next_index(iommu);
- if (index == -1)
- return -EBUSY;
- ret = gen_cc_inv_dsc(iommu, index, did, source_id,
- function_mask, granu);
- ret |= qinval_update_qtail(iommu, index);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
- return ret;
-}
-
-static int gen_iotlb_inv_dsc(struct iommu *iommu, int index,
- u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
-{
- unsigned long flags;
- struct qinval_entry * qinval_entry = NULL;
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
-
- if ( index == -1 )
- return -1;
- spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
-
- qinval_entry = &qi_ctrl->qinval[index];
- qinval_entry->q.iotlb_inv_dsc.lo.type = TYPE_INVAL_IOTLB;
- qinval_entry->q.iotlb_inv_dsc.lo.granu = granu;
- qinval_entry->q.iotlb_inv_dsc.lo.dr = 0;
- qinval_entry->q.iotlb_inv_dsc.lo.dw = 0;
- qinval_entry->q.iotlb_inv_dsc.lo.res_1 = 0;
- qinval_entry->q.iotlb_inv_dsc.lo.did = did;
- qinval_entry->q.iotlb_inv_dsc.lo.res_2 = 0;
-
- qinval_entry->q.iotlb_inv_dsc.hi.am = am;
- qinval_entry->q.iotlb_inv_dsc.hi.ih = ih;
- qinval_entry->q.iotlb_inv_dsc.hi.res_1 = 0;
- qinval_entry->q.iotlb_inv_dsc.hi.addr = addr;
-
- spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
- return 0;
-}
-
-int queue_invalidate_iotlb(struct iommu *iommu,
- u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
-{
- int ret = -1;
- unsigned long flags;
- int index = -1;
-
- spin_lock_irqsave(&iommu->register_lock, flags);
-
- index = qinval_next_index(iommu);
- ret = gen_iotlb_inv_dsc(iommu, index, granu, dr, dw, did,
- am, ih, addr);
- ret |= qinval_update_qtail(iommu, index);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
- return ret;
-}
-
-static int gen_wait_dsc(struct iommu *iommu, int index,
- u8 iflag, u8 sw, u8 fn, u32 sdata, volatile u32 *saddr)
-{
- u64 *ptr64;
- unsigned long flags;
- struct qinval_entry * qinval_entry = NULL;
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
-
- if ( index == -1 )
- return -1;
- spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
- qinval_entry = &qi_ctrl->qinval[index];
- qinval_entry->q.inv_wait_dsc.lo.type = TYPE_INVAL_WAIT;
- qinval_entry->q.inv_wait_dsc.lo.iflag = iflag;
- qinval_entry->q.inv_wait_dsc.lo.sw = sw;
- qinval_entry->q.inv_wait_dsc.lo.fn = fn;
- qinval_entry->q.inv_wait_dsc.lo.res_1 = 0;
- qinval_entry->q.inv_wait_dsc.lo.sdata = sdata;
- qinval_entry->q.inv_wait_dsc.hi.res_1 = 0;
- qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(saddr) >> 2;
- spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
- ptr64 = (u64 *)qinval_entry;
- return 0;
-}
-
-static int queue_invalidate_wait(struct iommu *iommu,
- u8 iflag, u8 sw, u8 fn, u32 sdata, volatile u32 *saddr)
-{
- unsigned long flags;
- unsigned long start_time;
- int index = -1;
- int ret = -1;
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
-
- spin_lock_irqsave(&qi_ctrl->qinval_poll_lock, flags);
- spin_lock_irqsave(&iommu->register_lock, flags);
- index = qinval_next_index(iommu);
- if (*saddr == 1)
- *saddr = 0;
- ret = gen_wait_dsc(iommu, index, iflag, sw, fn, sdata, saddr);
- ret |= qinval_update_qtail(iommu, index);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
-
- /* Now we don't support interrupt method */
- if ( sw )
- {
- /* In case all wait descriptor writes to same addr with same data */
- start_time = jiffies;
- while ( *saddr != 1 ) {
- if (time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT)) {
- print_qi_regs(iommu);
- panic("queue invalidate wait descriptor was not executed\n");
- }
- cpu_relax();
- }
- }
- spin_unlock_irqrestore(&qi_ctrl->qinval_poll_lock, flags);
- return ret;
-}
-
-int invalidate_sync(struct iommu *iommu)
-{
- int ret = -1;
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
-
- if (qi_ctrl->qinval)
- {
- ret = queue_invalidate_wait(iommu,
- 0, 1, 1, 1, &qi_ctrl->qinval_poll_status);
- return ret;
- }
- return 0;
-}
-
-static int gen_dev_iotlb_inv_dsc(struct iommu *iommu, int index,
- u32 max_invs_pend, u16 sid, u16 size, u64 addr)
-{
- unsigned long flags;
- struct qinval_entry * qinval_entry = NULL;
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
-
- if ( index == -1 )
- return -1;
- spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
-
- qinval_entry = &qi_ctrl->qinval[index];
- qinval_entry->q.dev_iotlb_inv_dsc.lo.type = TYPE_INVAL_DEVICE_IOTLB;
- qinval_entry->q.dev_iotlb_inv_dsc.lo.res_1 = 0;
- qinval_entry->q.dev_iotlb_inv_dsc.lo.max_invs_pend = max_invs_pend;
- qinval_entry->q.dev_iotlb_inv_dsc.lo.res_2 = 0;
- qinval_entry->q.dev_iotlb_inv_dsc.lo.sid = sid;
- qinval_entry->q.dev_iotlb_inv_dsc.lo.res_3 = 0;
-
- qinval_entry->q.dev_iotlb_inv_dsc.hi.size = size;
- qinval_entry->q.dev_iotlb_inv_dsc.hi.addr = addr;
-
- spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
- return 0;
-}
-
-int queue_invalidate_device_iotlb(struct iommu *iommu,
- u32 max_invs_pend, u16 sid, u16 size, u64 addr)
-{
- int ret = -1;
- unsigned long flags;
- int index = -1;
-
- spin_lock_irqsave(&iommu->register_lock, flags);
- index = qinval_next_index(iommu);
- ret = gen_dev_iotlb_inv_dsc(iommu, index, max_invs_pend,
- sid, size, addr);
- ret |= qinval_update_qtail(iommu, index);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
- return ret;
-}
-
-static int gen_iec_inv_dsc(struct iommu *iommu, int index,
- u8 granu, u8 im, u16 iidx)
-{
- unsigned long flags;
- struct qinval_entry * qinval_entry = NULL;
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
-
- if ( index == -1 )
- return -1;
- spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
-
- qinval_entry = &qi_ctrl->qinval[index];
- qinval_entry->q.iec_inv_dsc.lo.type = TYPE_INVAL_IEC;
- qinval_entry->q.iec_inv_dsc.lo.granu = granu;
- qinval_entry->q.iec_inv_dsc.lo.res_1 = 0;
- qinval_entry->q.iec_inv_dsc.lo.im = im;
- qinval_entry->q.iec_inv_dsc.lo.iidx = iidx;
- qinval_entry->q.iec_inv_dsc.lo.res_2 = 0;
- qinval_entry->q.iec_inv_dsc.hi.res = 0;
-
- spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
- return 0;
-}
-
-int queue_invalidate_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
-{
- int ret;
- unsigned long flags;
- int index = -1;
-
- spin_lock_irqsave(&iommu->register_lock, flags);
- index = qinval_next_index(iommu);
- ret = gen_iec_inv_dsc(iommu, index, granu, im, iidx);
- ret |= qinval_update_qtail(iommu, index);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
- return ret;
-}
-
-u64 iec_cap;
-int __iommu_flush_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
-{
- int ret;
- ret = queue_invalidate_iec(iommu, granu, im, iidx);
- ret |= invalidate_sync(iommu);
-
- /*
- * reading vt-d architecture register will ensure
- * draining happens in implementation independent way.
- */
- iec_cap = dmar_readq(iommu->reg, DMAR_CAP_REG);
- return ret;
-}
-
-int iommu_flush_iec_global(struct iommu *iommu)
-{
- return __iommu_flush_iec(iommu, IEC_GLOBAL_INVL, 0, 0);
-}
-
-int iommu_flush_iec_index(struct iommu *iommu, u8 im, u16 iidx)
-{
- return __iommu_flush_iec(iommu, IEC_INDEX_INVL, im, iidx);
-}
-
-static int flush_context_qi(
- void *_iommu, u16 did, u16 sid, u8 fm, u64 type,
- int non_present_entry_flush)
-{
- int ret = 0;
- struct iommu *iommu = (struct iommu *)_iommu;
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
-
- /*
- * In the non-present entry flush case, if hardware doesn't cache
- * non-present entry we do nothing and if hardware cache non-present
- * entry, we flush entries of domain 0 (the domain id is used to cache
- * any non-present entries)
- */
- if ( non_present_entry_flush )
- {
- if ( !cap_caching_mode(iommu->cap) )
- return 1;
- else
- did = 0;
- }
-
- if (qi_ctrl->qinval)
- {
- ret = queue_invalidate_context(iommu, did, sid, fm,
- type >> DMA_CCMD_INVL_GRANU_OFFSET);
- ret |= invalidate_sync(iommu);
- }
- return ret;
-}
-
-static int flush_iotlb_qi(
- void *_iommu, u16 did,
- u64 addr, unsigned int size_order, u64 type,
- int non_present_entry_flush)
-{
- u8 dr = 0, dw = 0;
- int ret = 0;
- struct iommu *iommu = (struct iommu *)_iommu;
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
-
- /*
- * In the non-present entry flush case, if hardware doesn't cache
- * non-present entry we do nothing and if hardware cache non-present
- * entry, we flush entries of domain 0 (the domain id is used to cache
- * any non-present entries)
- */
- if ( non_present_entry_flush )
- {
- if ( !cap_caching_mode(iommu->cap) )
- return 1;
- else
- did = 0;
- }
-
- if (qi_ctrl->qinval) {
- /* use queued invalidation */
- if (cap_write_drain(iommu->cap))
- dw = 1;
- if (cap_read_drain(iommu->cap))
- dr = 1;
- /* Need to conside the ih bit later */
- ret = queue_invalidate_iotlb(iommu,
- (type >> DMA_TLB_FLUSH_GRANU_OFFSET), dr,
- dw, did, (u8)size_order, 0, addr);
- ret |= invalidate_sync(iommu);
- }
- return ret;
-}
-
-int qinval_setup(struct iommu *iommu)
-{
- unsigned long start_time;
- u64 paddr;
- u32 status = 0;
- struct qi_ctrl *qi_ctrl;
- struct iommu_flush *flush;
-
- qi_ctrl = iommu_qi_ctrl(iommu);
- flush = iommu_get_flush(iommu);
-
- if ( !ecap_queued_inval(iommu->ecap) )
- return -ENODEV;
-
- if (qi_ctrl->qinval == NULL) {
- qi_ctrl->qinval = alloc_xenheap_page();
- if (qi_ctrl->qinval == NULL)
- panic("Cannot allocate memory for qi_ctrl->qinval\n");
- memset((u8*)qi_ctrl->qinval, 0, PAGE_SIZE_4K);
- flush->context = flush_context_qi;
- flush->iotlb = flush_iotlb_qi;
- }
- paddr = virt_to_maddr(qi_ctrl->qinval);
-
- /* Setup Invalidation Queue Address(IQA) register with the
- * address of the page we just allocated. QS field at
- * bits[2:0] to indicate size of queue is one 4KB page.
- * That's 256 entries. Queued Head (IQH) and Queue Tail (IQT)
- * registers are automatically reset to 0 with write
- * to IQA register.
- */
- dmar_writeq(iommu->reg, DMAR_IQA_REG, paddr);
-
- /* enable queued invalidation hardware */
- iommu->gcmd |= DMA_GCMD_QIE;
- dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
-
- /* Make sure hardware complete it */
- start_time = jiffies;
- while (1) {
- status = dmar_readl(iommu->reg, DMAR_GSTS_REG);
- if (status & DMA_GSTS_QIES)
- break;
- if (time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT))
- panic("Cannot set QIE field for queue invalidation\n");
- cpu_relax();
- }
- status = 0;
- return status;
-}
diff -r 430b2159c4b7 -r b96a1adbcac0 xen/arch/x86/hvm/vmx/vtd/utils.c
--- a/xen/arch/x86/hvm/vmx/vtd/utils.c Tue Jan 22 11:25:21 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vtd/utils.c Thu Jan 24 14:49:23 2008 +0000
@@ -146,14 +146,12 @@ u32 get_level_index(unsigned long gmfn,
return gmfn & LEVEL_MASK;
}
-void print_vtd_entries(
- struct domain *d,
- struct iommu *iommu,
- int bus, int devfn,
- unsigned long gmfn)
+void print_vtd_entries(struct domain *d, int bus, int devfn,
+ unsigned long gmfn)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
struct acpi_drhd_unit *drhd;
+ struct iommu *iommu;
struct context_entry *ctxt_entry;
struct root_entry *root_entry;
struct dma_pte pte;
@@ -177,6 +175,7 @@ void print_vtd_entries(
{
printk("---- print_vtd_entries %d ----\n", i++);
+ iommu = drhd->iommu;
root_entry = iommu->root_entry;
if ( root_entry == NULL )
{
diff -r 430b2159c4b7 -r b96a1adbcac0 xen/arch/x86/hvm/vmx/vtd/vtd.h
--- a/xen/arch/x86/hvm/vmx/vtd/vtd.h Tue Jan 22 11:25:21 2008 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Copyright (C) Allen Kay <allen.m.kay@xxxxxxxxx>
- * Copyright (C) Weidong Han <weidong.han@xxxxxxxxx>
- */
-
-#ifndef _VTD_H_
-#define _VTD_H_
-
-#include <xen/list.h>
-#include <asm/iommu.h>
-
-#define VTDPREFIX "[VT-D]"
-
-#define DMAR_OPERATION_TIMEOUT (HZ*60) /* 1m */
-#define time_after(a,b) \
- (typecheck(unsigned long, a) && \
- typecheck(unsigned long, b) && \
- ((long)(b) - (long)(a) < 0))
-
-struct IO_APIC_route_remap_entry {
- union {
- u64 val;
- struct {
- u64 vector:8,
- delivery_mode:3,
- index_15:1,
- delivery_status:1,
- polarity:1,
- irr:1,
- trigger:1,
- mask:1,
- reserved:31,
- format:1,
- index_0_14:15;
- };
- };
-};
-
-#endif // _VTD_H_
diff -r 430b2159c4b7 -r b96a1adbcac0 xen/include/asm-x86/hvm/vmx/intel-iommu.h
--- a/xen/include/asm-x86/hvm/vmx/intel-iommu.h Tue Jan 22 11:25:21 2008 +0000
+++ b/xen/include/asm-x86/hvm/vmx/intel-iommu.h Thu Jan 24 14:49:23 2008 +0000
@@ -127,34 +127,32 @@
#define DMA_TLB_IVA_HINT(x) ((((u64)x) & 1) << 6)
/* GCMD_REG */
-#define DMA_GCMD_TE (((u64)1) << 31)
-#define DMA_GCMD_SRTP (((u64)1) << 30)
-#define DMA_GCMD_SFL (((u64)1) << 29)
-#define DMA_GCMD_EAFL (((u64)1) << 28)
-#define DMA_GCMD_WBF (((u64)1) << 27)
-#define DMA_GCMD_QIE (((u64)1) << 26)
-#define DMA_GCMD_IRE (((u64)1) << 25)
-#define DMA_GCMD_SIRTP (((u64)1) << 24)
-#define DMA_GCMD_CFI (((u64)1) << 23)
+#define DMA_GCMD_TE (((u64)1) << 31)
+#define DMA_GCMD_SRTP (((u64)1) << 30)
+#define DMA_GCMD_SFL (((u64)1) << 29)
+#define DMA_GCMD_EAFL (((u64)1) << 28)
+#define DMA_GCMD_WBF (((u64)1) << 27)
+#define DMA_GCMD_QIE (((u64)1) << 26)
+#define DMA_GCMD_IRE (((u64)1) << 25)
+#define DMA_GCMD_SIRTP (((u64)1) << 24)
/* GSTS_REG */
-#define DMA_GSTS_TES (((u64)1) << 31)
-#define DMA_GSTS_RTPS (((u64)1) << 30)
-#define DMA_GSTS_FLS (((u64)1) << 29)
-#define DMA_GSTS_AFLS (((u64)1) << 28)
-#define DMA_GSTS_WBFS (((u64)1) << 27)
+#define DMA_GSTS_TES (((u64)1) << 31)
+#define DMA_GSTS_RTPS (((u64)1) << 30)
+#define DMA_GSTS_FLS (((u64)1) << 29)
+#define DMA_GSTS_AFLS (((u64)1) << 28)
+#define DMA_GSTS_WBFS (((u64)1) << 27)
+#define DMA_GSTS_IRTPS (((u64)1) << 24)
#define DMA_GSTS_QIES (((u64)1) <<26)
#define DMA_GSTS_IRES (((u64)1) <<25)
-#define DMA_GSTS_SIRTPS (((u64)1) << 24)
-#define DMA_GSTS_CFIS (((u64)1) <<23)
/* PMEN_REG */
-#define DMA_PMEN_EPM (((u32)1) << 31)
-#define DMA_PMEN_PRS (((u32)1) << 0)
+#define DMA_PMEN_EPM (((u32)1) << 31)
+#define DMA_PMEN_PRS (((u32)1) << 0)
/* CCMD_REG */
#define DMA_CCMD_INVL_GRANU_OFFSET 61
-#define DMA_CCMD_ICC (((u64)1) << 63)
+#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
@@ -173,14 +171,8 @@
#define DMA_FECTL_IM (((u64)1) << 31)
/* FSTS_REG */
-#define DMA_FSTS_PFO ((u64)1 << 0)
-#define DMA_FSTS_PPF ((u64)1 << 1)
-#define DMA_FSTS_AFO ((u64)1 << 2)
-#define DMA_FSTS_APF ((u64)1 << 3)
-#define DMA_FSTS_IQE ((u64)1 << 4)
-#define DMA_FSTS_ICE ((u64)1 << 5)
-#define DMA_FSTS_ITE ((u64)1 << 6)
-#define DMA_FSTS_FAULTS DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_AFO | DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | DMA_FSTS_ITE
+#define DMA_FSTS_PPF ((u64)2)
+#define DMA_FSTS_PFO ((u64)1)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
/* FRCD_REG, 32 bits access */
@@ -274,10 +266,8 @@ struct dma_pte {
/* interrupt remap entry */
struct iremap_entry {
- union {
- u64 lo_val;
struct {
- u64 p : 1,
+ u64 present : 1,
fpd : 1,
dm : 1,
rh : 1,
@@ -289,16 +279,12 @@ struct iremap_entry {
res_2 : 8,
dst : 32;
}lo;
- };
- union {
- u64 hi_val;
struct {
u64 sid : 16,
sq : 2,
svt : 2,
res_1 : 44;
}hi;
- };
};
#define IREMAP_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct iremap_entry))
#define iremap_present(v) ((v).lo & 1)
@@ -400,11 +386,11 @@ struct poll_info {
#define RESERVED_VAL 0
-#define TYPE_INVAL_CONTEXT 0x1
-#define TYPE_INVAL_IOTLB 0x2
-#define TYPE_INVAL_DEVICE_IOTLB 0x3
-#define TYPE_INVAL_IEC 0x4
-#define TYPE_INVAL_WAIT 0x5
+#define TYPE_INVAL_CONTEXT 1
+#define TYPE_INVAL_IOTLB 2
+#define TYPE_INVAL_DEVICE_IOTLB 3
+#define TYPE_INVAL_IEC 4
+#define TYPE_INVAL_WAIT 5
#define NOTIFY_TYPE_POLL 1
#define NOTIFY_TYPE_INTR 1
@@ -414,10 +400,6 @@ struct poll_info {
#define IEC_GLOBAL_INVL 0
#define IEC_INDEX_INVL 1
-#define IRTA_REG_EIME_SHIFT 11
-#define IRTA_REG_TABLE_SIZE 7 // 4k page = 256 * 16 byte entries
- // 2^^(IRTA_REG_TABLE_SIZE + 1) = 256
- // IRTA_REG_TABLE_SIZE = 7
#define VTD_PAGE_TABLE_LEVEL_3 3
#define VTD_PAGE_TABLE_LEVEL_4 4
@@ -432,29 +414,4 @@ extern struct list_head acpi_rmrr_units;
extern struct list_head acpi_rmrr_units;
extern struct list_head acpi_ioapic_units;
-struct qi_ctrl {
- struct qinval_entry *qinval; /* queue invalidation page */
- int qinval_index; /* queue invalidation index */
- spinlock_t qinval_lock; /* lock for queue invalidation page */
- spinlock_t qinval_poll_lock; /* lock for queue invalidation poll addr */
- volatile u32 qinval_poll_status; /* used by poll methord to sync */
-};
-
-struct ir_ctrl {
- struct iremap_entry *iremap; /* interrupt remap table */
- int iremap_index; /* interrupt remap index */
- spinlock_t iremap_lock; /* lock for irq remappping table */
-};
-
-struct iommu_flush {
- int (*context)(void *iommu, u16 did, u16 source_id, u8 function_mask, u64 type, int non_present_entry_flush);
- int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order, u64 type, int non_present_entry_flush);
-};
-
-struct intel_iommu {
- struct qi_ctrl qi_ctrl;
- struct ir_ctrl ir_ctrl;
- struct iommu_flush flush;
-};
-
#endif
diff -r 430b2159c4b7 -r b96a1adbcac0 xen/include/asm-x86/iommu.h
--- a/xen/include/asm-x86/iommu.h Tue Jan 22 11:25:21 2008 +0000
+++ b/xen/include/asm-x86/iommu.h Thu Jan 24 14:49:23 2008 +0000
@@ -31,9 +31,6 @@ extern int vtd_enabled;
#define domain_hvm_iommu(d) (&d->arch.hvm_domain.hvm_iommu)
#define domain_vmx_iommu(d) (&d->arch.hvm_domain.hvm_iommu.vmx_iommu)
-#define iommu_qi_ctrl(iommu) (&(iommu->intel.qi_ctrl));
-#define iommu_ir_ctrl(iommu) (&(iommu->intel.ir_ctrl));
-#define iommu_get_flush(iommu) (&(iommu->intel.flush));
/*
* The PCI interface treats multi-function devices as independent
@@ -64,7 +61,6 @@ struct iommu {
spinlock_t register_lock; /* protect iommu register handling */
struct root_entry *root_entry; /* virtual address */
unsigned int vector;
- struct intel_iommu intel;
};
int iommu_setup(void);