WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] VT-d: flush iotlb of selective iommu when a domain's VT-d table is changed

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] VT-d: flush iotlb of selective iommu when a domain's VT-d table is changed
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Mon, 26 May 2008 01:00:09 -0700
Delivery-date: Mon, 26 May 2008 01:00:12 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1211786736 -3600
# Node ID 9a7a6f729d2c0352a772aa274454dee1c96faa5a
# Parent  c0c0f4fa88509b6f5e4fa9bf0687c7a6df53eeb8
VT-d: flush iotlb of selective iommu when a domain's VT-d table is changed

When a domain's VT-d table is changed, only the iommus under which the
domain has assigned devices need to be flushed.

Signed-off-by: Yang, Xiaowei <xiaowei.yang@xxxxxxxxx>
---
 xen/drivers/passthrough/vtd/iommu.c |   39 ++++++++++++++++++++++++++++++++++--
 xen/include/xen/hvm/iommu.h         |    1 
 xen/include/xen/iommu.h             |    1 
 3 files changed, 39 insertions(+), 2 deletions(-)

diff -r c0c0f4fa8850 -r 9a7a6f729d2c xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Mon May 26 08:24:55 2008 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Mon May 26 08:25:36 2008 +0100
@@ -572,6 +572,7 @@ void iommu_flush_all(void)
 /* clear one page's page table */
 static void dma_pte_clear_one(struct domain *domain, u64 addr)
 {
+    struct hvm_iommu *hd = domain_hvm_iommu(domain);
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     struct dma_pte *page = NULL, *pte = NULL;
@@ -593,6 +594,10 @@ static void dma_pte_clear_one(struct dom
         for_each_drhd_unit ( drhd )
         {
             iommu = drhd->iommu;
+
+            if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+                continue;
+
             if ( cap_caching_mode(iommu->cap) )
                 iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
                                       addr, 1, 0);
@@ -1022,7 +1027,7 @@ static int iommu_alloc(struct acpi_drhd_
 
     set_fixmap_nocache(FIX_IOMMU_REGS_BASE_0 + nr_iommus, drhd->address);
     iommu->reg = (void *)fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus);
-    nr_iommus++;
+    iommu->index = nr_iommus++;
 
     iommu->cap = dmar_readq(iommu->reg, DMAR_CAP_REG);
     iommu->ecap = dmar_readq(iommu->reg, DMAR_ECAP_REG);
@@ -1191,6 +1196,8 @@ static int domain_context_mapping_one(
         iommu_flush_write_buffer(iommu);
     else
         iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0);
+
+    set_bit(iommu->index, &hd->iommu_bitmap);
     spin_unlock_irqrestore(&iommu->lock, flags);
 
     return 0;
@@ -1418,11 +1425,12 @@ void reassign_device_ownership(
 {
     struct hvm_iommu *source_hd = domain_hvm_iommu(source);
     struct hvm_iommu *target_hd = domain_hvm_iommu(target);
-    struct pci_dev *pdev;
+    struct pci_dev *pdev, *pdev2;
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     int status;
     unsigned long flags;
+    int found = 0;
 
     pdev_flr(bus, devfn);
 
@@ -1443,6 +1451,18 @@ void reassign_device_ownership(
     list_move(&pdev->list, &target_hd->pdev_list);
     spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
     spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
+
+    for_each_pdev ( source, pdev2 )
+    {
+        drhd = acpi_find_matched_drhd_unit(pdev2);
+        if ( drhd->iommu == iommu )
+        {
+            found = 1;
+            break;
+        }
+    }
+    if ( !found )
+        clear_bit(iommu->index, &source_hd->iommu_bitmap);
 
     status = domain_context_mapping(target, iommu, pdev);
     if ( status != 0 )
@@ -1500,6 +1520,7 @@ int intel_iommu_map_page(
 int intel_iommu_map_page(
     struct domain *d, unsigned long gfn, unsigned long mfn)
 {
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     struct dma_pte *page = NULL, *pte = NULL;
@@ -1527,6 +1548,10 @@ int intel_iommu_map_page(
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
+
+        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+            continue;
+
         if ( cap_caching_mode(iommu->cap) )
             iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
                                   (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
@@ -1578,6 +1603,7 @@ int iommu_page_mapping(struct domain *do
 int iommu_page_mapping(struct domain *domain, paddr_t iova,
                        paddr_t hpa, size_t size, int prot)
 {
+    struct hvm_iommu *hd = domain_hvm_iommu(domain);
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     u64 start_pfn, end_pfn;
@@ -1611,6 +1637,10 @@ int iommu_page_mapping(struct domain *do
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
+
+        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+            continue;
+
         if ( cap_caching_mode(iommu->cap) )
             iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
                                   iova, index, 0);
@@ -1630,6 +1660,7 @@ int iommu_page_unmapping(struct domain *
 
 void iommu_flush(struct domain *d, unsigned long gfn, u64 *p2m_entry)
 {
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu = NULL;
     struct dma_pte *pte = (struct dma_pte *) p2m_entry;
@@ -1637,6 +1668,10 @@ void iommu_flush(struct domain *d, unsig
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
+
+        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+            continue;
+
         if ( cap_caching_mode(iommu->cap) )
             iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
                                   (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
diff -r c0c0f4fa8850 -r 9a7a6f729d2c xen/include/xen/hvm/iommu.h
--- a/xen/include/xen/hvm/iommu.h       Mon May 26 08:24:55 2008 +0100
+++ b/xen/include/xen/hvm/iommu.h       Mon May 26 08:25:36 2008 +0100
@@ -43,6 +43,7 @@ struct hvm_iommu {
     int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
     struct list_head g2m_ioport_list;  /* guest to machine ioport mapping */
     domid_t iommu_domid;           /* domain id stored in iommu */
+    u64 iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */
 
     /* amd iommu support */
     int domain_id;
diff -r c0c0f4fa8850 -r 9a7a6f729d2c xen/include/xen/iommu.h
--- a/xen/include/xen/iommu.h   Mon May 26 08:24:55 2008 +0100
+++ b/xen/include/xen/iommu.h   Mon May 26 08:25:36 2008 +0100
@@ -44,6 +44,7 @@ struct iommu {
 struct iommu {
     struct list_head list;
     void __iomem *reg; /* Pointer to hardware regs, virtual addr */
+    u32        index;         /* Sequence number of iommu */
     u32        gcmd;          /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
     u64        cap;
     u64        ecap;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] [xen-unstable] VT-d: flush iotlb of selective iommu when a domain's VT-d table is changed, Xen patchbot-unstable <=