[Xen-changelog] [xen-unstable] x86/mm: Implement p2m table sharing for AMD IOMMU.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86/mm: Implement p2m table sharing for AMD IOMMU.
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Thu, 21 Apr 2011 09:20:13 +0100
Delivery-date: Thu, 21 Apr 2011 01:21:15 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Wei Wang <wei.wang2@xxxxxxx>
# Date 1303143861 -3600
# Node ID 835550a0c6c0064fcd65e70d209c5deb8ae756c8
# Parent  e37b600d5f1458649c5b90d526d69ef5d859cf35
x86/mm: Implement p2m table sharing for AMD IOMMU.

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
Acked-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Committed-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
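
For archive readers, the heart of this change is the flush rule added to the
p2m entry-update path in xen/arch/x86/mm/p2m.c: when the IOMMU shares the HAP
p2m tables (iommu_hap_pt_share), the hardware walks the same entries that the
p2m code writes, so no separate iommu_map_page()/iommu_unmap_page() calls are
needed and the only extra work is an INVALIDATE_IOMMU_PAGES flush when an
existing, non-zero mapping is replaced by a different MFN. The standalone
sketch below illustrates that decision; the helper name needs_iommu_flush() is
illustrative and not part of the patch.

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Illustrative only: mirrors the old_mfn bookkeeping this patch adds to
     * the p2m code. With shared tables the IOMMU already sees the new PTE;
     * only stale IOTLB entries for a *replaced* mapping need an explicit
     * flush.
     */
    static bool needs_iommu_flush(unsigned long old_mfn, unsigned long new_mfn)
    {
        return old_mfn != 0 && old_mfn != new_mfn;
    }

    int main(void)
    {
        printf("%d\n", needs_iommu_flush(0, 0x1000));      /* 0: fresh mapping */
        printf("%d\n", needs_iommu_flush(0x1000, 0x1000)); /* 0: MFN unchanged */
        printf("%d\n", needs_iommu_flush(0x1000, 0x2000)); /* 1: remap, flush  */
        return 0;
    }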


diff -r e37b600d5f14 -r 835550a0c6c0 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Mon Apr 18 17:24:21 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Mon Apr 18 17:24:21 2011 +0100
@@ -35,6 +35,7 @@
 #include <asm/mem_sharing.h>
 #include <xen/event.h>
 #include <asm/hvm/nestedhvm.h>
+#include <asm/hvm/svm/amd-iommu-proto.h>
 
 /* Debugging and auditing of the P2M code? */
 #define P2M_AUDIT     0
@@ -1418,6 +1419,7 @@
     unsigned int iommu_pte_flags = (p2mt == p2m_ram_rw) ?
                                    IOMMUF_readable|IOMMUF_writable:
                                    0; 
+    unsigned long old_mfn = 0;
 
     if ( tb_init_done )
     {
@@ -1468,7 +1470,10 @@
         entry_content.l1 = l3e_content.l3;
 
         if ( entry_content.l1 != 0 )
+        {
             p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
+            old_mfn = l1e_get_pfn(*p2m_entry);
+        }
 
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 3);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -1510,8 +1515,10 @@
             entry_content = l1e_empty();
 
         if ( entry_content.l1 != 0 )
+        {
             p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
-
+            old_mfn = l1e_get_pfn(*p2m_entry);
+        }
         /* level 1 entry */
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 1);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -1544,7 +1551,10 @@
         entry_content.l1 = l2e_content.l2;
 
         if ( entry_content.l1 != 0 )
+        {
             p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
+            old_mfn = l1e_get_pfn(*p2m_entry);
+        }
 
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 2);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -1561,13 +1571,21 @@
 
     if ( iommu_enabled && need_iommu(p2m->domain) )
     {
-        if ( p2mt == p2m_ram_rw )
-            for ( i = 0; i < (1UL << page_order); i++ )
-                iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i,
-                               IOMMUF_readable|IOMMUF_writable);
+        if ( iommu_hap_pt_share )
+        {
+            if ( old_mfn && (old_mfn != mfn_x(mfn)) )
+                amd_iommu_flush_pages(p2m->domain, gfn, page_order);
+        }
         else
-            for ( int i = 0; i < (1UL << page_order); i++ )
-                iommu_unmap_page(p2m->domain, gfn+i);
+        {
+            if ( p2mt == p2m_ram_rw )
+                for ( i = 0; i < (1UL << page_order); i++ )
+                    iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i,
+                                   IOMMUF_readable|IOMMUF_writable);
+            else
+                for ( int i = 0; i < (1UL << page_order); i++ )
+                    iommu_unmap_page(p2m->domain, gfn+i);
+        }
     }
 
     /* Success */
diff -r e37b600d5f14 -r 835550a0c6c0 xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c  Mon Apr 18 17:24:21 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_init.c  Mon Apr 18 17:24:21 2011 +0100
@@ -889,7 +889,7 @@
 {
     struct domain *d;
     for_each_domain( d )
-        invalidate_all_iommu_pages(d);
+        amd_iommu_flush_all_pages(d);
 }
 
 static void invalidate_all_devices(void)
diff -r e37b600d5f14 -r 835550a0c6c0 xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c   Mon Apr 18 17:24:21 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c   Mon Apr 18 17:24:21 2011 +0100
@@ -19,6 +19,7 @@
  */
 
 #include <xen/sched.h>
+#include <asm/p2m.h>
 #include <xen/hvm/iommu.h>
 #include <asm/amd-iommu.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
@@ -184,20 +185,33 @@
     unmap_domain_page(l1_table);
 }
 
-static void set_iommu_l1e_present(u64 l2e, unsigned long gfn,
+static int set_iommu_l1e_present(u64 l2e, unsigned long gfn,
                                  u64 maddr, int iw, int ir)
 {
-    u64 addr_lo, addr_hi;
+    u64 addr_lo, addr_hi, maddr_old;
     u32 entry;
     void *l1_table;
     int offset;
     u32 *l1e;
+    int need_flush = 0;
 
     l1_table = map_domain_page(l2e >> PAGE_SHIFT);
 
     offset = gfn & (~PTE_PER_TABLE_MASK);
     l1e = (u32*)((u8*)l1_table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE));
 
+    addr_hi = get_field_from_reg_u32(l1e[1],
+                                     IOMMU_PTE_ADDR_HIGH_MASK,
+                                     IOMMU_PTE_ADDR_HIGH_SHIFT);
+    addr_lo = get_field_from_reg_u32(l1e[0],
+                                     IOMMU_PTE_ADDR_LOW_MASK,
+                                     IOMMU_PTE_ADDR_LOW_SHIFT);
+
+    maddr_old = ((addr_hi << 32) | addr_lo) << PAGE_SHIFT;
+
+    if ( maddr_old && (maddr_old != maddr) )
+        need_flush = 1;
+
     addr_lo = maddr & DMA_32BIT_MASK;
     addr_hi = maddr >> 32;
 
@@ -226,6 +240,7 @@
     l1e[0] = entry;
 
     unmap_domain_page(l1_table);
+    return need_flush;
 }
 
 static void amd_iommu_set_page_directory_entry(u32 *pde, 
@@ -551,7 +566,7 @@
         }
 
         /* For safety, invalidate all entries */
-        invalidate_all_iommu_pages(d);
+        amd_iommu_flush_all_pages(d);
     }
     return 0;
 }
@@ -560,10 +575,14 @@
                        unsigned int flags)
 {
     u64 iommu_l2e;
+    int need_flush = 0;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
     BUG_ON( !hd->root_table );
 
+    if ( iommu_hap_pt_share && is_hvm_domain(d) )
+        return 0;
+
     spin_lock(&hd->mapping_lock);
 
     /* Since HVM domain is initialized with 2 level IO page table,
@@ -587,9 +606,11 @@
         return -EFAULT;
     }
 
-    set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT,
-                          !!(flags & IOMMUF_writable),
-                          !!(flags & IOMMUF_readable));
+    need_flush = set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT,
+                                       !!(flags & IOMMUF_writable),
+                                       !!(flags & IOMMUF_readable));
+    if ( need_flush )
+        amd_iommu_flush_pages(d, gfn, 0);
 
     spin_unlock(&hd->mapping_lock);
     return 0;
@@ -598,12 +619,13 @@
 int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
 {
     u64 iommu_l2e;
-    unsigned long flags;
-    struct amd_iommu *iommu;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
     BUG_ON( !hd->root_table );
 
+    if ( iommu_hap_pt_share && is_hvm_domain(d) )
+        return 0;
+
     spin_lock(&hd->mapping_lock);
 
     /* Since HVM domain is initialized with 2 level IO page table,
@@ -632,14 +654,7 @@
     clear_iommu_l1e_present(iommu_l2e, gfn);
     spin_unlock(&hd->mapping_lock);
 
-    /* send INVALIDATE_IOMMU_PAGES command */
-    for_each_amd_iommu ( iommu )
-    {
-        spin_lock_irqsave(&iommu->lock, flags);
-        invalidate_iommu_pages(iommu, (u64)gfn << PAGE_SHIFT, hd->domain_id, 0);
-        flush_command_buffer(iommu);
-        spin_unlock_irqrestore(&iommu->lock, flags);
-    }
+    amd_iommu_flush_pages(d, gfn, 0);
 
     return 0;
 }
@@ -667,17 +682,59 @@
     return 0;
 }
 
-void invalidate_all_iommu_pages(struct domain *d)
+
+/* Flush iommu cache after p2m changes. */
+static void _amd_iommu_flush_pages(struct domain *d,
+                                   uint64_t gaddr, unsigned int order)
 {
     unsigned long flags;
     struct amd_iommu *iommu;
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+    unsigned int dom_id = hd->domain_id;
 
+    /* send INVALIDATE_IOMMU_PAGES command */
     for_each_amd_iommu ( iommu )
     {
         spin_lock_irqsave(&iommu->lock, flags);
-        invalidate_iommu_pages(iommu, 0x7FFFFFFFFFFFF000ULL,
-                               d->domain_id, 0);
+        invalidate_iommu_pages(iommu, gaddr, dom_id, order);
         flush_command_buffer(iommu);
         spin_unlock_irqrestore(&iommu->lock, flags);
     }
 }
+
+void amd_iommu_flush_all_pages(struct domain *d)
+{
+    _amd_iommu_flush_pages(d, 0x7FFFFFFFFFFFFULL, 0);
+}
+
+void amd_iommu_flush_pages(struct domain *d,
+                           unsigned long gfn, unsigned int order)
+{
+    _amd_iommu_flush_pages(d, (uint64_t) gfn << PAGE_SHIFT, order);
+}
+
+/* Share p2m table with iommu. */
+void amd_iommu_share_p2m(struct domain *d)
+{
+    struct hvm_iommu *hd  = domain_hvm_iommu(d);
+    struct page_info *p2m_table;
+    mfn_t pgd_mfn;
+
+    ASSERT( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled );
+
+    pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
+    p2m_table = mfn_to_page(mfn_x(pgd_mfn));
+
+    if ( hd->root_table != p2m_table )
+    {
+        free_amd_iommu_pgtable(hd->root_table);
+        hd->root_table = p2m_table;
+
+        /* When sharing p2m with iommu, paging mode = 4 */
+        hd->paging_mode = IOMMU_PAGING_MODE_LEVEL_4;
+        iommu_hap_pt_share = 1;
+
+        AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = 0x%lx\n",
+                        mfn_x(pgd_mfn));
+    }
+}
diff -r e37b600d5f14 -r 835550a0c6c0 xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c       Mon Apr 18 17:24:21 2011 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c       Mon Apr 18 17:24:21 2011 +0100
@@ -362,6 +362,9 @@
 {
     struct hvm_iommu *hd  = domain_hvm_iommu(d);
 
+    if ( iommu_hap_pt_share )
+        return;
+
     spin_lock(&hd->mapping_lock);
     if ( hd->root_table )
     {
@@ -375,7 +378,7 @@
 static void amd_iommu_domain_destroy(struct domain *d)
 {
     deallocate_iommu_page_tables(d);
-    invalidate_all_iommu_pages(d);
+    amd_iommu_flush_all_pages(d);
 }
 
 static int amd_iommu_return_device(
diff -r e37b600d5f14 -r 835550a0c6c0 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Mon Apr 18 17:24:21 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Mon Apr 18 17:24:21 2011 +0100
@@ -51,10 +51,17 @@
 int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
                        unsigned int flags);
 int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
+void amd_iommu_flush_pages(struct domain *d, unsigned long gfn,
+                           unsigned int order);
+void amd_iommu_flush_all_pages(struct domain *d);
+
 u64 amd_iommu_get_next_table_from_pte(u32 *entry);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
-        u64 phys_addr, unsigned long size, int iw, int ir);
-void invalidate_all_iommu_pages(struct domain *d);
+                                       u64 phys_addr, unsigned long size,
+                                       int iw, int ir);
+
+/* Share p2m table with iommu */
+void amd_iommu_share_p2m(struct domain *d);
 
 /* device table functions */
 int get_dma_requestor_id(u16 bdf);
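
On the driver side, amd_iommu_map_page() and amd_iommu_unmap_page() now return
early for HVM domains once table sharing is active, leaving the new
amd_iommu_flush_pages()/amd_iommu_flush_all_pages() helpers as the only
per-update work. A compilable toy sketch of that short-circuit, using stand-in
types in place of Xen's struct domain and globals (all names below are
illustrative, not Xen APIs):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the iommu_hap_pt_share flag tested by the real code. */
    static bool hap_pt_share;

    /* Stand-in for struct domain; the real check is is_hvm_domain(d). */
    struct toy_domain { bool is_hvm; };

    /*
     * Mirrors the early-return guard added to amd_iommu_map_page() and
     * amd_iommu_unmap_page(): with shared tables there is no private IOMMU
     * page table to update, so the call becomes a no-op.
     */
    static int toy_iommu_map_page(struct toy_domain *d, unsigned long gfn)
    {
        if ( hap_pt_share && d->is_hvm )
            return 0;                 /* p2m write already updated the PTE */

        printf("writing private IOMMU table entry for gfn 0x%lx\n", gfn);
        return 0;
    }

    int main(void)
    {
        struct toy_domain d = { .is_hvm = true };

        hap_pt_share = false;
        toy_iommu_map_page(&d, 0x1000);   /* updates the private table */

        hap_pt_share = true;
        toy_iommu_map_page(&d, 0x1000);   /* no-op under table sharing */
        return 0;
    }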

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
