[Xen-changelog] [xen-unstable] x86/mm: add AMD IOMMU control bits to p2m entries.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86/mm: add AMD IOMMU control bits to p2m entries.
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Thu, 21 Apr 2011 09:20:12 +0100
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Wei Wang <wei.wang2@xxxxxxx>
# Date 1303143861 -3600
# Node ID e37b600d5f1458649c5b90d526d69ef5d859cf35
# Parent  78145a98915c577b4c78558e660dd6d1c5e7c902
x86/mm: add AMD IOMMU control bits to p2m entries.

This patch adds next-level bits into bits 9 - 11 of p2m entries and adds
r/w permission bits into bits 61 - 62 of p2m entries.  (A standalone
sketch of the resulting bit layout follows the sign-offs below.)

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
Acked-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Committed-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
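For context, and not part of the patch itself: the new iommu_nlevel_to_flags()
macro builds a 24-bit flags value, and it is Xen's x86-64 flag packing that
places those bits at the p2m-entry positions named in the description above.
The minimal sketch below reproduces put_pte_flags() as defined in
xen/include/asm-x86/x86_64/page.h and the IOMMUF_* values from
xen/include/xen/iommu.h (both assumed here, not taken from this patch) and
prints where the bits land.

/*
 * Standalone sketch, not part of the patch: shows where the 24-bit flag
 * value built by iommu_nlevel_to_flags() ends up in a 64-bit p2m entry.
 * put_pte_flags() and the IOMMUF_* values are reproduced from what
 * xen/include/asm-x86/x86_64/page.h and xen/include/xen/iommu.h define;
 * treat them as assumptions if your tree differs.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t intpte_t;

/* Xen x86-64 flag packing: flag bits 0-11 -> PTE bits 0-11,
 * flag bits 12-23 -> PTE bits 52-63. */
#define put_pte_flags(x) (((intpte_t)((x) & ~0xFFFu) << 40) | ((x) & 0xFFFu))

/* From the patch: next-level field in flag bits 9-11, r/w permissions in
 * flag bits 21-22 (i.e. PTE bits 61-62 after packing). */
#define iommu_nlevel_to_flags(nl, f) ((((nl) & 0x7) << 9 )|(((f) & 0x3) << 21))

#define IOMMUF_readable 1u   /* assumed, as in xen/include/xen/iommu.h */
#define IOMMUF_writable 2u

int main(void)
{
    unsigned int flags = iommu_nlevel_to_flags(3, IOMMUF_readable |
                                                  IOMMUF_writable);
    intpte_t pte = put_pte_flags(flags);

    /* Expect flags = 0x600600 and pte = 0x6000000000000600:
     * next level 3 in PTE bits 9-11, r/w in PTE bits 61-62. */
    printf("flags = 0x%06x\n", flags);
    printf("pte   = 0x%016" PRIx64 "\n", pte);
    return 0;
}

Note that p2m_add_iommu_flags() in the diff below only ORs these bits in when
iommu_hap_pt_share is set, i.e. when the AMD IOMMU walks the p2m table
directly instead of maintaining separate IO page tables.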


diff -r 78145a98915c -r e37b600d5f14 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Mon Apr 18 17:24:21 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Mon Apr 18 17:24:21 2011 +0100
@@ -201,6 +201,19 @@
 // Walk one level of the P2M table, allocating a new table if required.
 // Returns 0 on error.
 //
+
+/* AMD IOMMU: Convert next level bits and r/w bits into 24 bits p2m flags */
+#define iommu_nlevel_to_flags(nl, f) ((((nl) & 0x7) << 9 )|(((f) & 0x3) << 21))
+
+static void p2m_add_iommu_flags(l1_pgentry_t *p2m_entry,
+                                unsigned int nlevel, unsigned int flags)
+{
+#if CONFIG_PAGING_LEVELS == 4
+    if ( iommu_hap_pt_share )
+        l1e_add_flags(*p2m_entry, iommu_nlevel_to_flags(nlevel, flags));
+#endif
+}
+
 static int
 p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table,
                unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
@@ -230,6 +243,7 @@
 
         switch ( type ) {
         case PGT_l3_page_table:
+            p2m_add_iommu_flags(&new_entry, 3, IOMMUF_readable|IOMMUF_writable);
             p2m->write_p2m_entry(p2m, gfn, p2m_entry, *table_mfn, new_entry, 4);
             break;
         case PGT_l2_page_table:
@@ -237,9 +251,11 @@
             /* for PAE mode, PDPE only has PCD/PWT/P bits available */
             new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), _PAGE_PRESENT);
 #endif
+            p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
             p2m->write_p2m_entry(p2m, gfn, p2m_entry, *table_mfn, new_entry, 3);
             break;
         case PGT_l1_page_table:
+            p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
             p2m->write_p2m_entry(p2m, gfn, p2m_entry, *table_mfn, new_entry, 2);
             break;
         default:
@@ -267,12 +283,14 @@
         for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
         {
             new_entry = l1e_from_pfn(pfn + (i * L1_PAGETABLE_ENTRIES), flags);
+            p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
             p2m->write_p2m_entry(p2m, gfn,
                 l1_entry+i, *table_mfn, new_entry, 2);
         }
         unmap_domain_page(l1_entry);
         new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
                                  __PAGE_HYPERVISOR|_PAGE_USER); //disable PSE
+        p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, *table_mfn, new_entry, 3);
     }
 
@@ -300,6 +318,7 @@
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         {
             new_entry = l1e_from_pfn(pfn + i, flags);
+            p2m_add_iommu_flags(&new_entry, 0, 0);
             p2m->write_p2m_entry(p2m, gfn,
                 l1_entry+i, *table_mfn, new_entry, 1);
         }
@@ -307,6 +326,7 @@
         
         new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
                                  __PAGE_HYPERVISOR|_PAGE_USER);
+        p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
         p2m->write_p2m_entry(p2m, gfn,
             p2m_entry, *table_mfn, new_entry, 2);
     }
@@ -1395,6 +1415,9 @@
     l2_pgentry_t l2e_content;
     l3_pgentry_t l3e_content;
     int rv=0;
+    unsigned int iommu_pte_flags = (p2mt == p2m_ram_rw) ?
+                                   IOMMUF_readable|IOMMUF_writable:
+                                   0; 
 
     if ( tb_init_done )
     {
@@ -1443,6 +1466,10 @@
                            p2m_type_to_flags(p2mt, mfn) | _PAGE_PSE)
             : l3e_empty();
         entry_content.l1 = l3e_content.l3;
+
+        if ( entry_content.l1 != 0 )
+            p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
+
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 3);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
 
@@ -1481,7 +1508,10 @@
                                          p2m_type_to_flags(p2mt, mfn));
         else
             entry_content = l1e_empty();
-        
+
+        if ( entry_content.l1 != 0 )
+            p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
+
         /* level 1 entry */
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 1);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -1512,6 +1542,10 @@
             l2e_content = l2e_empty();
         
         entry_content.l1 = l2e_content.l2;
+
+        if ( entry_content.l1 != 0 )
+            p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
+
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 2);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
