
[Xen-devel] [PATCH RFC 07/10] passthrough/amd: split out hvm code from iommu_map.c



Move update_paging_mode into a new HVM-only file, renaming it to
hvm_update_paging_mode. Create a local header file for this and other
functions that now need exporting.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
---
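Note for reviewers: the function being moved grows the I/O page table
whenever a gfn's index at the current root level no longer fits in a
single table. Below is a minimal standalone sketch of that arithmetic,
using the same 512-entries-per-table constants as the Xen code
(illustration only, not part of this patch):

    #include <stdio.h>

    /* 512 eight-byte PTEs per 4K page table, as in the AMD IOMMU code. */
    #define PTE_PER_TABLE_SHIFT 9
    #define PTE_PER_TABLE_SIZE  (1UL << PTE_PER_TABLE_SHIFT)

    /* Mirrors the growth loop in hvm_update_paging_mode(): raise the
     * paging mode until the gfn's top-level index fits in one table. */
    static unsigned int needed_level(unsigned long gfn, unsigned int level)
    {
        unsigned long offset = gfn >> (PTE_PER_TABLE_SHIFT * (level - 1));

        while ( offset >= PTE_PER_TABLE_SIZE )
        {
            level++;
            offset >>= PTE_PER_TABLE_SHIFT;
        }
        return level;
    }

    int main(void)
    {
        /* A level-2 tree covers 2^18 4K pages (1 GiB of guest space):
         * gfn 0x3ffff still fits, gfn 0x40000 forces growth to level 3. */
        printf("%u\n", needed_level(0x3ffffUL, 2)); /* prints 2 */
        printf("%u\n", needed_level(0x40000UL, 2)); /* prints 3 */
        return 0;
    }

When !CONFIG_HVM the new hvm.c is not built at all; the static inline
stub in iommu.h keeps the callers in iommu_map.c compiling while
asserting the path is never reached.
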
 xen/drivers/passthrough/x86/amd/Makefile    |   1 +
 xen/drivers/passthrough/x86/amd/hvm.c       | 108 ++++++++++++++++++++++++++++
 xen/drivers/passthrough/x86/amd/iommu.h     |  32 +++++++++
 xen/drivers/passthrough/x86/amd/iommu_map.c | 103 ++------------------------
 4 files changed, 148 insertions(+), 96 deletions(-)
 create mode 100644 xen/drivers/passthrough/x86/amd/hvm.c
 create mode 100644 xen/drivers/passthrough/x86/amd/iommu.h

diff --git a/xen/drivers/passthrough/x86/amd/Makefile b/xen/drivers/passthrough/x86/amd/Makefile
index 415146fcdb..ed28fdb660 100644
--- a/xen/drivers/passthrough/x86/amd/Makefile
+++ b/xen/drivers/passthrough/x86/amd/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_HVM) += hvm.o
 obj-bin-y += iommu_detect.init.o
 obj-y += iommu_init.o
 obj-y += iommu_map.o
diff --git a/xen/drivers/passthrough/x86/amd/hvm.c b/xen/drivers/passthrough/x86/amd/hvm.c
new file mode 100644
index 0000000000..35c76c2545
--- /dev/null
+++ b/xen/drivers/passthrough/x86/amd/hvm.c
@@ -0,0 +1,108 @@
+#include <xen/sched.h>
+#include <asm/amd-iommu.h>
+#include <asm/hvm/svm/amd-iommu-proto.h>
+#include "../../ats.h"
+#include <xen/pci.h>
+
+#include "iommu.h"
+
+int hvm_update_paging_mode(struct domain *d, unsigned long gfn)
+{
+    u16 bdf;
+    void *device_entry;
+    unsigned int req_id, level, offset;
+    unsigned long flags;
+    struct pci_dev *pdev;
+    struct amd_iommu *iommu = NULL;
+    struct page_info *new_root = NULL;
+    struct page_info *old_root = NULL;
+    void *new_root_vaddr;
+    unsigned long old_root_mfn;
+    struct domain_iommu *hd = dom_iommu(d);
+
+    if ( gfn == gfn_x(INVALID_GFN) )
+        return -EADDRNOTAVAIL;
+    ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
+
+    level = hd->arch.paging_mode;
+    old_root = hd->arch.root_table;
+    offset = gfn >> (PTE_PER_TABLE_SHIFT * (level - 1));
+
+    ASSERT(spin_is_locked(&hd->arch.mapping_lock) && is_hvm_domain(d));
+
+    while ( offset >= PTE_PER_TABLE_SIZE )
+    {
+        /* Allocate and install a new root table.  Only the upper I/O page
+         * table grows; no need to fix next level bits. */
+        new_root = alloc_amd_iommu_pgtable();
+        if ( new_root == NULL )
+        {
+            AMD_IOMMU_DEBUG("%s Cannot allocate I/O page table\n",
+                            __func__);
+            return -ENOMEM;
+        }
+
+        new_root_vaddr = __map_domain_page(new_root);
+        old_root_mfn = page_to_mfn(old_root);
+        set_iommu_pde_present(new_root_vaddr, old_root_mfn, level,
+                              !!IOMMUF_writable, !!IOMMUF_readable);
+        level++;
+        old_root = new_root;
+        offset >>= PTE_PER_TABLE_SHIFT;
+        unmap_domain_page(new_root_vaddr);
+    }
+
+    if ( new_root != NULL )
+    {
+        hd->arch.paging_mode = level;
+        hd->arch.root_table = new_root;
+
+        if ( !pcidevs_locked() )
+            AMD_IOMMU_DEBUG("%s Trying to access pdev_list "
+                            "without acquiring pcidevs_lock.\n", __func__);
+
+        /* Update device table entries using new root table and paging mode */
+        for_each_pdev( d, pdev )
+        {
+            bdf = PCI_BDF2(pdev->bus, pdev->devfn);
+            iommu = find_iommu_for_device(pdev->seg, bdf);
+            if ( !iommu )
+            {
+                AMD_IOMMU_DEBUG("%s Failed to find iommu.\n", __func__);
+                return -ENODEV;
+            }
+
+            spin_lock_irqsave(&iommu->lock, flags);
+            do {
+                req_id = get_dma_requestor_id(pdev->seg, bdf);
+                device_entry = iommu->dev_table.buffer +
+                               (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
+
+                /* valid = 0 only works for dom0 passthrough mode */
+                amd_iommu_set_root_page_table((u32 *)device_entry,
+                                              page_to_maddr(hd->arch.root_table),
+                                              d->domain_id,
+                                              hd->arch.paging_mode, 1);
+
+                amd_iommu_flush_device(iommu, req_id);
+                bdf += pdev->phantom_stride;
+            } while ( PCI_DEVFN2(bdf) != pdev->devfn &&
+                      PCI_SLOT(bdf) == PCI_SLOT(pdev->devfn) );
+            spin_unlock_irqrestore(&iommu->lock, flags);
+        }
+
+        /* For safety, invalidate all entries */
+        amd_iommu_flush_all_pages(d);
+    }
+    return 0;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/drivers/passthrough/x86/amd/iommu.h b/xen/drivers/passthrough/x86/amd/iommu.h
new file mode 100644
index 0000000000..bb31de61ae
--- /dev/null
+++ b/xen/drivers/passthrough/x86/amd/iommu.h
@@ -0,0 +1,32 @@
+#ifndef _X86_AMD_IOMMU_H_
+#define _X86_AMD_IOMMU_H_
+
+bool_t set_iommu_pde_present(u32 *pde, unsigned long next_mfn,
+                             unsigned int next_level,
+                             bool_t iw, bool_t ir);
+
+#ifdef CONFIG_HVM
+
+int hvm_update_paging_mode(struct domain *d, unsigned long gfn);
+
+#else
+
+static inline int hvm_update_paging_mode(struct domain *d, unsigned long gfn)
+{
+    ASSERT_UNREACHABLE();
+    return -EINVAL;
+}
+
+#endif
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/drivers/passthrough/x86/amd/iommu_map.c b/xen/drivers/passthrough/x86/amd/iommu_map.c
index 0f9bd538af..e51a5c5ffe 100644
--- a/xen/drivers/passthrough/x86/amd/iommu_map.c
+++ b/xen/drivers/passthrough/x86/amd/iommu_map.c
@@ -25,6 +25,8 @@
 #include "../../ats.h"
 #include <xen/pci.h>
 
+#include "iommu.h"
+
 /* Given pfn and page table level, return pde index */
 static unsigned int pfn_to_pde_idx(unsigned long pfn, unsigned int level)
 {
@@ -45,9 +47,9 @@ void clear_iommu_pte_present(unsigned long l1_mfn, unsigned long gfn)
     unmap_domain_page(table);
 }
 
-static bool_t set_iommu_pde_present(u32 *pde, unsigned long next_mfn, 
-                                    unsigned int next_level,
-                                    bool_t iw, bool_t ir)
+bool_t set_iommu_pde_present(u32 *pde, unsigned long next_mfn,
+                             unsigned int next_level,
+                             bool_t iw, bool_t ir)
 {
     u64 addr_lo, addr_hi, maddr_old, maddr_next;
     u32 entry;
@@ -540,97 +542,6 @@ static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
     return 0;
 }
 
-static int update_paging_mode(struct domain *d, unsigned long gfn)
-{
-    u16 bdf;
-    void *device_entry;
-    unsigned int req_id, level, offset;
-    unsigned long flags;
-    struct pci_dev *pdev;
-    struct amd_iommu *iommu = NULL;
-    struct page_info *new_root = NULL;
-    struct page_info *old_root = NULL;
-    void *new_root_vaddr;
-    unsigned long old_root_mfn;
-    struct domain_iommu *hd = dom_iommu(d);
-
-    if ( gfn == gfn_x(INVALID_GFN) )
-        return -EADDRNOTAVAIL;
-    ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
-
-    level = hd->arch.paging_mode;
-    old_root = hd->arch.root_table;
-    offset = gfn >> (PTE_PER_TABLE_SHIFT * (level - 1));
-
-    ASSERT(spin_is_locked(&hd->arch.mapping_lock) && is_hvm_domain(d));
-
-    while ( offset >= PTE_PER_TABLE_SIZE )
-    {
-        /* Allocate and install a new root table.
-         * Only upper I/O page table grows, no need to fix next level bits */
-        new_root = alloc_amd_iommu_pgtable();
-        if ( new_root == NULL )
-        {
-            AMD_IOMMU_DEBUG("%s Cannot allocate I/O page table\n",
-                            __func__);
-            return -ENOMEM;
-        }
-
-        new_root_vaddr = __map_domain_page(new_root);
-        old_root_mfn = page_to_mfn(old_root);
-        set_iommu_pde_present(new_root_vaddr, old_root_mfn, level,
-                              !!IOMMUF_writable, !!IOMMUF_readable);
-        level++;
-        old_root = new_root;
-        offset >>= PTE_PER_TABLE_SHIFT;
-        unmap_domain_page(new_root_vaddr);
-    }
-
-    if ( new_root != NULL )
-    {
-        hd->arch.paging_mode = level;
-        hd->arch.root_table = new_root;
-
-        if ( !pcidevs_locked() )
-            AMD_IOMMU_DEBUG("%s Try to access pdev_list "
-                            "without aquiring pcidevs_lock.\n", __func__);
-
-        /* Update device table entries using new root table and paging mode */
-        for_each_pdev( d, pdev )
-        {
-            bdf = PCI_BDF2(pdev->bus, pdev->devfn);
-            iommu = find_iommu_for_device(pdev->seg, bdf);
-            if ( !iommu )
-            {
-                AMD_IOMMU_DEBUG("%s Fail to find iommu.\n", __func__);
-                return -ENODEV;
-            }
-
-            spin_lock_irqsave(&iommu->lock, flags);
-            do {
-                req_id = get_dma_requestor_id(pdev->seg, bdf);
-                device_entry = iommu->dev_table.buffer +
-                               (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
-
-                /* valid = 0 only works for dom0 passthrough mode */
-                amd_iommu_set_root_page_table((u32 *)device_entry,
-                                              page_to_maddr(hd->arch.root_table),
-                                              d->domain_id,
-                                              hd->arch.paging_mode, 1);
-
-                amd_iommu_flush_device(iommu, req_id);
-                bdf += pdev->phantom_stride;
-            } while ( PCI_DEVFN2(bdf) != pdev->devfn &&
-                      PCI_SLOT(bdf) == PCI_SLOT(pdev->devfn) );
-            spin_unlock_irqrestore(&iommu->lock, flags);
-        }
-
-        /* For safety, invalidate all entries */
-        amd_iommu_flush_all_pages(d);
-    }
-    return 0;
-}
-
 int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
                        unsigned int flags)
 {
@@ -660,7 +571,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
      * we might need a deeper page table for larger gfn now */
     if ( is_hvm_domain(d) )
     {
-        if ( update_paging_mode(d, gfn) )
+        if ( hvm_update_paging_mode(d, gfn) )
         {
             spin_unlock(&hd->arch.mapping_lock);
             AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
@@ -742,7 +653,7 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
      * we might need a deeper page table for larger gfn now */
     if ( is_hvm_domain(d) )
     {
-        int rc = update_paging_mode(d, gfn);
+        int rc = hvm_update_paging_mode(d, gfn);
 
         if ( rc )
         {
-- 
2.11.0
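
Aside on the do/while in the moved function: it updates the device table
entry for each of a device's phantom functions by stepping the requestor
BDF in units of phantom_stride until the devfn wraps back or leaves the
slot. A standalone sketch of that walk, with stand-ins for Xen's PCI
helper macros and a made-up stride of 2:

    #include <stdio.h>

    /* Stand-ins for Xen's PCI BDF helpers. */
    #define PCI_SLOT(devfn)  (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn)  ((devfn) & 0x7)
    #define PCI_DEVFN2(bdf)  ((bdf) & 0xff)
    #define PCI_BDF2(b, df)  ((((b) & 0xff) << 8) | ((df) & 0xff))

    int main(void)
    {
        /* Hypothetical device 00:02.0 with phantom_stride 2: besides the
         * real function, the walk also visits functions 2, 4 and 6. */
        unsigned int devfn = (2 << 3) | 0, stride = 2;
        unsigned int bdf = PCI_BDF2(0, devfn);

        do {
            printf("requestor %02x:%02x.%x\n",
                   bdf >> 8, PCI_SLOT(bdf), PCI_FUNC(bdf));
            bdf += stride;
        } while ( PCI_DEVFN2(bdf) != devfn &&
                  PCI_SLOT(bdf) == PCI_SLOT(devfn) );

        return 0;
    }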

