
[Xen-devel] [PATCH 27 of 38] xen/dom0: use _PAGE_IOMAP in ioremap to do machine mappings



In a Xen domain, ioremap operates on machine addresses, not
pseudo-physical addresses: the regions being mapped belong to real
hardware.  Use the _PAGE_IOMAP PTE flag to mark mappings whose frame
numbers are machine frame numbers and should be installed as-is rather
than translated through the pseudo-physical (p2m) mapping.
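
To illustrate how the flag is meant to be exercised, here is a minimal
sketch of a dom0 driver mapping device registers (hypothetical code, not
part of this patch; the helper name, BAR address and register offset are
invented):

    #include <linux/io.h>
    #include <linux/types.h>

    /* Hypothetical dom0 driver code, not part of this patch.  bar_maddr
     * is a machine (bus) address taken from a device BAR.  With this
     * series, ioremap() builds the mapping with _PAGE_IOMAP set, so
     * xen_make_pte() keeps the machine frame number as-is instead of
     * running it through the pseudo-physical translation. */
    static u32 read_device_status(resource_size_t bar_maddr, unsigned long bar_len)
    {
            void __iomem *regs = ioremap(bar_maddr, bar_len);
            u32 status;

            if (!regs)
                    return ~0u;

            status = readl(regs + 0x04);    /* hypothetical status register */
            iounmap(regs);
            return status;
    }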

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@xxxxxxxxxx>
---
 arch/x86/include/asm/xen/page.h |    8 +----
 arch/x86/xen/enlighten.c        |   14 +++++++--
 arch/x86/xen/mmu.c              |   60 ++++++++++++++++++++++++++++++++++++++-
 3 files changed, 73 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -112,13 +112,9 @@
  */
 static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
 {
-       extern unsigned long max_mapnr;
        unsigned long pfn = mfn_to_pfn(mfn);
-       if ((pfn < max_mapnr)
-           && !xen_feature(XENFEAT_auto_translated_physmap)
-           && (get_phys_to_machine(pfn) != mfn))
-               return max_mapnr; /* force !pfn_valid() */
-       /* XXX fixme; not true with sparsemem */
+       if (get_phys_to_machine(pfn) != mfn)
+               return -1; /* force !pfn_valid() */
        return pfn;
 }
 
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1162,11 +1162,19 @@
 #ifdef CONFIG_X86_LOCAL_APIC
        case FIX_APIC_BASE:     /* maps dummy local APIC */
 #endif
+               /* All local page mappings */
                pte = pfn_pte(phys, prot);
                break;
 
+       case FIX_PARAVIRT_BOOTMAP:
+               /* This is an MFN, but it isn't an IO mapping from the
+                  IO domain */
+               pte = mfn_pte(phys, prot);
+               break;
+
        default:
-               pte = mfn_pte(phys, prot);
+               /* By default, set_fixmap is used for hardware mappings */
+               pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
                break;
        }
 
@@ -1695,7 +1703,9 @@
 
        /* Prevent unwanted bits from being set in PTEs. */
        __supported_pte_mask &= ~_PAGE_GLOBAL;
-       if (!xen_initial_domain())
+       if (xen_initial_domain())
+               __supported_pte_mask |= _PAGE_IOMAP;
+       else
                __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
 
        /* Don't do the full vcpu_info placement stuff until we have a
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -302,6 +302,28 @@
        return PagePinned(page);
 }
 
+static bool xen_iomap_pte(pte_t pte)
+{
+       return xen_initial_domain() && (pte_flags(pte) & _PAGE_IOMAP);
+}
+
+static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
+{
+       struct multicall_space mcs;
+       struct mmu_update *u;
+
+       mcs = xen_mc_entry(sizeof(*u));
+       u = mcs.args;
+
+       /* ptep might be kmapped when using 32-bit HIGHPTE */
+       u->ptr = arbitrary_virt_to_machine(ptep).maddr;
+       u->val = pte_val_ma(pteval);
+
+       MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_IO);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+
 static void xen_extend_mmu_update(const struct mmu_update *update)
 {
        struct multicall_space mcs;
@@ -382,6 +404,11 @@
        if (mm == &init_mm)
                preempt_disable();
 
+       if (xen_iomap_pte(pteval)) {
+               xen_set_iomap_pte(ptep, pteval);
+               goto out;
+       }
+
        ADD_STATS(set_pte_at, 1);
 //     ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
        ADD_STATS(set_pte_at_current, mm == current->mm);
@@ -454,8 +481,25 @@
        return val;
 }
 
+static pteval_t iomap_pte(pteval_t val)
+{
+       if (val & _PAGE_PRESENT) {
+               unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+               pteval_t flags = val & PTE_FLAGS_MASK;
+
+               /* We assume the pte frame number is a MFN, so
+                  just use it as-is. */
+               val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
+       }
+
+       return val;
+}
+
 pteval_t xen_pte_val(pte_t pte)
 {
+       if (xen_initial_domain() && (pte.pte & _PAGE_IOMAP))
+               return pte.pte;
+
        return pte_mfn_to_pfn(pte.pte);
 }
 
@@ -466,7 +510,11 @@
 
 pte_t xen_make_pte(pteval_t pte)
 {
-       pte = pte_pfn_to_mfn(pte);
+       if (unlikely(xen_initial_domain() && (pte & _PAGE_IOMAP)))
+               pte = iomap_pte(pte);
+       else
+               pte = pte_pfn_to_mfn(pte);
+
        return native_make_pte(pte);
 }
 
@@ -519,6 +567,11 @@
 
 void xen_set_pte(pte_t *ptep, pte_t pte)
 {
+       if (xen_iomap_pte(pte)) {
+               xen_set_iomap_pte(ptep, pte);
+               return;
+       }
+
        ADD_STATS(pte_update, 1);
 //     ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
        ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
@@ -535,6 +588,11 @@
 #ifdef CONFIG_X86_PAE
 void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
+       if (xen_iomap_pte(pte)) {
+               xen_set_iomap_pte(ptep, pte);
+               return;
+       }
+
        set_64bit((u64 *)ptep, native_pte_val(pte));
 }
 





 

