
[Xen-devel] [PATCH v7.2 07/24] x86/mm: Introduce modify_xen_mappings()



To simply change the permissions on existing Xen mappings.  The existing
destroy_xen_mappings() is altered to support changing the PTE permissions.

A new destroy_xen_mappings() is introduced, as the special case of not passing
_PAGE_PRESENT to modify_xen_mappings().

As cleanup (and an ideal functional test), the boot logic which remaps Xen's
code and data with reduced permissions is altered to use
modify_xen_mappings(), rather than map_pages_to_xen() passing the same mfns
back in.
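
For illustration, the intended call pattern is along these lines (a minimal
sketch; start_va and end_va stand for hypothetical, page-aligned virtual
addresses bounding an already-mapped range):

    /* Restrict the existing mappings to read-only, non-executable. */
    modify_xen_mappings(start_va, end_va, PAGE_HYPERVISOR_RO);

    /* Passing _PAGE_NONE (i.e. no _PAGE_PRESENT) removes the mappings;
       destroy_xen_mappings() is now exactly this special case. */
    modify_xen_mappings(start_va, end_va, _PAGE_NONE);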

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>

v7.1:
 * Allow callers to pass PAGE_HYPERVISOR_xxx constants, for consistency with
   similar functions.
 * Replace one open-coded 0 with its logical equivalent, _PAGE_NONE.
v7.2:
 * Add check for creating new mappings of 4K entries.
 * Correct the continue logic.  Invert the sense of the comment to match the
   code.
---
 xen/arch/x86/mm.c    | 75 +++++++++++++++++++++++++++++++++++++++++++++-------
 xen/arch/x86/setup.c | 22 +++++++--------
 xen/include/xen/mm.h |  2 ++
 3 files changed, 76 insertions(+), 23 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index bca7532..3230b9d 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5956,7 +5956,19 @@ int populate_pt_range(unsigned long virt, unsigned long mfn,
     return map_pages_to_xen(virt, mfn, nr_mfns, MAP_SMALL_PAGES);
 }
 
-void destroy_xen_mappings(unsigned long s, unsigned long e)
+/*
+ * Alter the permissions of a range of Xen virtual address space.
+ *
+ * Does not create new mappings, and does not modify the mfn in existing
+ * mappings, but will shatter superpages if necessary, and will destroy
+ * mappings if not passed _PAGE_PRESENT.
+ *
+ * The only flags considered are NX, RW and PRESENT.  All other input flags
+ * are ignored.
+ *
+ * It is an error to call with present flags over an unpopulated range.
+ */
+void modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
 {
     bool_t locking = system_state > SYS_STATE_boot;
     l2_pgentry_t *pl2e;
@@ -5964,6 +5976,10 @@ void destroy_xen_mappings(unsigned long s, unsigned long e)
     unsigned int  i;
     unsigned long v = s;
 
+    /* Set of valid PTE bits which may be altered. */
+#define FLAGS_MASK (_PAGE_NX|_PAGE_RW|_PAGE_PRESENT)
+    nf &= FLAGS_MASK;
+
     ASSERT(IS_ALIGNED(s, PAGE_SIZE));
     ASSERT(IS_ALIGNED(e, PAGE_SIZE));
 
@@ -5973,6 +5989,9 @@ void destroy_xen_mappings(unsigned long s, unsigned long e)
 
         if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
         {
+            /* Confirm the caller isn't trying to create new mappings. */
+            ASSERT(!(nf & _PAGE_PRESENT));
+
             v += 1UL << L3_PAGETABLE_SHIFT;
             v &= ~((1UL << L3_PAGETABLE_SHIFT) - 1);
             continue;
@@ -5984,8 +6003,12 @@ void destroy_xen_mappings(unsigned long s, unsigned long e)
                  l1_table_offset(v) == 0 &&
                  ((e - v) >= (1UL << L3_PAGETABLE_SHIFT)) )
             {
-                /* PAGE1GB: whole superpage is destroyed. */
-                l3e_write_atomic(pl3e, l3e_empty());
+                /* PAGE1GB: whole superpage is modified. */
+                l3_pgentry_t nl3e = !(nf & _PAGE_PRESENT) ? l3e_empty()
+                    : l3e_from_pfn(l3e_get_pfn(*pl3e),
+                                   (l3e_get_flags(*pl3e) & ~FLAGS_MASK) | nf);
+
+                l3e_write_atomic(pl3e, nl3e);
                 v += 1UL << L3_PAGETABLE_SHIFT;
                 continue;
             }
@@ -6016,6 +6039,9 @@ void destroy_xen_mappings(unsigned long s, unsigned long e)
 
         if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
         {
+            /* Confirm the caller isn't trying to create new mappings. */
+            ASSERT(!(nf & _PAGE_PRESENT));
+
             v += 1UL << L2_PAGETABLE_SHIFT;
             v &= ~((1UL << L2_PAGETABLE_SHIFT) - 1);
             continue;
@@ -6026,8 +6052,12 @@ void destroy_xen_mappings(unsigned long s, unsigned long e)
             if ( (l1_table_offset(v) == 0) &&
                  ((e-v) >= (1UL << L2_PAGETABLE_SHIFT)) )
             {
-                /* PSE: whole superpage is destroyed. */
-                l2e_write_atomic(pl2e, l2e_empty());
+                /* PSE: whole superpage is modified. */
+                l2_pgentry_t nl2e = !(nf & _PAGE_PRESENT) ? l2e_empty()
+                    : l2e_from_pfn(l2e_get_pfn(*pl2e),
+                                   (l2e_get_flags(*pl2e) & ~FLAGS_MASK) | nf);
+
+                l2e_write_atomic(pl2e, nl2e);
                 v += 1UL << L2_PAGETABLE_SHIFT;
             }
             else
@@ -6055,13 +6085,27 @@ void destroy_xen_mappings(unsigned long s, unsigned long e)
         }
         else
         {
+            l1_pgentry_t nl1e;
+
             /* Ordinary 4kB mapping. */
             pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(v);
-            l1e_write_atomic(pl1e, l1e_empty());
+
+            /* Confirm the caller isn't trying to create new mappings. */
+            if ( !(l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
+                ASSERT(!(nf & _PAGE_PRESENT));
+
+            nl1e = !(nf & _PAGE_PRESENT) ? l1e_empty()
+                : l1e_from_pfn(l1e_get_pfn(*pl1e),
+                               (l1e_get_flags(*pl1e) & ~FLAGS_MASK) | nf);
+
+            l1e_write_atomic(pl1e, nl1e);
             v += PAGE_SIZE;
 
-            /* If we are done with the L2E, check if it is now empty. */
-            if ( (v != e) && (l1_table_offset(v) != 0) )
+            /*
+             * If we are not destroying mappings, or are not done with the L2E,
+             * skip the empty&free check.
+             */
+            if ( (nf & _PAGE_PRESENT) || ((v != e) && (l1_table_offset(v) != 0)) )
                 continue;
             pl1e = l2e_to_l1e(*pl2e);
             for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
@@ -6076,8 +6120,12 @@ void destroy_xen_mappings(unsigned long s, unsigned long e)
             }
         }
 
-        /* If we are done with the L3E, check if it is now empty. */
-        if ( (v != e) && (l2_table_offset(v) + l1_table_offset(v) != 0) )
+        /*
+         * If we are not destroying mappings, or are not done with the L3E,
+         * skip the empty&free check.
+         */
+        if ( (nf & _PAGE_PRESENT) ||
+             ((v != e) && (l2_table_offset(v) + l1_table_offset(v) != 0)) )
             continue;
         pl2e = l3e_to_l2e(*pl3e);
         for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
@@ -6093,10 +6141,17 @@ void destroy_xen_mappings(unsigned long s, unsigned long e)
     }
 
     flush_area(NULL, FLUSH_TLB_GLOBAL);
+
+#undef FLAGS_MASK
 }
 
 #undef flush_area
 
+void destroy_xen_mappings(unsigned long s, unsigned long e)
+{
+    modify_xen_mappings(s, e, _PAGE_NONE);
+}
+
 void __set_fixmap(
     enum fixed_addresses idx, unsigned long mfn, unsigned long flags)
 {
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 3696c31..1a223d4 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1214,23 +1214,19 @@ void __init noreturn __start_xen(unsigned long mbi_p)
     if ( !using_2M_mapping() )
     {
         /* Mark .text as RX (avoiding the first 2M superpage). */
-        map_pages_to_xen(XEN_VIRT_START + MB(2),
-                         PFN_DOWN(__pa(XEN_VIRT_START + MB(2))),
-                         PFN_DOWN(__2M_text_end -
-                                  (const char *)(XEN_VIRT_START + MB(2))),
-                         PAGE_HYPERVISOR_RX);
+        modify_xen_mappings(XEN_VIRT_START + MB(2),
+                            (unsigned long)&__2M_text_end,
+                            PAGE_HYPERVISOR_RX);
 
         /* Mark .rodata as RO. */
-        map_pages_to_xen((unsigned long)&__2M_rodata_start,
-                         PFN_DOWN(__pa(__2M_rodata_start)),
-                         PFN_DOWN(__2M_rodata_end - __2M_rodata_start),
-                         PAGE_HYPERVISOR_RO);
+        modify_xen_mappings((unsigned long)&__2M_rodata_start,
+                            (unsigned long)&__2M_rodata_end,
+                            PAGE_HYPERVISOR_RO);
 
         /* Mark .data and .bss as RW. */
-        map_pages_to_xen((unsigned long)&__2M_rwdata_start,
-                         PFN_DOWN(__pa(__2M_rwdata_start)),
-                         PFN_DOWN(__2M_rwdata_end - __2M_rwdata_start),
-                         PAGE_HYPERVISOR_RW);
+        modify_xen_mappings((unsigned long)&__2M_rwdata_start,
+                            (unsigned long)&__2M_rwdata_end,
+                            PAGE_HYPERVISOR_RW);
 
         /* Drop the remaining mappings in the shattered superpage. */
         destroy_xen_mappings((unsigned long)&__2M_rwdata_end,
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index d62394f..d4721fc 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -103,6 +103,8 @@ int map_pages_to_xen(
     unsigned long mfn,
     unsigned long nr_mfns,
     unsigned int flags);
+/* Alter the permissions of a range of Xen virtual address space. */
+void modify_xen_mappings(unsigned long s, unsigned long e, unsigned int flags);
 void destroy_xen_mappings(unsigned long v, unsigned long e);
 /*
  * Create only non-leaf page table entries for the
-- 
2.1.4



 

