[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] x86/mm: Drop preemptible signal through {get, put}_page_type*()



After XSA-273, handling of L2 pagetables must strictly be preemptible, meaning
that every page type is now either always preemptible (L2 and higher) or never
preemptible (L1, SEGDESC).

Simplify the code by removing the preemptible booleans; this has an
unexpectedly large impact on the generated code for _get_page_type().

  add/remove: 0/0 grow/shrink: 0/15 up/down: 0/-509 (-509)
  Function                                     old     new   delta
  put_page_type                                  9       7      -2
  get_page_type                                 24      22      -2
  put_page_from_l2e                            251     247      -4
  put_page_type_preemptible                     12       7      -5
  put_page_from_l4e                            268     263      -5
  put_page_from_l3e                            461     456      -5
  get_page_type_preemptible                     10       5      -5
  put_page_from_l1e                            385     377      -8
  vcpu_destroy_pagetables                      450     437     -13
  put_old_guest_table                          119     103     -16
  get_page_from_l1e                           1214    1196     -18
  get_page_and_type_from_mfn                   240     206     -34
  _put_page_type                               842     804     -38
  do_mmuext_op                                5576    5526     -50
  _get_page_type                              5771    5467    -304
  Total: Before=3297078, After=3296569, chg -0.02%

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/domain.c    |  2 +-
 xen/arch/x86/mm.c        | 76 ++++++++++++++++++++----------------------------
 xen/include/asm-x86/mm.h |  3 +-
 3 files changed, 34 insertions(+), 47 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index eb1e93f..4f64602 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1875,7 +1875,7 @@ static int relinquish_memory(
             if ( likely(y == x) )
             {
                 /* No need for atomic update of type_info here: noone else 
updates it. */
-                switch ( ret = free_page_type(page, x, 1) )
+                switch ( ret = free_page_type(page, x) )
                 {
                 case 0:
                     break;
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 8ac4412..f2faaa8 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -613,12 +613,11 @@ static int alloc_segdesc_page(struct page_info *page)
     return i == 512 ? 0 : -EINVAL;
 }
 
-static int _get_page_type(struct page_info *page, unsigned long type,
-                          bool preemptible);
+static int _get_page_type(struct page_info *page, unsigned long type);
 
 static int get_page_and_type_from_mfn(
     mfn_t mfn, unsigned long type, struct domain *d,
-    int partial, int preemptible)
+    int partial)
 {
     struct page_info *page = mfn_to_page(mfn);
     int rc;
@@ -627,10 +626,10 @@ static int get_page_and_type_from_mfn(
          unlikely(!get_page_from_mfn(mfn, d)) )
         return -EINVAL;
 
-    rc = _get_page_type(page, type, preemptible);
+    rc = _get_page_type(page, type);
 
     if ( unlikely(rc) && partial >= 0 &&
-         (!preemptible || page != current->arch.old_guest_table) )
+         page != current->arch.old_guest_table )
         put_page(page);
 
     return rc;
@@ -1125,7 +1124,7 @@ get_page_from_l2e(
         return -EINVAL;
     }
 
-    rc = get_page_and_type_from_mfn(_mfn(mfn), PGT_l1_page_table, d, 0, 0);
+    rc = get_page_and_type_from_mfn(_mfn(mfn), PGT_l1_page_table, d, 0);
     if ( unlikely(rc == -EINVAL) && get_l2_linear_pagetable(l2e, pfn, d) )
         rc = 0;
 
@@ -1157,7 +1156,7 @@ get_page_from_l3e(
     }
 
     rc = get_page_and_type_from_mfn(
-        l3e_get_mfn(l3e), PGT_l2_page_table, d, partial, 1);
+        l3e_get_mfn(l3e), PGT_l2_page_table, d, partial);
     if ( unlikely(rc == -EINVAL) &&
          !is_pv_32bit_domain(d) &&
          get_l3_linear_pagetable(l3e, pfn, d) )
@@ -1190,15 +1189,14 @@ get_page_from_l4e(
     }
 
     rc = get_page_and_type_from_mfn(
-        l4e_get_mfn(l4e), PGT_l3_page_table, d, partial, 1);
+        l4e_get_mfn(l4e), PGT_l3_page_table, d, partial);
     if ( unlikely(rc == -EINVAL) && get_l4_linear_pagetable(l4e, pfn, d) )
         rc = 0;
 
     return rc;
 }
 
-static int _put_page_type(struct page_info *page, bool preemptible,
-                          struct page_info *ptpg);
+static int _put_page_type(struct page_info *page, struct page_info *ptpg);
 
 void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner)
 {
@@ -1295,7 +1293,7 @@ static int put_page_from_l2e(l2_pgentry_t l2e, unsigned 
long pfn)
     else
     {
         struct page_info *pg = l2e_get_page(l2e);
-        int rc = _put_page_type(pg, false, mfn_to_page(_mfn(pfn)));
+        int rc = _put_page_type(pg, mfn_to_page(_mfn(pfn)));
 
         ASSERT(!rc);
         put_page(pg);
@@ -1331,7 +1329,7 @@ static int put_page_from_l3e(l3_pgentry_t l3e, unsigned 
long pfn,
     if ( unlikely(partial > 0) )
     {
         ASSERT(!defer);
-        return _put_page_type(pg, true, mfn_to_page(_mfn(pfn)));
+        return _put_page_type(pg, mfn_to_page(_mfn(pfn)));
     }
 
     if ( defer )
@@ -1341,7 +1339,7 @@ static int put_page_from_l3e(l3_pgentry_t l3e, unsigned 
long pfn,
         return 0;
     }
 
-    rc = _put_page_type(pg, true, mfn_to_page(_mfn(pfn)));
+    rc = _put_page_type(pg, mfn_to_page(_mfn(pfn)));
     if ( likely(!rc) )
         put_page(pg);
 
@@ -1361,7 +1359,7 @@ static int put_page_from_l4e(l4_pgentry_t l4e, unsigned 
long pfn,
         if ( unlikely(partial > 0) )
         {
             ASSERT(!defer);
-            return _put_page_type(pg, true, mfn_to_page(_mfn(pfn)));
+            return _put_page_type(pg, mfn_to_page(_mfn(pfn)));
         }
 
         if ( defer )
@@ -1371,7 +1369,7 @@ static int put_page_from_l4e(l4_pgentry_t l4e, unsigned 
long pfn,
             return 0;
         }
 
-        rc = _put_page_type(pg, true, mfn_to_page(_mfn(pfn)));
+        rc = _put_page_type(pg, mfn_to_page(_mfn(pfn)));
         if ( likely(!rc) )
             put_page(pg);
     }
@@ -1544,7 +1542,7 @@ static int alloc_l3_table(struct page_info *page)
             else
                 rc = get_page_and_type_from_mfn(
                     l3e_get_mfn(pl3e[i]),
-                    PGT_l2_page_table | PGT_pae_xen_l2, d, partial, 1);
+                    PGT_l2_page_table | PGT_pae_xen_l2, d, partial);
         }
         else if ( (rc = get_page_from_l3e(pl3e[i], pfn, d, partial)) > 0 )
             continue;
@@ -2378,8 +2376,7 @@ static void get_page_light(struct page_info *page)
     while ( unlikely(y != x) );
 }
 
-static int alloc_page_type(struct page_info *page, unsigned long type,
-                           int preemptible)
+static int alloc_page_type(struct page_info *page, unsigned long type)
 {
     struct domain *owner = page_get_owner(page);
     int rc;
@@ -2394,15 +2391,12 @@ static int alloc_page_type(struct page_info *page, 
unsigned long type,
         rc = alloc_l1_table(page);
         break;
     case PGT_l2_page_table:
-        ASSERT(preemptible);
         rc = alloc_l2_table(page, type);
         break;
     case PGT_l3_page_table:
-        ASSERT(preemptible);
         rc = alloc_l3_table(page);
         break;
     case PGT_l4_page_table:
-        ASSERT(preemptible);
         rc = alloc_l4_table(page);
         break;
     case PGT_seg_desc_page:
@@ -2453,8 +2447,7 @@ static int alloc_page_type(struct page_info *page, 
unsigned long type,
 }
 
 
-int free_page_type(struct page_info *page, unsigned long type,
-                   int preemptible)
+int free_page_type(struct page_info *page, unsigned long type)
 {
     struct domain *owner = page_get_owner(page);
     unsigned long gmfn;
@@ -2485,15 +2478,12 @@ int free_page_type(struct page_info *page, unsigned 
long type,
         rc = 0;
         break;
     case PGT_l2_page_table:
-        ASSERT(preemptible);
         rc = free_l2_table(page);
         break;
     case PGT_l3_page_table:
-        ASSERT(preemptible);
         rc = free_l3_table(page);
         break;
     case PGT_l4_page_table:
-        ASSERT(preemptible);
         rc = free_l4_table(page);
         break;
     default:
@@ -2508,9 +2498,9 @@ int free_page_type(struct page_info *page, unsigned long 
type,
 
 
 static int _put_final_page_type(struct page_info *page, unsigned long type,
-                                bool preemptible, struct page_info *ptpg)
+                                struct page_info *ptpg)
 {
-    int rc = free_page_type(page, type, preemptible);
+    int rc = free_page_type(page, type);
 
     /* No need for atomic update of type_info here: noone else updates it. */
     if ( rc == 0 )
@@ -2544,8 +2534,7 @@ static int _put_final_page_type(struct page_info *page, 
unsigned long type,
 }
 
 
-static int _put_page_type(struct page_info *page, bool preemptible,
-                          struct page_info *ptpg)
+static int _put_page_type(struct page_info *page, struct page_info *ptpg)
 {
     unsigned long nx, x, y = page->u.inuse.type_info;
 
@@ -2576,7 +2565,7 @@ static int _put_page_type(struct page_info *page, bool 
preemptible,
                                            x, nx)) != x) )
                     break;
                 /* We cleared the 'valid bit' so we do the clean up. */
-                rc = _put_final_page_type(page, x, preemptible, ptpg);
+                rc = _put_final_page_type(page, x, ptpg);
                 if ( x & PGT_partial )
                     put_page(page);
 
@@ -2624,14 +2613,13 @@ static int _put_page_type(struct page_info *page, bool 
preemptible,
             break;
         }
 
-        if ( preemptible && hypercall_preempt_check() )
+        if ( hypercall_preempt_check() )
             return -EINTR;
     }
 }
 
 
-static int _get_page_type(struct page_info *page, unsigned long type,
-                          bool preemptible)
+static int _get_page_type(struct page_info *page, unsigned long type)
 {
     unsigned long nx, x, y = page->u.inuse.type_info;
     int rc = 0, iommu_ret = 0;
@@ -2728,7 +2716,7 @@ static int _get_page_type(struct page_info *page, 
unsigned long type,
             {
                 /* Someone else is updating validation of this page. Wait... */
                 do {
-                    if ( preemptible && hypercall_preempt_check() )
+                    if ( hypercall_preempt_check() )
                         return -EINTR;
                     cpu_relax();
                 } while ( (y = page->u.inuse.type_info) == x );
@@ -2742,7 +2730,7 @@ static int _get_page_type(struct page_info *page, 
unsigned long type,
         if ( likely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) == x) )
             break;
 
-        if ( preemptible && hypercall_preempt_check() )
+        if ( hypercall_preempt_check() )
             return -EINTR;
     }
 
@@ -2771,7 +2759,7 @@ static int _get_page_type(struct page_info *page, 
unsigned long type,
             page->partial_pte = 0;
         }
         page->linear_pt_count = 0;
-        rc = alloc_page_type(page, type, preemptible);
+        rc = alloc_page_type(page, type);
     }
 
     if ( (x & PGT_partial) && !(nx & PGT_partial) )
@@ -2785,14 +2773,14 @@ static int _get_page_type(struct page_info *page, 
unsigned long type,
 
 void put_page_type(struct page_info *page)
 {
-    int rc = _put_page_type(page, false, NULL);
+    int rc = _put_page_type(page, NULL);
     ASSERT(rc == 0);
     (void)rc;
 }
 
 int get_page_type(struct page_info *page, unsigned long type)
 {
-    int rc = _get_page_type(page, type, false);
+    int rc = _get_page_type(page, type);
 
     if ( likely(rc == 0) )
         return 1;
@@ -2802,14 +2790,14 @@ int get_page_type(struct page_info *page, unsigned long 
type)
 
 int put_page_type_preemptible(struct page_info *page)
 {
-    return _put_page_type(page, true, NULL);
+    return _put_page_type(page, NULL);
 }
 
 int get_page_type_preemptible(struct page_info *page, unsigned long type)
 {
     ASSERT(!current->arch.old_guest_table);
 
-    return _get_page_type(page, type, true);
+    return _get_page_type(page, type);
 }
 
 int put_old_guest_table(struct vcpu *v)
@@ -2819,7 +2807,7 @@ int put_old_guest_table(struct vcpu *v)
     if ( !v->arch.old_guest_table )
         return 0;
 
-    switch ( rc = _put_page_type(v->arch.old_guest_table, true,
+    switch ( rc = _put_page_type(v->arch.old_guest_table,
                                  v->arch.old_guest_ptpg) )
     {
     case -EINTR:
@@ -2945,7 +2933,7 @@ int new_guest_cr3(mfn_t mfn)
         return 0;
     }
 
-    rc = get_page_and_type_from_mfn(mfn, PGT_root_page_table, d, 0, 1);
+    rc = get_page_and_type_from_mfn(mfn, PGT_root_page_table, d, 0);
     switch ( rc )
     {
     case 0:
@@ -3333,7 +3321,7 @@ long do_mmuext_op(
             if ( op.arg1.mfn != 0 )
             {
                 rc = get_page_and_type_from_mfn(
-                    _mfn(op.arg1.mfn), PGT_root_page_table, currd, 0, 1);
+                    _mfn(op.arg1.mfn), PGT_root_page_table, currd, 0);
 
                 if ( unlikely(rc) )
                 {
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 6e45651..1988e57 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -330,8 +330,7 @@ static inline void *__page_to_virt(const struct page_info 
*pg)
                     (PAGE_SIZE / (sizeof(*pg) & -sizeof(*pg))));
 }
 
-int free_page_type(struct page_info *page, unsigned long type,
-                   int preemptible);
+int free_page_type(struct page_info *page, unsigned long type);
 
 void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d);
 void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.