[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH 4/5] x86/PV: restrict TLB flushing after mod_l[234]_entry()



Just like we avoid invoking remote root pt flushes when all uses of an
L4 table can be accounted for locally, the same can be done for all of
L[234] for the linear pt flush when the table is a "free floating" one,
i.e. it is pinned but not hooked up anywhere. While this situation
doesn't occur very often, it can be observed.

Since this breaks one of the implications of the XSA-286 fix, drop the
flush_root_pt_local variable again and set ->root_pgt_changed directly,
just like it was before that change.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
While adjusting the big comment that was added for XSA-286 I wondered
why it talks about the "construction of 32bit PV guests". How are 64-bit
PV guests different in this regard?

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3903,8 +3903,7 @@ long do_mmu_update(
     struct vcpu *curr = current, *v = curr;
     struct domain *d = v->domain, *pt_owner = d, *pg_owner;
     mfn_t map_mfn = INVALID_MFN, mfn;
-    bool flush_linear_pt = false, flush_root_pt_local = false,
-        flush_root_pt_others = false;
+    bool flush_linear_pt = false, flush_root_pt_others = false;
     uint32_t xsm_needed = 0;
     uint32_t xsm_checked = 0;
     int rc = put_old_guest_table(curr);
@@ -4054,7 +4053,9 @@ long do_mmu_update(
                         break;
                     rc = mod_l2_entry(va, l2e_from_intpte(req.val), mfn,
                                       cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
-                    if ( !rc )
+                    if ( !rc &&
+                         (page->u.inuse.type_info & PGT_count_mask) >
+                         1 + !!(page->u.inuse.type_info & PGT_pinned) )
                         flush_linear_pt = true;
                     break;
 
@@ -4063,7 +4064,9 @@ long do_mmu_update(
                         break;
                     rc = mod_l3_entry(va, l3e_from_intpte(req.val), mfn,
                                       cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
-                    if ( !rc )
+                    if ( !rc &&
+                         (page->u.inuse.type_info & PGT_count_mask) >
+                         1 + !!(page->u.inuse.type_info & PGT_pinned) )
                         flush_linear_pt = true;
                     break;
 
@@ -4072,7 +4075,9 @@ long do_mmu_update(
                         break;
                     rc = mod_l4_entry(va, l4e_from_intpte(req.val), mfn,
                                       cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
-                    if ( !rc )
+                    if ( !rc &&
+                         (page->u.inuse.type_info & PGT_count_mask) >
+                         1 + !!(page->u.inuse.type_info & PGT_pinned) )
                         flush_linear_pt = true;
                     if ( !rc && pt_owner->arch.pv.xpti )
                     {
@@ -4082,7 +4087,7 @@ long do_mmu_update(
                                     mfn) )
                         {
                             local_in_use = true;
-                            flush_root_pt_local = true;
+                            get_cpu_info()->root_pgt_changed = true;
                         }
 
                         /*
@@ -4199,8 +4204,8 @@ long do_mmu_update(
     /*
      * Perform required TLB maintenance.
      *
-     * This logic currently depend on flush_linear_pt being a superset of the
-     * flush_root_pt_* conditions.
+     * This logic currently depends on flush_linear_pt being a superset of the
+     * flush_root_pt_others condition.
      *
      * pt_owner may not be current->domain.  This may occur during
      * construction of 32bit PV guests, or debugging of PV guests.  The
@@ -4219,7 +4224,7 @@ long do_mmu_update(
      * pt_owner->dirty_cpumask), and/or all *other* dirty CPUs as there are
      * references we can't account for locally.
      */
-    if ( flush_linear_pt /* || flush_root_pt_local || flush_root_pt_others */ )
+    if ( flush_linear_pt /* || flush_root_pt_others */ )
     {
         unsigned int cpu = smp_processor_id();
         cpumask_t *mask = pt_owner->dirty_cpumask;
@@ -4236,12 +4241,8 @@ long do_mmu_update(
             cpumask_copy(mask, pt_owner->dirty_cpumask);
             __cpumask_clear_cpu(cpu, mask);
 
-            flush_local(FLUSH_TLB |
-                        (flush_root_pt_local ? FLUSH_ROOT_PGTBL : 0));
+            flush_local(FLUSH_TLB);
         }
-        else
-            /* Sanity check.  flush_root_pt_local implies local cpu is dirty. 
*/
-            ASSERT(!flush_root_pt_local);
 
         /* Flush the remote dirty CPUs.  Does not include the local CPU. */
         if ( !cpumask_empty(mask) )
@@ -4249,8 +4250,8 @@ long do_mmu_update(
                        (flush_root_pt_others ? FLUSH_ROOT_PGTBL : 0));
     }
     else
-        /* Sanity check.  flush_root_pt_* implies flush_linear_pt. */
-        ASSERT(!flush_root_pt_local && !flush_root_pt_others);
+        /* Sanity check.  flush_root_pt_others implies flush_linear_pt. */
+        ASSERT(!flush_root_pt_others);
 
     perfc_add(num_page_updates, i);
 




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.