
[Xen-devel] [PATCH 04/20] x86/shadow: Only apply shadow heuristics when in guest context



It is incorrect to apply these heuristics as a result of toolstack actions
(i.e. when current is not a vcpu of the target domain).

As the vcpu parameters are to be replaced with domain parameters, guest
context is identified via current->domain.

Note that the majority of the heuristics in sh_remove_write_access() were
already restricted to guest context, but they are updated to use 'curr' for
clarity.
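
As an illustration of the resulting pattern, here is a minimal sketch
(not part of the patch itself; 'd' stands for the domain being operated
on, and 'current' is Xen's macro for the vcpu presently executing):

    /* Sketch only: distinguish guest context from toolstack context. */
    struct vcpu *curr = current;

    if ( curr->domain == d )
    {
        /* Guest context: curr is a vcpu of the subject domain, so its
         * paging state describes the guest and the heuristics apply. */
    }
    else
    {
        /* Toolstack context: curr belongs to some other domain (e.g.
         * dom0 acting on d via a hypercall), so skip the heuristics. */
    }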

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/mm/shadow/common.c |   21 +++++++++++++--------
 xen/arch/x86/mm/shadow/multi.c  |   13 +++++++++----
 2 files changed, 22 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 3b5ef19..26dab30 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2170,6 +2170,9 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
         ;
     struct domain *d = v->domain;
     struct page_info *pg = mfn_to_page(gmfn);
+#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
+    struct vcpu *curr = current;
+#endif
 
     ASSERT(paging_locked_by_me(d));
 
@@ -2205,7 +2208,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
     }
 
 #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
-    if ( v == current )
+    if ( curr->domain == d )
     {
         unsigned long gfn;
         /* Heuristic: there is likely to be only one writeable mapping,
@@ -2213,7 +2216,8 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
          * in the guest's linear map (on non-HIGHPTE linux and windows)*/
 
 #define GUESS(_a, _h) do {                                              \
-            if ( v->arch.paging.mode->shadow.guess_wrmap(v, (_a), gmfn) ) \
+            if ( curr->arch.paging.mode->shadow.guess_wrmap(            \
+                     curr, (_a), gmfn) )                                \
                 perfc_incr(shadow_writeable_h_ ## _h);                  \
             if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 )        \
             {                                                           \
@@ -2222,7 +2226,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
             }                                                           \
         } while (0)
 
-        if ( v->arch.paging.mode->guest_levels == 2 )
+        if ( curr->arch.paging.mode->guest_levels == 2 )
         {
             if ( level == 1 )
                 /* 32bit non-PAE w2k3: linear map at 0xC0000000 */
@@ -2237,7 +2241,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
                 GUESS(0xBFC00000UL
                       + ((fault_addr & VADDR_MASK) >> 10), 6);
         }
-        else if ( v->arch.paging.mode->guest_levels == 3 )
+        else if ( curr->arch.paging.mode->guest_levels == 3 )
         {
             /* 32bit PAE w2k3: linear map at 0xC0000000 */
             switch ( level )
@@ -2259,7 +2263,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
                           + ((fault_addr & VADDR_MASK) >> 18), 6); break;
             }
         }
-        else if ( v->arch.paging.mode->guest_levels == 4 )
+        else if ( curr->arch.paging.mode->guest_levels == 4 )
         {
             /* 64bit w2k3: linear map at 0xfffff68000000000 */
             switch ( level )
@@ -2312,14 +2316,15 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
      * the writeable mapping by looking at the same MFN where the last
      * brute-force search succeeded. */
 
-    if ( v->arch.paging.shadow.last_writeable_pte_smfn != 0 )
+    if ( (curr->domain == d) &&
+         (curr->arch.paging.shadow.last_writeable_pte_smfn != 0) )
     {
         unsigned long old_count = (pg->u.inuse.type_info & PGT_count_mask);
-        mfn_t last_smfn = _mfn(v->arch.paging.shadow.last_writeable_pte_smfn);
+        mfn_t last_smfn = _mfn(curr->arch.paging.shadow.last_writeable_pte_smfn);
         int shtype = mfn_to_page(last_smfn)->u.sh.type;
 
         if ( callbacks[shtype] )
-            callbacks[shtype](v, last_smfn, gmfn);
+            callbacks[shtype](curr, last_smfn, gmfn);
 
         if ( (pg->u.inuse.type_info & PGT_count_mask) != old_count )
             perfc_incr(shadow_writeable_h_5);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index b538997..f532bff 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -4185,6 +4185,8 @@ sh_update_cr3(struct vcpu *v, int do_locking)
 int sh_rm_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
                                  mfn_t smfn, unsigned long off)
 {
+    struct domain *d = v->domain;
+    struct vcpu *curr = current;
     int r;
     shadow_l1e_t *sl1p, sl1e;
     struct page_info *sp;
@@ -4193,9 +4195,9 @@ int sh_rm_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
     ASSERT(mfn_valid(smfn));
 
     /* Remember if we've been told that this process is being torn down */
-    v->arch.paging.shadow.pagetable_dying
-        = !!(mfn_to_page(gmfn)->shadow_flags & SHF_pagetable_dying);
-
+    if ( curr->domain == d )
+        curr->arch.paging.shadow.pagetable_dying
+            = !!(mfn_to_page(gmfn)->shadow_flags & SHF_pagetable_dying);
 
     sp = mfn_to_page(smfn);
 
@@ -4290,6 +4292,8 @@ int sh_rm_write_access_from_l1(struct vcpu *v, mfn_t sl1mfn,
     int done = 0;
     int flags;
 #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
+    struct domain *d = v->domain;
+    struct vcpu *curr = current;
     mfn_t base_sl1mfn = sl1mfn; /* Because sl1mfn changes in the foreach */
 #endif
 
@@ -4304,7 +4308,8 @@ int sh_rm_write_access_from_l1(struct vcpu *v, mfn_t sl1mfn,
             (void) shadow_set_l1e(v, sl1e, ro_sl1e, p2m_ram_rw, sl1mfn);
 #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
             /* Remember the last shadow that we shot a writeable mapping in */
-            v->arch.paging.shadow.last_writeable_pte_smfn = mfn_x(base_sl1mfn);
+            if ( curr->domain == d )
+                curr->arch.paging.shadow.last_writeable_pte_smfn = mfn_x(base_sl1mfn);
 #endif
             if ( (mfn_to_page(readonly_mfn)->u.inuse.type_info
                   & PGT_count_mask) == 0 )
-- 
1.7.10.4

