[Xen-changelog] Patch for 64-bit VMX guest destroy

# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 52260d8c27754a54c636bc73483f51e189281ff7
# Parent  8c194453808697f5f7b4b883bc28ec9a61ec661d
Patch for 64-bit VMX guest destroy

A 64-bit VMX guest cannot be destroyed without this patch, because of a guest
page reference-count problem.
This patch corrects the reference counting at two points:
1. In invlpg, the reference to the old page must be put (a standalone sketch
   illustrating this follows the sign-offs below).
2. There is a special case for the fl1 shadow's reference count.

Signed-off-by: Chengyuan Li <chengyuan.li@xxxxxxxxx>
Signed-off-by: Jun Nakajima <jun.nakajima@xxxxxxxxx>
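
Editorial context, not part of the changeset: the following is a minimal,
standalone C sketch of the reference-count leak that point 1 above describes.
The names struct page, get_ref, put_ref and replace_mapping are invented for
illustration only; the idea they model is that the code must put the reference
held by the old entry before replacing or clearing it, otherwise the guest
page's count never drops back to zero and teardown on destroy cannot free it.

#include <assert.h>
#include <stdio.h>

struct page { int refcount; };

static void get_ref(struct page *p) { p->refcount++; }
static void put_ref(struct page *p) { p->refcount--; }

/* Install a new entry in *slot.  The bug corresponds to clearing a slot
 * without dropping the reference held by the entry being overwritten. */
static void replace_mapping(struct page **slot, struct page *newpg,
                            int drop_old_ref)
{
    if (*slot && drop_old_ref)
        put_ref(*slot);          /* put the reference to the old page */
    if (newpg)
        get_ref(newpg);
    *slot = newpg;
}

int main(void)
{
    struct page pg_buggy = { 0 }, pg_fixed = { 0 };
    struct page *slot = NULL;

    /* Buggy path: map, then clear without putting the old reference. */
    replace_mapping(&slot, &pg_buggy, 1);
    replace_mapping(&slot, NULL, 0);
    printf("buggy clear leaves refcount = %d\n", pg_buggy.refcount); /* 1 */

    /* Fixed path: map, then clear and also put the old reference. */
    replace_mapping(&slot, &pg_fixed, 1);
    replace_mapping(&slot, NULL, 1);
    printf("fixed clear leaves refcount = %d\n", pg_fixed.refcount); /* 0 */

    /* Only the fixed page has dropped back to zero and can be freed. */
    assert(pg_fixed.refcount == 0 && pg_buggy.refcount == 1);
    return 0;
}
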

diff -r 8c1944538086 -r 52260d8c2775 xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     Fri Jul 29 10:34:45 2005
+++ b/xen/arch/x86/shadow.c     Fri Jul 29 10:36:11 2005
@@ -1075,6 +1075,11 @@
     int is_l1_shadow =
         ((frame_table[pt_mfn].u.inuse.type_info & PGT_type_mask) ==
          PGT_l1_shadow);
+#if CONFIG_PAGING_LEVELS == 4
+    is_l1_shadow |=
+      ((frame_table[pt_mfn].u.inuse.type_info & PGT_type_mask) ==
+                PGT_fl1_shadow);
+#endif
 
     match = l1e_from_pfn(readonly_gmfn, flags);
 
@@ -2592,7 +2597,7 @@
  * shadow_set_lxe should be put in shadow.h
  */
 static void shadow_set_l2e_64(unsigned long va, l2_pgentry_t sl2e, 
-  int create_l2_shadow)
+  int create_l2_shadow, int put_ref_check)
 {
     struct vcpu *v = current;
     l4_pgentry_t sl4e;
@@ -2619,6 +2624,17 @@
             printk("For non VMX shadow, create_l1_shadow:%d\n", create_l2_shadow);
         }
          shadow_update_min_max(l4e_get_pfn(sl4e), l3_table_offset(va));
+
+    }
+
+    if ( put_ref_check ) {
+        l2_pgentry_t tmp_sl2e;
+        if ( __shadow_get_l2e(v, va, &tmp_sl2e) ) {
+            if ( l2e_get_flags(tmp_sl2e) & _PAGE_PRESENT )
+                if ( l2e_get_pfn(tmp_sl2e) == l2e_get_pfn(sl2e) ) {
+                    put_shadow_ref(l2e_get_pfn(sl2e));
+                }
+        }
 
     }
 
@@ -2692,7 +2708,7 @@
     l1_pgentry_t old_sl1e;
     l2_pgentry_t sl2e;
     unsigned long nx = 0;
-
+    int put_ref_check = 0;
     /* Check if gpfn is 2M aligned */
 
     /* Update guest l2e */
@@ -2723,6 +2739,7 @@
                 l2e_get_pfn(sl2e) == l1_mfn) {
             ESH_LOG("sl2e PRSENT bit is set: %lx, l1_mfn = %lx\n", l2e_get_pfn(sl2e), l1_mfn);
         } else {
+            put_ref_check = 1;
             if (!get_shadow_ref(l1_mfn))
                 BUG();
         }
@@ -2746,7 +2763,7 @@
 
     ESH_LOG("<%s>: sl2e = %lx\n", __func__, l2e_get_intpte(sl2e));
     /* Map the page to l2*/
-    shadow_set_l2e_64(va, sl2e, 1);
+    shadow_set_l2e_64(va, sl2e, 1, put_ref_check);
 
     if (l2e_get_flags(gl2e) & _PAGE_NX)
         l2e_add_flags(tmp_l2e, _PAGE_NX);
@@ -2911,10 +2928,14 @@
 static void shadow_invlpg_64(struct vcpu *v, unsigned long va)
 {
     struct domain *d = v->domain;
-    //l1_pgentry_64_t  gl1e, sl1e;
-    l1_pgentry_t  sl1e;
+    l1_pgentry_t  sl1e, old_sl1e;
 
     shadow_lock(d);
+
+    if ( __shadow_get_l1e(v, va, &old_sl1e) )
+        if ( l1e_get_flags(old_sl1e) & _PAGE_PRESENT )
+            put_page_from_l1e(old_sl1e, d);
+
 
     sl1e = l1e_empty();
     __shadow_set_l1e(v, va, &sl1e);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
