[Xen-changelog] Just some small code cleanup to shadow.c, no logic changed.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Just some small code cleanup to shadow.c, no logic changed.
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 02 Nov 2005 11:22:06 +0000
Delivery-date: Wed, 02 Nov 2005 11:19:46 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID c31edd72086dc20ef94b7e9af8deb8c243581efc
# Parent  5ed53e973b833850eb2fcba16a361988ea133661
Just some small code cleanup to shadow.c, no logic changed.

Signed-off-by: Xin Li <xin.b.li@xxxxxxxxx>
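
For readers skimming the hunks below: aside from the shadow_64.c -> shadow.c fix in the
header comment, the patch removes trailing whitespace and stray blank lines. A minimal
sketch of how such lines can be located (or scrubbed) across a tree is given here; the
script, its paths and the --fix flag are illustrative only and are not part of this
changeset.

#!/usr/bin/env python3
# Minimal sketch: report (or strip) trailing whitespace in source files.
# The script name, paths and the --fix flag are illustrative, not part of
# this changeset.
import sys

def scrub(path, fix=False):
    with open(path) as f:
        lines = f.readlines()
    # A line is "dirty" if removing only the newline still leaves trailing blanks.
    dirty = [n for n, l in enumerate(lines, 1) if l.rstrip("\n") != l.rstrip()]
    for n in dirty:
        print("%s:%d: trailing whitespace" % (path, n))
    if fix and dirty:
        with open(path, "w") as f:
            f.writelines(l.rstrip() + "\n" for l in lines)
    return len(dirty)

if __name__ == "__main__":
    fix = "--fix" in sys.argv
    paths = [a for a in sys.argv[1:] if a != "--fix"]
    sys.exit(1 if sum(scrub(p, fix) for p in paths) else 0)

Run over xen/arch/x86/shadow.c at the parent revision it would list the lines this patch
touches; with --fix it rewrites the file in place (a hypothetical workflow, not how this
patch was produced).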

diff -r 5ed53e973b83 -r c31edd72086d xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     Tue Nov  1 23:08:01 2005
+++ b/xen/arch/x86/shadow.c     Wed Nov  2 10:18:51 2005
@@ -1,19 +1,19 @@
 /******************************************************************************
- * arch/x86/shadow_64.c
- * 
+ * arch/x86/shadow.c
+ *
  * Copyright (c) 2005 Michael A Fetterman
  * Based on an earlier implementation by Ian Pratt et al
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
@@ -55,7 +55,6 @@
     unsigned long va, unsigned int from, unsigned int to);
 static inline void validate_bl2e_change( struct domain *d,
     guest_root_pgentry_t *new_gle_p, pgentry_64_t *shadow_l3, int index);
-
 #endif
 
 /********
@@ -102,7 +101,6 @@
         return 1;
 #endif
         return 0;
-        
     }
 
     // To convert this page to use as a page table, the writable count
@@ -490,12 +488,12 @@
          * We could proactively fill in PDEs for pages that are already
          * shadowed *and* where the guest PDE has _PAGE_ACCESSED set
          * (restriction required for coherence of the accessed bit). However,
-         * we tried it and it didn't help performance. This is simpler. 
+         * we tried it and it didn't help performance. This is simpler.
          */
         memset(spl2e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE*sizeof(l2_pgentry_t));
 
         /* Install hypervisor and 2x linear p.t. mapings. */
-        memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
+        memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
                &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
                HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
 
@@ -522,7 +520,7 @@
             //
             if ( !get_shadow_ref(hl2mfn) )
                 BUG();
-            
+
             spl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
                 l2e_from_pfn(hl2mfn, __PAGE_HYPERVISOR);
         }
@@ -532,7 +530,7 @@
     }
     else
     {
-        memset(spl2e, 0, L2_PAGETABLE_ENTRIES*sizeof(l2_pgentry_t));        
+        memset(spl2e, 0, L2_PAGETABLE_ENTRIES*sizeof(l2_pgentry_t));
     }
 
     unmap_domain_page(spl2e);
@@ -543,7 +541,7 @@
 #endif
 
 static void shadow_map_l1_into_current_l2(unsigned long va)
-{ 
+{
     struct vcpu *v = current;
     struct domain *d = v->domain;
     l1_pgentry_t *spl1e;
@@ -596,7 +594,7 @@
 #if CONFIG_PAGING_LEVELS >=4
     if (d->arch.ops->guest_paging_levels == PAGING_L2)
     {
-        /* for 32-bit VMX guest on 64-bit host, 
+        /* for 32-bit VMX guest on 64-bit host,
          * need update two L2 entries each time
          */
         if ( !get_shadow_ref(sl1mfn))
@@ -624,7 +622,7 @@
         l1_pgentry_t sl1e;
         int index = guest_l1_table_offset(va);
         int min = 1, max = 0;
-        
+
         unsigned long entries, pt_va;
         l1_pgentry_t tmp_sl1e;
         guest_l1_pgentry_t tmp_gl1e;//Prepare for double compile
@@ -790,7 +788,7 @@
 
         /* Record the allocation block so it can be correctly freed later. */
         d->arch.out_of_sync_extras_count++;
-        *((struct out_of_sync_entry **)&extra[out_of_sync_extra_size]) = 
+        *((struct out_of_sync_entry **)&extra[out_of_sync_extra_size]) =
             d->arch.out_of_sync_extras;
         d->arch.out_of_sync_extras = &extra[0];
 
@@ -1020,7 +1018,7 @@
 {
     struct domain *d = v->domain;
 #if defined (__x86_64__)
-    unsigned long l2mfn = ((v->arch.flags & TF_kernel_mode)? 
+    unsigned long l2mfn = ((v->arch.flags & TF_kernel_mode)?
                           pagetable_get_pfn(v->arch.guest_table) :
                           pagetable_get_pfn(v->arch.guest_table_user));
 #else
@@ -1082,7 +1080,7 @@
         return 1;
 
     __guest_get_l2e(v, va, &l2e);
-    if ( !(guest_l2e_get_flags(l2e) & _PAGE_PRESENT) || 
+    if ( !(guest_l2e_get_flags(l2e) & _PAGE_PRESENT) ||
          (guest_l2e_get_flags(l2e) & _PAGE_PSE))
         return 0;
 
@@ -1155,7 +1153,7 @@
 }
 
 static int fix_entry(
-    struct domain *d, 
+    struct domain *d,
     l1_pgentry_t *pt, u32 *found, int is_l1_shadow, u32 max_refs_to_find)
 {
     l1_pgentry_t old = *pt;
@@ -1194,19 +1192,19 @@
     match = l1e_from_pfn(readonly_gmfn, flags);
 
     if ( shadow_mode_external(d) ) {
-        i = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_va_mask) 
+        i = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_va_mask)
             >> PGT_va_shift;
 
         if ( (i >= 0 && i <= L1_PAGETABLE_ENTRIES) &&
-             !l1e_has_changed(pt[i], match, flags) && 
+             !l1e_has_changed(pt[i], match, flags) &&
              fix_entry(d, &pt[i], &found, is_l1_shadow, max_refs_to_find) &&
              !prediction )
             goto out;
     }
- 
+
     for (i = 0; i < GUEST_L1_PAGETABLE_ENTRIES; i++)
     {
-        if ( unlikely(!l1e_has_changed(pt[i], match, flags)) && 
+        if ( unlikely(!l1e_has_changed(pt[i], match, flags)) &&
              fix_entry(d, &pt[i], &found, is_l1_shadow, max_refs_to_find) )
             break;
     }
@@ -1255,7 +1253,7 @@
     }
 
     if ( shadow_mode_external(d) ) {
-        if (write_refs-- == 0) 
+        if (write_refs-- == 0)
             return 0;
 
          // Use the back pointer to locate the shadow page that can contain
@@ -1275,7 +1273,7 @@
         a = &d->arch.shadow_ht[i];
         while ( a && a->gpfn_and_flags )
         {
-            if ( (a->gpfn_and_flags & PGT_type_mask) == PGT_l1_shadow 
+            if ( (a->gpfn_and_flags & PGT_type_mask) == PGT_l1_shadow
 #if CONFIG_PAGING_LEVELS >= 4
               || (a->gpfn_and_flags & PGT_type_mask) == PGT_fl1_shadow
 #endif
@@ -1384,10 +1382,10 @@
                 if ( (i < min_snapshot) || (i > max_snapshot) ||
                     guest_l1e_has_changed(guest1[i], snapshot1[i], PAGE_FLAG_MASK) )
                 {
-                    int error; 
+                    int error;
 
                     error = validate_pte_change(d, guest1[i], &shadow1[i]);
-                    if ( error ==  -1 ) 
+                    if ( error ==  -1 )
                         unshadow_l1 = 1;
                     else {
                         need_flush |= error;
@@ -1474,7 +1472,7 @@
             l2_pgentry_t *guest2 = guest;
             l2_pgentry_t *snapshot2 = snapshot;
             l1_pgentry_t *shadow2 = shadow;
-            
+
             ASSERT(shadow_mode_write_all(d));
             BUG_ON(!shadow_mode_refcounts(d)); // not yet implemented
 
@@ -1634,7 +1632,7 @@
              !shadow_get_page_from_l1e(npte, d) )
             BUG();
         *ppte = npte;
-        set_guest_back_ptr(d, npte, (entry->writable_pl1e) >> PAGE_SHIFT, 
+        set_guest_back_ptr(d, npte, (entry->writable_pl1e) >> PAGE_SHIFT,
                           (entry->writable_pl1e & ~PAGE_MASK)/sizeof(l1_pgentry_t));
         shadow_put_page_from_l1e(opte, d);
 
@@ -1719,7 +1717,7 @@
 
 static inline int l1pte_read_fault(
     struct domain *d, guest_l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p)
-{ 
+{
     guest_l1_pgentry_t gpte = *gpte_p;
     l1_pgentry_t spte = *spte_p;
     unsigned long pfn = l1e_get_pfn(gpte);
@@ -1761,7 +1759,7 @@
     SH_VVLOG("shadow_fault( va=%lx, code=%lu )",
              va, (unsigned long)regs->error_code);
     perfc_incrc(shadow_fault_calls);
-    
+
     check_pagetable(v, "pre-sf");
 
     /*
@@ -1804,7 +1802,7 @@
     }
 
     /* Write fault? */
-    if ( regs->error_code & 2 )  
+    if ( regs->error_code & 2 )
     {
         int allow_writes = 0;
 
@@ -1818,7 +1816,7 @@
             else
             {
                 /* Write fault on a read-only mapping. */
-                SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%" PRIpte ")", 
+                SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%" PRIpte ")",
                          l1e_get_intpte(gpte));
                 perfc_incrc(shadow_fault_bail_ro_mapping);
                 goto fail;
@@ -1878,7 +1876,7 @@
     check_pagetable(v, "post-sf");
     return EXCRET_fault_fixed;
 
- fail:
+fail:
     shadow_unlock(d);
     return 0;
 }
@@ -1895,7 +1893,7 @@
     shadow_lock(d);
 
     //printk("%s(va=%p, val=%p)\n", __func__, (void *)va, (void *)l1e_get_intpte(val));
-        
+
     // This is actually overkill - we don't need to sync the L1 itself,
     // just everything involved in getting to this L1 (i.e. we need
     // linear_pg_table[l1_linear_offset(va)] to be in sync)...
@@ -1925,7 +1923,7 @@
  * and what it uses to get/maintain that mapping.
  *
  * SHADOW MODE:      none         enable         translate         external
- * 
+ *
  * 4KB things:
  * guest_vtable    lin_l2     mapped per gl2   lin_l2 via hl2   mapped per gl2
  * shadow_vtable     n/a         sh_lin_l2       sh_lin_l2      mapped per gl2
@@ -1950,7 +1948,7 @@
 {
     struct domain *d = v->domain;
 #if defined (__x86_64__)
-    unsigned long gmfn = ((v->arch.flags & TF_kernel_mode)? 
+    unsigned long gmfn = ((v->arch.flags & TF_kernel_mode)?
                           pagetable_get_pfn(v->arch.guest_table) :
                           pagetable_get_pfn(v->arch.guest_table_user));
 #else
@@ -2006,7 +2004,7 @@
     /*
      * arch.shadow_vtable
      */
-    if ( max_mode == SHM_external 
+    if ( max_mode == SHM_external
 #if CONFIG_PAGING_LEVELS >=4
          || max_mode & SHM_enable
 #endif
@@ -2241,7 +2239,7 @@
                page_table_page);
         FAIL("RW2 coherence");
     }
- 
+
     if ( eff_guest_mfn == shadow_mfn )
     {
         if ( level > 1 )
@@ -2291,7 +2289,7 @@
         errors += check_pte(v, p_guest+i, p_shadow+i,
                             p_snapshot ? p_snapshot+i : NULL,
                             1, l2_idx, i);
- 
+
     unmap_domain_page(p_shadow);
     unmap_domain_page(p_guest);
     if ( p_snapshot )
@@ -2327,11 +2325,11 @@
 
 #if 0
     if ( memcmp(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
-                &gpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
+                &gpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
                 ((SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT) -
                  DOMAIN_ENTRIES_PER_L2_PAGETABLE) * sizeof(l2_pgentry_t)) )
     {
-        for ( i = DOMAIN_ENTRIES_PER_L2_PAGETABLE; 
+        for ( i = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
               i < (SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT);
               i++ )
             printk("+++ (%d) %lx %lx\n",i,
@@ -2339,7 +2337,7 @@
         FAILPT("hypervisor entries inconsistent");
     }
 
-    if ( (l2_pgentry_val(spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]) != 
+    if ( (l2_pgentry_val(spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
           l2_pgentry_val(gpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT])) )
         FAILPT("hypervisor linear map inconsistent");
 #endif
@@ -2399,7 +2397,7 @@
 {
     struct domain *d = v->domain;
 #if defined (__x86_64__)
-    pagetable_t pt = ((v->arch.flags & TF_kernel_mode)? 
+    pagetable_t pt = ((v->arch.flags & TF_kernel_mode)?
                       pagetable_get_pfn(v->arch.guest_table) :
                       pagetable_get_pfn(v->arch.guest_table_user));
 #else
@@ -2434,7 +2432,7 @@
         oos_pdes = 1;
         ASSERT(ptbase_mfn);
     }
- 
+
     errors += check_l2_table(v, ptbase_mfn, smfn, oos_pdes);
 
     gpl2e = (l2_pgentry_t *) map_domain_page(ptbase_mfn);
@@ -2565,7 +2563,6 @@
  * The code is for 32-bit VMX gues on 64-bit host.
  * To sync guest L2.
  */
-
 static inline void
 validate_bl2e_change(
   struct domain *d,
@@ -2596,7 +2593,6 @@
             entry_from_pfn(sl1mfn + 1, entry_get_flags(sl2_p[sl2_idx]));
     }
     unmap_domain_page(sl2_p);
-
 }
 
 /*
@@ -2629,9 +2625,8 @@
     }
 
     unmap_domain_page(spl4e);
+
     return smfn;
-
-
 }
 
 static unsigned long shadow_l4_table(
@@ -2664,7 +2659,7 @@
          * We could proactively fill in PDEs for pages that are already
          * shadowed *and* where the guest PDE has _PAGE_ACCESSED set
          * (restriction required for coherence of the accessed bit). However,
-         * we tried it and it didn't help performance. This is simpler. 
+         * we tried it and it didn't help performance. This is simpler.
          */
         memset(spl4e, 0, L4_PAGETABLE_ENTRIES*sizeof(l4_pgentry_t));
 
@@ -2757,7 +2752,7 @@
     }
 }
 
-static void shadow_map_into_current(struct vcpu *v, 
+static void shadow_map_into_current(struct vcpu *v,
   unsigned long va, unsigned int from, unsigned int to)
 {
     pgentry_64_t gle, sle;
@@ -2768,7 +2763,7 @@
         return;
     }
 
-    __rw_entry(v, va, &gle, GUEST_ENTRY | GET_ENTRY | to); 
+    __rw_entry(v, va, &gle, GUEST_ENTRY | GET_ENTRY | to);
     ASSERT(entry_get_flags(gle) & _PAGE_PRESENT);
     gpfn = entry_get_pfn(gle);
 
@@ -2784,7 +2779,7 @@
 /*
  * shadow_set_lxe should be put in shadow.h
  */
-static void shadow_set_l2e_64(unsigned long va, l2_pgentry_t sl2e, 
+static void shadow_set_l2e_64(unsigned long va, l2_pgentry_t sl2e,
   int create_l2_shadow, int put_ref_check)
 {
     struct vcpu *v = current;
@@ -2934,11 +2929,11 @@
         sl2e = l2e_empty();
 
     l1_mfn = ___shadow_status(d, start_gpfn | nx, PGT_fl1_shadow);
-    
+
     /* Check the corresponding l2e */
     if (l1_mfn) {
         /* Why it is PRESENT?*/
-        if ((l2e_get_flags(sl2e) & _PAGE_PRESENT) && 
+        if ((l2e_get_flags(sl2e) & _PAGE_PRESENT) &&
                 l2e_get_pfn(sl2e) == l1_mfn) {
             ESH_LOG("sl2e PRSENT bit is set: %lx, l1_mfn = %lx\n", l2e_get_pfn(sl2e), l1_mfn);
         } else {
@@ -2985,7 +2980,7 @@
         sl1e = l1e_from_pfn(mfn, l2e_get_flags(tmp_l2e));
 
         if (!rw) {
-            if ( shadow_mode_log_dirty(d) || 
+            if ( shadow_mode_log_dirty(d) ||
               !(l2e_get_flags(gl2e) & _PAGE_DIRTY) || mfn_is_page_table(mfn) )
             {
                 l1e_remove_flags(sl1e, _PAGE_RW);
@@ -3034,7 +3029,7 @@
  */
 #if defined( GUEST_PGENTRY_32 )
 static inline int guest_page_fault(struct vcpu *v,
-  unsigned long va, unsigned int error_code, 
+  unsigned long va, unsigned int error_code,
   guest_l2_pgentry_t *gpl2e, guest_l1_pgentry_t *gpl1e)
 {
     /* The following check for 32-bit guest on 64-bit host */
@@ -3076,7 +3071,7 @@
 }
 #else
 static inline int guest_page_fault(struct vcpu *v,
-  unsigned long va, unsigned int error_code, 
+  unsigned long va, unsigned int error_code,
   guest_l2_pgentry_t *gpl2e, guest_l1_pgentry_t *gpl1e)
 {
     struct domain *d = v->domain;
@@ -3144,7 +3139,7 @@
 
     perfc_incrc(shadow_fault_calls);
 
-    ESH_LOG("<shadow_fault_64> va=%lx,  rip = %lx, error code = %x\n", 
+    ESH_LOG("<shadow_fault_64> va=%lx,  rip = %lx, error code = %x\n",
             va, regs->eip, regs->error_code);
 
     /*
@@ -3166,12 +3161,12 @@
             v, va, regs->error_code, &gl2e, &gl1e) ) {
         goto fail;
     }
-    
+
     if ( unlikely(!(guest_l2e_get_flags(gl2e) & _PAGE_PSE)) ) {
         /*
          * Handle 4K pages here
          */
-        
+
         /* Write fault? */
         if ( regs->error_code & 2 ) {
             if ( !l1pte_write_fault(v, &gl1e, &sl1e, va) ) {
@@ -3194,7 +3189,7 @@
          */
          if ( unlikely(shadow_mode_log_dirty(d)) )
             __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gl2e)));
- 
+
     } else {
         /*
          * Handle 2M pages here
@@ -3262,7 +3257,7 @@
 
     if (guest_page_fault(v, gva, 0, &gl2e, &gl1e))
         return 0;
-    
+
     if (guest_l2e_get_flags(gl2e) & _PAGE_PSE)
         gpa = guest_l2e_get_paddr(gl2e) + (gva & ((1 << GUEST_L2_PAGETABLE_SHIFT) - 1));
     else
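
One rough way to sanity-check the "no logic changed" claim is to parse the unified diff
and compare each '-'/'+' run with trailing whitespace stripped and blank lines dropped;
for this patch the only expected report is the header-comment filename fix. A hedged
sketch, not part of the changeset:

#!/usr/bin/env python3
# Rough sketch: flag any hunk in a unified diff whose '-'/'+' runs differ by
# more than trailing whitespace or removed blank lines. For this changeset the
# only expected report is the shadow_64.c -> shadow.c header-comment fix.
import sys

removed, added = [], []

def flush():
    # Compare the stripped, non-blank bodies of the pending '-' and '+' runs.
    old = [l for l in (x.rstrip() for x in removed) if l]
    new = [l for l in (x.rstrip() for x in added) if l]
    for o, n in zip(old, new):
        if o != n:
            print("non-whitespace change:\n  -%s\n  +%s" % (o, n))
    removed.clear()
    added.clear()

for line in sys.stdin:
    if line.startswith("-") and not line.startswith("---"):
        removed.append(line[1:])
    elif line.startswith("+") and not line.startswith("+++"):
        added.append(line[1:])
    else:
        flush()
flush()

Feeding it the exported patch, e.g. "hg export c31edd72086d" piped into the script from a
local clone, is one plausible invocation (the pairing of commands is illustrative).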

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
