[Xen-changelog] Really just basic preparation: switch over PAE builds to the new shadow code

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Really just basic preparation: switch over PAE builds to the new
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 29 Jul 2005 06:36:12 -0400
Delivery-date: Fri, 29 Jul 2005 10:36:45 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 691cd6f6573955bea673e2d15077a451d26bfae8
# Parent  0474ffc52ba79c5803a3cbe33380c555f71372dd
Really just basic preparation: switch over PAE builds to the new
shadow code, drop old dummy functions, add (fewer) new ones.
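
The hunks below trade checks on the raw architecture (defined(__i386__)) for checks on CONFIG_PAGING_LEVELS, so that the PAE (3-level) build compiles the new shadow code instead of the old dummy stubs. As a rough sketch of the idiom, assuming the usual Xen mapping of 2 paging levels for plain x86_32, 3 for x86_32 PAE and 4 for x86_64 (that mapping is defined elsewhere and is not part of this patch):

/* Illustrative only -- not part of the changeset. */
#if CONFIG_PAGING_LEVELS == 2
/* x86_32 without PAE: keep the old 2-level shadow code (shadow32.o). */
#elif CONFIG_PAGING_LEVELS == 3
/* x86_32 with PAE: build the new shadow code (shadow.o, shadow_public.o);
 * the PAE-specific pieces (e.g. shadow_l3_table()) are still to be filled
 * in, as the FIXME added by this patch notes. */
#elif CONFIG_PAGING_LEVELS == 4
/* x86_64: the new shadow code with full 4-level support. */
#endif

The Makefile hunk makes the same split at link time: all shadow%.o objects are dropped from OBJS and then re-added per sub-architecture, keyed on the make variable pae for x86_32 (how that variable gets set, e.g. pae=y on the make command line, is outside this patch).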

diff -r 0474ffc52ba7 -r 691cd6f65739 xen/arch/x86/Makefile
--- a/xen/arch/x86/Makefile     Fri Jul 29 10:22:03 2005
+++ b/xen/arch/x86/Makefile     Fri Jul 29 10:23:07 2005
@@ -13,11 +13,18 @@
 OBJS := $(subst cpu/cyrix.o,,$(OBJS))
 OBJS := $(subst cpu/rise.o,,$(OBJS))
 OBJS := $(subst cpu/transmeta.o,,$(OBJS))
-OBJS := $(subst shadow32.o,,$(OBJS))
-else
-OBJS := $(subst shadow.o,,$(OBJS))
-OBJS := $(subst shadow_public.o,,$(OBJS))
-OBJS := $(subst shadow_xxx.o,,$(OBJS))
+endif
+
+OBJS := $(patsubst shadow%.o,,$(OBJS)) # drop all
+ifeq ($(TARGET_SUBARCH),x86_64) 
+ OBJS += shadow.o shadow_public.o      # x86_64: new code
+endif
+ifeq ($(TARGET_SUBARCH),x86_32) 
+ ifneq ($(pae),n)
+  OBJS += shadow.o shadow_public.o     # x86_32p: new code
+ else
+  OBJS += shadow32.o                   # x86_32: old code
+ endif
 endif
 
 OBJS := $(subst $(TARGET_SUBARCH)/asm-offsets.o,,$(OBJS))
diff -r 0474ffc52ba7 -r 691cd6f65739 xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     Fri Jul 29 10:22:03 2005
+++ b/xen/arch/x86/shadow.c     Fri Jul 29 10:23:07 2005
@@ -41,7 +41,13 @@
 static void mark_shadows_as_reflecting_snapshot(struct domain *d, unsigned long gpfn);
 #endif
 
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS == 3
+#include <asm/shadow_64.h>
+static unsigned long shadow_l3_table(
+    struct domain *d, unsigned long gpfn, unsigned long gmfn);
+#endif
+
+#if CONFIG_PAGING_LEVELS == 4
 #include <asm/shadow_64.h>
 static unsigned long shadow_l4_table(
     struct domain *d, unsigned long gpfn, unsigned long gmfn);
@@ -1833,7 +1839,7 @@
     unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
     unsigned long smfn, old_smfn;
 
-#if defined (__i386__)
+#if CONFIG_PAGING_LEVELS == 2
     unsigned long hl2mfn;
 #endif
   
@@ -1890,7 +1896,7 @@
         v->arch.shadow_vtable = map_domain_page(smfn);
     }
 
-#if defined (__i386__)
+#if CONFIG_PAGING_LEVELS == 2
     /*
      * arch.hl2_vtable
      */
@@ -1935,6 +1941,10 @@
         // XXX - maybe this can be optimized somewhat??
         local_flush_tlb();
     }
+#endif
+
+#if CONFIG_PAGING_LEVELS == 3
+    /* FIXME: PAE code to be written */
 #endif
 }
 
@@ -2427,6 +2437,7 @@
   struct domain *d, unsigned long gpfn, unsigned long gmfn)
 {
     BUG();                      /* not implemented yet */
+    return 42;
 }
 #endif
 
diff -r 0474ffc52ba7 -r 691cd6f65739 xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c        Fri Jul 29 10:22:03 2005
+++ b/xen/arch/x86/vmx.c        Fri Jul 29 10:23:07 2005
@@ -38,7 +38,7 @@
 #include <asm/vmx_vmcs.h>
 #include <asm/vmx_intercept.h>
 #include <asm/shadow.h>
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
 #include <asm/shadow_64.h>
 #endif
 
diff -r 0474ffc52ba7 -r 691cd6f65739 xen/arch/x86/vmx_platform.c
--- a/xen/arch/x86/vmx_platform.c       Fri Jul 29 10:22:03 2005
+++ b/xen/arch/x86/vmx_platform.c       Fri Jul 29 10:23:07 2005
@@ -32,7 +32,7 @@
 #include <xen/lib.h>
 #include <xen/sched.h>
 #include <asm/current.h>
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
 #include <asm/shadow_64.h>
 #endif
 #ifdef CONFIG_VMX
diff -r 0474ffc52ba7 -r 691cd6f65739 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Fri Jul 29 10:22:03 2005
+++ b/xen/include/asm-x86/shadow.h      Fri Jul 29 10:23:07 2005
@@ -131,12 +131,12 @@
                                        unsigned long pa, l2_pgentry_t l2e,
                                        struct domain_mmap_cache *cache);
 #if CONFIG_PAGING_LEVELS >= 3
+#include <asm/page-guest32.h>
 extern void shadow_l3_normal_pt_update(struct domain *d,
                                        unsigned long pa, l3_pgentry_t l3e,
                                        struct domain_mmap_cache *cache);
 #endif
 #if CONFIG_PAGING_LEVELS >= 4
-#include <asm/page-guest32.h>
 extern void shadow_l4_normal_pt_update(struct domain *d,
                                        unsigned long pa, l4_pgentry_t l4e,
                                        struct domain_mmap_cache *cache);
@@ -631,82 +631,6 @@
 }
 #endif
 
-#if CONFIG_PAGING_LEVELS == 3
-/* dummy functions, PAE has no shadow support yet */
-
-static inline void
-__shadow_get_l2e(
-    struct vcpu *v, unsigned long va, l2_pgentry_t *psl2e)
-{
-    BUG();
-}
-
-static inline void
-__shadow_set_l2e(
-    struct vcpu *v, unsigned long va, l2_pgentry_t value)
-{
-    BUG();
-}
-
-static inline void
-__guest_get_l2e(
-    struct vcpu *v, unsigned long va, l2_pgentry_t *pl2e)
-{
-    BUG();
-}
-
-static inline void
-__guest_set_l2e(
-    struct vcpu *v, unsigned long va, l2_pgentry_t value)
-{
-    BUG();
-}
-
-static inline void shadow_drop_references(
-    struct domain *d, struct pfn_info *page)
-{
-    if ( likely(!shadow_mode_refcounts(d)) ||
-         ((page->u.inuse.type_info & PGT_count_mask) == 0) )
-        return;
-    BUG();
-}
-
-static inline void shadow_sync_and_drop_references(
-    struct domain *d, struct pfn_info *page)
-{
-    if ( likely(!shadow_mode_refcounts(d)) )
-        return;
-    BUG();
-}
-
-static inline int l1pte_write_fault(
-    struct vcpu *v, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p,
-    unsigned long va)
-{
-    BUG();
-    return 42;
-}
-
-static inline int l1pte_read_fault(
-    struct domain *d, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p)
-{
-    BUG();
-    return 42;
-}
-
-void static inline
-shadow_set_l1e(unsigned long va, l1_pgentry_t new_spte, int create_l1_shadow)
-{
-    BUG();
-}
-
-static inline unsigned long gva_to_gpa(unsigned long gva)
-{
-    BUG();
-    return 42;
-}
-#endif
-    
 /************************************************************************/
 
 /*
diff -r 0474ffc52ba7 -r 691cd6f65739 xen/include/asm-x86/shadow_64.h
--- a/xen/include/asm-x86/shadow_64.h   Fri Jul 29 10:22:03 2005
+++ b/xen/include/asm-x86/shadow_64.h   Fri Jul 29 10:23:07 2005
@@ -85,8 +85,10 @@
             return  (((va) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1));
         case 3:
             return  (((va) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1));
+#if CONFIG_PAGING_LEVELS >= 4
         case 4:
             return  (((va) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1));
+#endif
         default:
             //printk("<table_offset_64> level %d is too big\n", level);
             return -1;
diff -r 0474ffc52ba7 -r 691cd6f65739 xen/include/asm-x86/shadow_public.h
--- a/xen/include/asm-x86/shadow_public.h       Fri Jul 29 10:22:03 2005
+++ b/xen/include/asm-x86/shadow_public.h       Fri Jul 29 10:23:07 2005
@@ -21,7 +21,7 @@
 
 #ifndef _XEN_SHADOW_PUBLIC_H
 #define _XEN_SHADOW_PUBLIC_H
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
 #define MFN_PINNED(_x) (frame_table[_x].u.inuse.type_info & PGT_pinned)
 
 extern int alloc_p2m_table(struct domain *d);
@@ -30,10 +30,6 @@
       struct domain *d, struct pfn_info *page);
 extern void shadow_drop_references(
       struct domain *d, struct pfn_info *page);
-
-extern void shadow_l4_normal_pt_update(struct domain *d,
-                                       unsigned long pa, l4_pgentry_t l4e,
-                                       struct domain_mmap_cache *cache);
 
 extern int shadow_set_guest_paging_levels(struct domain *d, int levels);
 
@@ -56,4 +52,10 @@
 };
 #endif
 
+#if CONFIG_PAGING_LEVELS >= 4
+extern void shadow_l4_normal_pt_update(struct domain *d,
+                                       unsigned long pa, l4_pgentry_t l4e,
+                                       struct domain_mmap_cache *cache);
 #endif
+
+#endif
