To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86: Only build memory-event features on 64-bit Xen
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 29 Jun 2010 13:45:21 -0700
Delivery-date: Tue, 29 Jun 2010 13:48:12 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1277831801 -3600
# Node ID 6b5a5bfaf3577a050c6779b0b62245560fda53f6
# Parent  3ea84fd20b263a8e443e3bb16d5495cd3dbd8033
x86: Only build memory-event features on 64-bit Xen

32-bit Xen doesn't have enough p2m types to support them.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
 xen/arch/x86/domctl.c             |    2 ++
 xen/arch/x86/hvm/hvm.c            |    2 ++
 xen/arch/x86/mm.c                 |    9 +++++++--
 xen/arch/x86/mm/Makefile          |    6 +++---
 xen/arch/x86/mm/p2m.c             |   13 ++++++++++---
 xen/include/asm-x86/mem_sharing.h |    8 ++++++++
 xen/include/asm-x86/p2m.h         |   26 +++++++++++++++++---------
 7 files changed, 49 insertions(+), 17 deletions(-)
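
Why 32-bit runs out of room: the p2m code keeps each page's p2m_type_t in the
spare software-available bits of its page-table entry, and the 32-bit (PAE)
build has only three of them to spare, enough for types 0-7, while the paging
and sharing types in the p2m_type_t enum patched below use values 9-13, so
they cannot be encoded at all. A minimal sketch of that arithmetic, assuming
a hypothetical 3-bit field at bit 9 (the field position and width here are
assumptions for the example, not Xen's actual PTE layout):

/* Illustrative sketch only: a 3-bit type field holds types 0..7, so the
 * paging/sharing types (values 9..13) cannot be stored in it.  The bit
 * position and width are assumptions, not Xen's real layout. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TYPE_SHIFT  9                           /* hypothetical position */
#define TYPE_BITS   3                           /* 32-bit build          */
#define TYPE_MAX    ((1u << TYPE_BITS) - 1)     /* == 7                  */

static uint64_t encode_type(uint64_t pte, unsigned int t)
{
    assert(t <= TYPE_MAX);      /* p2m_ram_shared == 13 would trip this */
    return pte | ((uint64_t)t << TYPE_SHIFT);
}

int main(void)
{
    printf("largest encodable type: %u\n", TYPE_MAX);   /* prints 7 */
    return encode_type(0, 5) ? 0 : 1;                   /* type 5 fits */
}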

diff -r 3ea84fd20b26 -r 6b5a5bfaf357 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Tue Jun 29 13:43:18 2010 +0100
+++ b/xen/arch/x86/domctl.c     Tue Jun 29 18:16:41 2010 +0100
@@ -1420,6 +1420,7 @@ long arch_do_domctl(
     break;
 #endif /* XEN_GDBSX_CONFIG */
 
+#ifdef __x86_64__
     case XEN_DOMCTL_mem_event_op:
     {
         struct domain *d;
@@ -1450,6 +1451,7 @@ long arch_do_domctl(
         } 
     }
     break;
+#endif /* __x86_64__ */
 
     default:
         ret = -ENOSYS;
diff -r 3ea84fd20b26 -r 6b5a5bfaf357 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Tue Jun 29 13:43:18 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Tue Jun 29 18:16:41 2010 +0100
@@ -982,6 +982,7 @@ bool_t hvm_hap_nested_page_fault(unsigne
         return 1;
     }
 
+#ifdef __x86_64__
     /* Check if the page has been paged out */
     if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
         p2m_mem_paging_populate(current->domain, gfn);
@@ -992,6 +993,7 @@ bool_t hvm_hap_nested_page_fault(unsigne
         mem_sharing_unshare_page(current->domain, gfn, 0);
         return 1;
     }
+#endif
  
     /* Spurious fault? PoD and log-dirty also take this path. */
     if ( p2m_is_ram(p2mt) )
diff -r 3ea84fd20b26 -r 6b5a5bfaf357 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Jun 29 13:43:18 2010 +0100
+++ b/xen/arch/x86/mm.c Tue Jun 29 18:16:41 2010 +0100
@@ -3448,20 +3448,23 @@ int do_mmu_update(
                         rc = -ENOENT;
                         break;
                     }
+#ifdef __x86_64__
                     /* XXX: Ugly: pull all the checks into a separate function.
                      * Don't want to do it now, not to interfere with mem_paging
                      * patches */
                     else if ( p2m_ram_shared == l1e_p2mt )
                     {
                         /* Unshare the page for RW foreign mappings */
-                        if(l1e_get_flags(l1e) & _PAGE_RW)
+                        if ( l1e_get_flags(l1e) & _PAGE_RW )
                         {
                             rc = mem_sharing_unshare_page(pg_owner, 
                                                           l1e_get_pfn(l1e), 
                                                           0);
-                            if(rc) break; 
+                            if ( rc )
+                                break; 
                         }
                     } 
+#endif
 
                     okay = mod_l1_entry(va, l1e, mfn,
                                         cmd == MMU_PT_UPDATE_PRESERVE_AD, v,
@@ -4806,8 +4809,10 @@ long arch_memory_op(int op, XEN_GUEST_HA
         return rc;
     }
 
+#ifdef __x86_64__
     case XENMEM_get_sharing_freed_pages:
         return mem_sharing_get_nr_saved_mfns();
+#endif
 
     default:
         return subarch_memory_op(op, arg);
diff -r 3ea84fd20b26 -r 6b5a5bfaf357 xen/arch/x86/mm/Makefile
--- a/xen/arch/x86/mm/Makefile  Tue Jun 29 13:43:18 2010 +0100
+++ b/xen/arch/x86/mm/Makefile  Tue Jun 29 18:16:41 2010 +0100
@@ -6,9 +6,9 @@ obj-y += guest_walk_2.o
 obj-y += guest_walk_2.o
 obj-y += guest_walk_3.o
 obj-$(x86_64) += guest_walk_4.o
-obj-y += mem_event.o
-obj-y += mem_paging.o
-obj-y += mem_sharing.o
+obj-$(x86_64) += mem_event.o
+obj-$(x86_64) += mem_paging.o
+obj-$(x86_64) += mem_sharing.o
 
 guest_walk_%.o: guest_walk.c Makefile
        $(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
diff -r 3ea84fd20b26 -r 6b5a5bfaf357 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Tue Jun 29 13:43:18 2010 +0100
+++ b/xen/arch/x86/mm/p2m.c     Tue Jun 29 18:16:41 2010 +0100
@@ -1868,17 +1868,23 @@ void p2m_teardown(struct domain *d)
 {
     struct page_info *pg;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
+#ifdef __x86_64__
     unsigned long gfn;
     p2m_type_t t;
     mfn_t mfn;
+#endif
 
     p2m_lock(p2m);
-    for(gfn=0; gfn < p2m->max_mapped_pfn; gfn++)
+
+#ifdef __x86_64__
+    for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
     {
         mfn = p2m->get_entry(d, gfn, &t, p2m_query);
-        if(mfn_valid(mfn) && (t == p2m_ram_shared))
+        if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
             BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN));
     }
+#endif
+
     p2m->phys_table = pagetable_null();
 
     while ( (pg = page_list_remove_head(&p2m->pages)) )
@@ -2616,6 +2622,7 @@ clear_mmio_p2m_entry(struct domain *d, u
     return rc;
 }
 
+#ifdef __x86_64__
 int
 set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
 {
@@ -2798,7 +2805,7 @@ void p2m_mem_paging_resume(struct domain
     /* Unpause any domains that were paused because the ring was full */
     mem_event_unpause_vcpus(d);
 }
-
+#endif /* __x86_64__ */
 
 /*
  * Local variables:
diff -r 3ea84fd20b26 -r 6b5a5bfaf357 xen/include/asm-x86/mem_sharing.h
--- a/xen/include/asm-x86/mem_sharing.h Tue Jun 29 13:43:18 2010 +0100
+++ b/xen/include/asm-x86/mem_sharing.h Tue Jun 29 18:16:41 2010 +0100
@@ -22,6 +22,8 @@
 #ifndef __MEM_SHARING_H__
 #define __MEM_SHARING_H__
 
+#ifdef __x86_64__
+
 #define sharing_supported(_d) \
     (is_hvm_domain(_d) && paging_mode_hap(_d)) 
 
@@ -43,4 +45,10 @@ int mem_sharing_domctl(struct domain *d,
                        xen_domctl_mem_sharing_op_t *mec);
 void mem_sharing_init(void);
 
+#else 
+
+#define mem_sharing_init()  do { } while (0)
+
+#endif /* __x86_64__ */
+
 #endif /* __MEM_SHARING_H__ */
diff -r 3ea84fd20b26 -r 6b5a5bfaf357 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Tue Jun 29 13:43:18 2010 +0100
+++ b/xen/include/asm-x86/p2m.h Tue Jun 29 18:16:41 2010 +0100
@@ -78,11 +78,12 @@ typedef enum {
     p2m_grant_map_rw = 7,       /* Read/write grant mapping */
     p2m_grant_map_ro = 8,       /* Read-only grant mapping */
 
+    /* Likewise, although these are defined in all builds, they can only
+     * be used in 64-bit builds */
     p2m_ram_paging_out = 9,       /* Memory that is being paged out */
     p2m_ram_paged = 10,           /* Memory that has been paged out */
     p2m_ram_paging_in = 11,       /* Memory that is being paged in */
     p2m_ram_paging_in_start = 12, /* Memory that is being paged in */
-
     p2m_ram_shared = 13,          /* Shared or sharable memory */
 } p2m_type_t;
 
@@ -154,6 +155,7 @@ typedef enum {
 #define p2m_is_paged(_t)    (p2m_to_mask(_t) & P2M_PAGED_TYPES)
 #define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES)
 #define p2m_is_shared(_t)   (p2m_to_mask(_t) & P2M_SHARED_TYPES)
+
 
 /* Populate-on-demand */
 #define POPULATE_ON_DEMAND_MFN  (1<<9)
@@ -323,20 +325,21 @@ static inline mfn_t gfn_to_mfn_unshare(s
                                        int must_succeed)
 {
     mfn_t mfn;
-    int ret;
 
     mfn = gfn_to_mfn(d, gfn, p2mt);
-    if(p2m_is_shared(*p2mt))
+#ifdef __x86_64__
+    if ( p2m_is_shared(*p2mt) )
     {
-        ret = mem_sharing_unshare_page(d, gfn,
-                must_succeed ? MEM_SHARING_MUST_SUCCEED : 0);
-        if(ret < 0)
+        if ( mem_sharing_unshare_page(d, gfn,
+                                      must_succeed 
+                                      ? MEM_SHARING_MUST_SUCCEED : 0) )
         {
             BUG_ON(must_succeed);
             return mfn;
         }
         mfn = gfn_to_mfn(d, gfn, p2mt);
     }
+#endif
 
     return mfn;
 }
@@ -438,10 +441,11 @@ p2m_type_t p2m_change_type(struct domain
 /* Set mmio addresses in the p2m table (for pass-through) */
 int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
 int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
+
+
+#ifdef __x86_64__
 /* Modify p2m table for shared gfn */
-int
-set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
-
+int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
 /* Check if a nominated gfn is valid to be paged out */
 int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn);
 /* Evict a frame */
@@ -452,6 +456,10 @@ int p2m_mem_paging_prep(struct domain *d
 int p2m_mem_paging_prep(struct domain *d, unsigned long gfn);
 /* Resume normal operation (in case a domain was paused) */
 void p2m_mem_paging_resume(struct domain *d);
+#else
+static inline void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
+{ }
+#endif
 
 struct page_info *p2m_alloc_ptp(struct domain *d, unsigned long type);
 

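The header hunks use the standard compile-out idiom: on 64-bit the real
declarations are visible and the Makefile builds mem_event.o, mem_paging.o
and mem_sharing.o; on 32-bit the same names resolve to no-op stubs
(mem_sharing_init() becomes an empty statement, p2m_mem_paging_populate() an
empty inline), so common callers keep compiling and #ifdefs are needed only
around code that genuinely touches 64-bit-only state, as in hvm.c and mm.c
above. A stripped-down sketch of the idiom, using hypothetical names rather
than anything from the Xen tree:

/* Compile-out idiom sketch; feature_init()/feature_populate() are
 * hypothetical names, not functions from the Xen source. */
#ifdef __x86_64__
/* Real declarations; definitions live in a 64-bit-only object file. */
void feature_init(void);
void feature_populate(unsigned long gfn);
#else
/* Stubs: callers compile unchanged and the calls fold away to nothing. */
#define feature_init()  do { } while (0)
static inline void feature_populate(unsigned long gfn) { }
#endif

The do { } while (0) form keeps the macro usable as an ordinary statement
(safe inside an unbraced if/else), and the empty static inline still
type-checks its argument while generating no code.
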
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
