[Xen-devel] [PATCH 1/2] tmem: add full support for x86 up to 16Tb



tmem used to contain code that assumed it could directly access all memory.
This patch fixes that assumption so that tmem can work on x86 hosts with
up to 16Tb of RAM.

tmem allocates pages mainly for two purposes:

1. To store pages passed in from guests through the frontswap/cleancache
frontends. Here the tmem code already uses map_domain_page() before
accessing the memory, so nothing needs to change for 16Tb support (see
the first sketch below).

2. To store tmem metadata. Here map_domain_page() is a problem because
the number of mapping entries is limited (in a 2-VCPU guest we only have
32 entries) and tmem cannot unmap them again within a short time. The
fix is to allocate Xen heap pages instead of domain heap pages for tmem
metadata (see the second sketch below).

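For purpose 2, a minimal sketch of why Xen heap pages suit long-lived
metadata; meta is a hypothetical metadata buffer, not a name from this
patch:

    /* Xen heap pages remain mapped in Xen's address space, so the
     * returned virtual address is valid for as long as we hold it. */
    void *meta = alloc_xenheap_pages(0, MEMF_tmem);  /* order-0 page */
    if ( meta != NULL )
        memset(meta, 0, PAGE_SIZE);  /* direct access, no map/unmap */
    /* ... keep and use the metadata indefinitely ... */
    free_xenheap_pages(meta, 0);     /* order must match allocation */
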
Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
---
 xen/common/tmem.c          |    9 ++++--
 xen/common/tmem_xen.c      |    4 +--
 xen/include/xen/tmem_xen.h |   71 ++++++++++++++++++++++++++++++--------------
 3 files changed, 56 insertions(+), 28 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index a122651..ce6a788 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -352,10 +352,13 @@ static NOINLINE pfp_t *tmem_page_alloc(pool_t *pool)
 {
     pfp_t *pfp = NULL;
 
+    /*
+     * Guest data pages stay in the domain heap and are mapped on access.
+     */
     if ( pool != NULL && is_persistent(pool) )
-        pfp = tmh_alloc_page_thispool(pool);
+        pfp = tmh_alloc_page_thispool(pool,0);
     else
-        pfp = tmh_alloc_page(pool,0);
+        pfp = tmh_alloc_page(pool,0,0);
     if ( pfp == NULL )
         alloc_page_failed++;
     else
@@ -2911,7 +2914,7 @@ EXPORT void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
             read_lock(&tmem_rwlock);
     }
 
-    while ( (pfp = tmh_alloc_page(NULL,1)) == NULL )
+    while ( (pfp = tmh_alloc_page(NULL,1,0)) == NULL )
     {
         if ( (max_evictions-- <= 0) || !tmem_evict())
             break;
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 54ec09f..3496c82 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -353,7 +353,7 @@ static noinline void *tmh_mempool_page_get(unsigned long size)
     struct page_info *pi;
 
     ASSERT(size == PAGE_SIZE);
-    if ( (pi = tmh_alloc_page(NULL,0)) == NULL )
+    if ( (pi = tmh_alloc_page(NULL,0,1)) == NULL )
         return NULL;
     ASSERT(IS_VALID_PAGE(pi));
     return page_to_virt(pi);
@@ -382,7 +382,7 @@ static void *tmh_persistent_pool_page_get(unsigned long size)
     struct domain *d = current->domain;
 
     ASSERT(size == PAGE_SIZE);
-    if ( (pi = _tmh_alloc_page_thispool(d)) == NULL )
+    if ( (pi = _tmh_alloc_page_thispool(d,1)) == NULL )
         return NULL;
     ASSERT(IS_VALID_PAGE(pi));
     return page_to_virt(pi);
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index ad1ddd5..d52d68c 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -156,7 +156,8 @@ static inline void _tmh_free_subpage_thispool(struct xmem_pool *cmem_mempool,
 #define tmh_free_subpage_thispool(_pool, _p, _s) \
  _tmh_free_subpage_thispool(_pool->client->tmh->persistent_pool, _p, _s)
 
-static inline struct page_info *_tmh_alloc_page_thispool(struct domain *d)
+static inline struct page_info *_tmh_alloc_page_thispool(struct domain *d,
+                                                       int xen_heap)
 {
     struct page_info *pi;
 
@@ -166,38 +167,48 @@ static inline struct page_info *_tmh_alloc_page_thispool(struct domain *d)
     if ( d->tot_pages >= d->max_pages )
         return NULL;
 
-    if ( tmh_page_list_pages )
-    {
-        if ( (pi = tmh_page_list_get()) != NULL )
-        {
-            if ( donate_page(d,pi,0) == 0 )
-                goto out;
-            else
-                tmh_page_list_put(pi);
-        }
+    if ( xen_heap )
+    {
+        void *v = alloc_xenheap_pages(0,MEMF_tmem);
+
+        pi = (v == NULL) ? NULL : virt_to_page(v);
+    }
+    else
+    {
+        if ( tmh_page_list_pages && (pi = tmh_page_list_get()) != NULL )
+        {
+            if ( donate_page(d,pi,0) == 0 )
+                goto out;
+            tmh_page_list_put(pi);
+        }
+        pi = alloc_domheap_pages(d,0,MEMF_tmem);
     }
 
-    pi = alloc_domheap_pages(d,0,MEMF_tmem);
-
 out:
     ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
     return pi;
 }
-#define tmh_alloc_page_thispool(_pool) \
-    _tmh_alloc_page_thispool(_pool->client->tmh->domain)
+#define tmh_alloc_page_thispool(_pool,_xen_heap) \
+    _tmh_alloc_page_thispool(_pool->client->tmh->domain,_xen_heap)
 
 static inline void _tmh_free_page_thispool(struct page_info *pi)
 {
     struct domain *d = page_get_owner(pi);
+    int xen_heap = is_xen_heap_page(pi);
 
     ASSERT(IS_VALID_PAGE(pi));
-    if ( (d == NULL) || steal_page(d,pi,0) == 0 )
-        tmh_page_list_put(pi);
+    if ( (d == NULL) || steal_page(d,pi,0) == 0 ) {
+        if ( !xen_heap )
+            tmh_page_list_put(pi);
+    }
     else
     {
         scrub_one_page(pi);
         ASSERT((pi->count_info & ~(PGC_allocated | 1)) == 0);
-        free_domheap_pages(pi,0);
+        if ( xen_heap )
+            free_xenheap_pages(page_to_virt(pi),0);
+        else
+            free_domheap_pages(pi,0);
     }
 }
 #define tmh_free_page_thispool(_pool,_pg) \
@@ -221,12 +232,19 @@ static inline void tmh_free_subpage(void *ptr, size_t size)
     xmem_pool_free(ptr,tmh_mempool);
 }
 
-static inline struct page_info *tmh_alloc_page(void *pool, int no_heap)
+static inline struct page_info *tmh_alloc_page(void *pool, int no_heap,
+                                               int xen_heap)
 {
-    struct page_info *pi = tmh_page_list_get();
+    struct page_info *pi = NULL;
+    if ( !xen_heap )
+        pi = tmh_page_list_get();
 
-    if ( pi == NULL && !no_heap )
-        pi = alloc_domheap_pages(0,0,MEMF_tmem);
+    if ( pi == NULL && !no_heap && xen_heap ) {
+        void *v = alloc_xenheap_pages(0,MEMF_tmem);
+        pi = (v == NULL) ? NULL : virt_to_page(v);
+    }
+    else if ( pi == NULL && !no_heap )
+        pi = alloc_domheap_pages(0,0,MEMF_tmem);
     ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
     if ( pi != NULL && !no_heap )
         atomic_inc(&freeable_page_count);
@@ -235,9 +253,16 @@ static inline struct page_info *tmh_alloc_page(void *pool, int no_heap)
 
 static inline void tmh_free_page(struct page_info *pi)
 {
+    int xen_heap = is_xen_heap_page(pi);
     ASSERT(IS_VALID_PAGE(pi));
-    tmh_page_list_put(pi);
-    atomic_dec(&freeable_page_count);
+    if ( xen_heap ) {
+        scrub_one_page(pi);
+        ASSERT((pi->count_info & ~(PGC_allocated | PGC_xen_heap | 1)) == 0);
+        free_xenheap_pages(page_to_virt(pi),0);
+    } else
+        tmh_page_list_put(pi);
+    /* Balance the atomic_inc() done in tmh_alloc_page() on both paths. */
+    atomic_dec(&freeable_page_count);
 }
 
 static inline unsigned int tmem_subpage_maxsize(void)
-- 
1.7.10.4

