[Xen-devel] [PATCH v2 2/3] tmem: drop direct usage of opt_tmem



Don't use the opt_tmem variable directly to check whether tmem is
enabled; use the tmem_enabled() helper function everywhere instead.
Where the check merely guarded a call to tmem_freeable_pages(), drop
it altogether and have tmem_freeable_pages() itself return 0 when
tmem is disabled.

Signed-off-by: Doug Goldstein <cardoe@xxxxxxxxxx>
---
CC: Keir Fraser <keir@xxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
CC: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>

Changes since v1:
- merged patches 2 and 3
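
For reviewers' context: tmem_enabled() is a thin inline wrapper
around opt_tmem in xen/include/xen/tmem_xen.h, roughly the following
(quoted from memory as a sketch, not verbatim from the tree):

    /* Command-line controlled tmem enable flag. */
    extern bool_t opt_tmem;

    /* Helper used throughout this patch in place of raw opt_tmem reads. */
    static inline bool_t tmem_enabled(void)
    {
        return opt_tmem;
    }

So the conversions below are mechanical, except in page_alloc.c's
check_low_mem_virq() and alloc_heap_pages(), where the guard is
dropped entirely and correctness relies on tmem_freeable_pages()
returning 0 when tmem is disabled (see the tmem.c hunk).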
---
 xen/arch/x86/setup.c    | 2 +-
 xen/common/memory.c     | 2 +-
 xen/common/page_alloc.c | 8 ++++----
 xen/common/tmem.c       | 3 +++
 4 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 5011930..c5c332d 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1272,7 +1272,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
             init_domheap_pages(s, e);
         }
 
-        if ( opt_tmem )
+        if ( tmem_enabled() )
         {
            printk(XENLOG_WARNING
                   "TMEM physical RAM limit exceeded, disabling TMEM\n");
diff --git a/xen/common/memory.c b/xen/common/memory.c
index ef57219..c7fca96 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -202,7 +202,7 @@ static void populate_physmap(struct memop_args *a)
 
                 if ( unlikely(!page) )
                 {
-                    if ( !opt_tmem || a->extent_order )
+                    if ( !tmem_enabled() || a->extent_order )
                         gdprintk(XENLOG_INFO,
                                  "Could not allocate order=%u extent: id=%d 
memflags=%#x (%u of %u)\n",
                                  a->extent_order, d->domain_id, a->memflags,
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 22e8feb..98e30e5 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -652,7 +652,7 @@ static void __init setup_low_mem_virq(void)
 static void check_low_mem_virq(void)
 {
     unsigned long avail_pages = total_avail_pages +
-        (opt_tmem ? tmem_freeable_pages() : 0) - outstanding_claims;
+        tmem_freeable_pages() - outstanding_claims;
 
     if ( unlikely(avail_pages <= low_mem_virq_th) )
     {
@@ -738,7 +738,7 @@ static struct page_info *alloc_heap_pages(
      * Others try tmem pools then fail.  This is a workaround until all
      * post-dom0-creation-multi-page allocations can be eliminated.
      */
-    if ( opt_tmem && ((order == 0) || (order >= 9)) &&
+    if ( ((order == 0) || (order >= 9)) &&
          (total_avail_pages <= midsize_alloc_zone_pages) &&
          tmem_freeable_pages() )
         goto try_tmem;
@@ -984,7 +984,7 @@ static void free_heap_pages(
     avail[node][zone] += 1 << order;
     total_avail_pages += 1 << order;
 
-    if ( opt_tmem )
+    if ( tmem_enabled() )
         midsize_alloc_zone_pages = max(
             midsize_alloc_zone_pages, total_avail_pages / MIDSIZE_ALLOC_FRAC);
 
@@ -1755,7 +1755,7 @@ int assign_pages(
     {
         if ( unlikely((d->tot_pages + (1 << order)) > d->max_pages) )
         {
-            if ( !opt_tmem || order != 0 || d->tot_pages != d->max_pages )
+            if ( !tmem_enabled() || order != 0 || d->tot_pages != d->max_pages )
                 gprintk(XENLOG_INFO, "Over-allocation for domain %u: "
                         "%u > %u\n", d->domain_id,
                         d->tot_pages + (1 << order), d->max_pages);
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 0436e49..16e249a 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -2837,6 +2837,9 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
 
 unsigned long tmem_freeable_pages(void)
 {
+    if ( !tmem_enabled() )
+        return 0;
+
     return tmem_page_list_pages + _atomic_read(freeable_page_count);
 }
 
-- 
2.4.10

