[Xen-changelog] Rename get_order() to get_order_from_bytes() and add

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Rename get_order() to get_order_from_bytes() and add
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 08 Sep 2005 17:26:12 +0000
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID e3fd0fa5836487c6bc641ede6d0fa413887350a8
# Parent  c2705e74efbaba2bf1867a7391e6b76225dd10f9
Rename get_order() to get_order_from_bytes() and add
new function get_order_from_pages(). Fix
HYPERVISOR_memory_op(), properly this time.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
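
For context, the overflow mentioned in the old domain_build.c comment (removed below) is easy to reproduce: with a 32-bit unsigned long, as on x86 PAE, shifting a large page count into a byte count wraps before get_order() ever sees it. A minimal sketch, assuming 4KB pages; the figures are hypothetical:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
    /* Hypothetical: 6GB assigned to domain 0 on a PAE machine. */
    uint32_t max_pages = 6u << (30 - PAGE_SHIFT);   /* 1572864 pages */

    /* Old idiom: get_order(max_pages << PAGE_SHIFT). The byte count
     * wraps modulo 2^32 before the order is ever computed... */
    uint32_t bytes = max_pages << PAGE_SHIFT;
    assert(bytes == (2u << 30));                    /* 6GB wrapped to 2GB */

    /* ...whereas get_order_from_pages(max_pages) works on the page
     * count directly, so no byte count is formed and nothing wraps. */
    return 0;
}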

diff -r c2705e74efba -r e3fd0fa58364 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Thu Sep  8 15:22:01 2005
+++ b/xen/arch/x86/domain.c     Thu Sep  8 17:25:52 2005
@@ -381,11 +381,13 @@
 out:
     free_vmcs(vmcs);
     if(v->arch.arch_vmx.io_bitmap_a != 0) {
-        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_a, get_order(0x1000));
+        free_xenheap_pages(
+            v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000));
         v->arch.arch_vmx.io_bitmap_a = 0;
     }
     if(v->arch.arch_vmx.io_bitmap_b != 0) {
-        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_b, get_order(0x1000));
+        free_xenheap_pages(
+            v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000));
         v->arch.arch_vmx.io_bitmap_b = 0;
     }
     v->arch.arch_vmx.vmcs = 0;
@@ -972,11 +974,13 @@
     BUG_ON(v->arch.arch_vmx.vmcs == NULL);
     free_vmcs(v->arch.arch_vmx.vmcs);
     if(v->arch.arch_vmx.io_bitmap_a != 0) {
-        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_a, get_order(0x1000));
+        free_xenheap_pages(
+            v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000));
         v->arch.arch_vmx.io_bitmap_a = 0;
     }
     if(v->arch.arch_vmx.io_bitmap_b != 0) {
-        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_b, get_order(0x1000));
+        free_xenheap_pages(
+            v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000));
         v->arch.arch_vmx.io_bitmap_b = 0;
     }
     v->arch.arch_vmx.vmcs = 0;
diff -r c2705e74efba -r e3fd0fa58364 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Thu Sep  8 15:22:01 2005
+++ b/xen/arch/x86/domain_build.c       Thu Sep  8 17:25:52 2005
@@ -75,15 +75,12 @@
     struct pfn_info *page;
     unsigned int order;
     /*
-     * Allocate up to 2MB at a time:
-     *  1. This prevents overflow of get_order() when allocating more than
-     *     4GB to domain 0 on a PAE machine.
-     *  2. It prevents allocating very large chunks from DMA pools before
-     *     the >4GB pool is fully depleted.
+     * Allocate up to 2MB at a time: It prevents allocating very large chunks
+     * from DMA pools before the >4GB pool is fully depleted.
      */
     if ( max_pages > (2UL << (20 - PAGE_SHIFT)) )
         max_pages = 2UL << (20 - PAGE_SHIFT);
-    order = get_order(max_pages << PAGE_SHIFT);
+    order = get_order_from_pages(max_pages);
     if ( (max_pages & (max_pages-1)) != 0 )
         order--;
     while ( (page = alloc_domheap_pages(d, order, 0)) == NULL )
@@ -252,7 +249,7 @@
 #endif
     }
 
-    order = get_order(v_end - dsi.v_start);
+    order = get_order_from_bytes(v_end - dsi.v_start);
     if ( (1UL << order) > nr_pages )
         panic("Domain 0 allocation is too small for kernel image.\n");
 
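The power-of-two check after the new call keeps the chunk size rounded down rather than up: get_order_from_pages() rounds up, and over-allocating here could exceed max_pages. A worked sketch, assuming 4KB pages (the helper body mirrors the page.h hunk at the end of this patch; chunk_order() is a hypothetical wrapper for illustration):

#include <assert.h>

/* Smallest order with (1 << order) >= nr_pages; from the page.h hunk. */
static int get_order_from_pages(unsigned long nr_pages)
{
    int order;
    nr_pages--;
    for (order = 0; nr_pages; order++)
        nr_pages >>= 1;
    return order;
}

static int chunk_order(unsigned long max_pages)   /* hypothetical helper */
{
    int order = get_order_from_pages(max_pages);
    if ((max_pages & (max_pages - 1)) != 0)       /* not a power of two? */
        order--;                                  /* round down, not up  */
    return order;
}

int main(void)
{
    assert(chunk_order(512) == 9);   /* 512 pages = 2MB: exact fit        */
    assert(chunk_order(300) == 8);   /* rounds down to 256 pages, not 512 */
    return 0;
}
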
diff -r c2705e74efba -r e3fd0fa58364 xen/arch/x86/vmx_vmcs.c
--- a/xen/arch/x86/vmx_vmcs.c   Thu Sep  8 15:22:01 2005
+++ b/xen/arch/x86/vmx_vmcs.c   Thu Sep  8 17:25:52 2005
@@ -44,7 +44,7 @@
 
     rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
     vmcs_size = vmx_msr_high & 0x1fff;
-    vmcs = alloc_xenheap_pages(get_order(vmcs_size)); 
+    vmcs = alloc_xenheap_pages(get_order_from_bytes(vmcs_size)); 
     memset((char *)vmcs, 0, vmcs_size); /* don't remove this */
 
     vmcs->vmcs_revision_id = vmx_msr_low;
@@ -55,7 +55,7 @@
 {
     int order;
 
-    order = get_order(vmcs_size);
+    order = get_order_from_bytes(vmcs_size);
     free_xenheap_pages(vmcs, order);
 }
 
@@ -76,8 +76,8 @@
     error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
 
     /* need to use 0x1000 instead of PAGE_SIZE */
-    io_bitmap_a = (void*) alloc_xenheap_pages(get_order(0x1000)); 
-    io_bitmap_b = (void*) alloc_xenheap_pages(get_order(0x1000)); 
+    io_bitmap_a = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000)); 
+    io_bitmap_b = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000)); 
     memset(io_bitmap_a, 0xff, 0x1000);
     /* don't bother debug port access */
     clear_bit(PC_DEBUG_PORT, io_bitmap_a);
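
As the in-code comment notes, the VMX I/O bitmaps are architecturally 0x1000 bytes each regardless of PAGE_SIZE. With 4KB pages the computed order is 0, i.e. exactly one page per bitmap; a minimal standalone check (helper body again from the page.h hunk):

#include <assert.h>

#define PAGE_SHIFT 12

static int get_order_from_bytes(unsigned long size)
{
    int order;
    size = (size - 1) >> PAGE_SHIFT;
    for (order = 0; size; order++)
        size >>= 1;
    return order;
}

int main(void)
{
    /* 0x1000 bytes fit exactly one 4KB page, so order 0... */
    assert(get_order_from_bytes(0x1000) == 0);
    /* ...while one byte more would need an order-1 (2-page) block. */
    assert(get_order_from_bytes(0x1001) == 1);
    return 0;
}
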
diff -r c2705e74efba -r e3fd0fa58364 xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c  Thu Sep  8 15:22:01 2005
+++ b/xen/arch/x86/x86_32/mm.c  Thu Sep  8 17:25:52 2005
@@ -118,7 +118,8 @@
     }
 
     /* Set up mapping cache for domain pages. */
-    mapcache_order = get_order(MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
+    mapcache_order = get_order_from_bytes(
+        MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
     mapcache = alloc_xenheap_pages(mapcache_order);
     memset(mapcache, 0, PAGE_SIZE << mapcache_order);
     for ( i = 0; i < (MAPCACHE_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
diff -r c2705e74efba -r e3fd0fa58364 xen/common/memory.c
--- a/xen/common/memory.c       Thu Sep  8 15:22:01 2005
+++ b/xen/common/memory.c       Thu Sep  8 17:25:52 2005
@@ -154,7 +154,8 @@
         reservation.nr_extents -= start_extent;
 
         if ( (reservation.address_bits != 0) &&
-             (reservation.address_bits < (get_order(max_page) + PAGE_SHIFT)) )
+             (reservation.address_bits <
+              (get_order_from_pages(max_page) + PAGE_SHIFT)) )
         {
             if ( reservation.address_bits < 31 )
                 return -ENOMEM;
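
The rewritten condition computes how many address bits the machine's own memory spans: get_order_from_pages(max_page) is the order of the total page-frame count, and adding PAGE_SHIFT converts that to a byte-address width (rounded up to a power-of-two page count). A worked sketch with hypothetical figures, assuming 4KB pages:

#include <assert.h>

#define PAGE_SHIFT 12

static int get_order_from_pages(unsigned long nr_pages)
{
    int order;
    nr_pages--;
    for (order = 0; nr_pages; order++)
        nr_pages >>= 1;
    return order;
}

int main(void)
{
    /* Hypothetical machine with 8GB of RAM: 2^21 4KB page frames. */
    unsigned long max_page = 1UL << 21;
    int machine_bits = get_order_from_pages(max_page) + PAGE_SHIFT;
    assert(machine_bits == 33);        /* 8GB spans 33 address bits */

    /* A reservation restricted to 32-bit addresses is narrower than
     * the machine, so the guarded branch above is taken. */
    assert(32 < machine_bits);
    return 0;
}
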
diff -r c2705e74efba -r e3fd0fa58364 xen/common/trace.c
--- a/xen/common/trace.c        Thu Sep  8 15:22:01 2005
+++ b/xen/common/trace.c        Thu Sep  8 17:25:52 2005
@@ -66,7 +66,7 @@
     }
 
     nr_pages = num_online_cpus() * opt_tbuf_size;
-    order    = get_order(nr_pages * PAGE_SIZE);
+    order    = get_order_from_pages(nr_pages);
     
     if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
     {
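
Switching trace.c from get_order(nr_pages * PAGE_SIZE) to get_order_from_pages(nr_pages) drops the intermediate byte count; whenever that multiply does not overflow, the two forms agree. A quick exhaustive check over small sizes, assuming 4KB pages:

#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int get_order_from_bytes(unsigned long size)
{
    int order;
    size = (size - 1) >> PAGE_SHIFT;
    for (order = 0; size; order++)
        size >>= 1;
    return order;
}

static int get_order_from_pages(unsigned long nr_pages)
{
    int order;
    nr_pages--;
    for (order = 0; nr_pages; order++)
        nr_pages >>= 1;
    return order;
}

int main(void)
{
    /* e.g. num_online_cpus() * opt_tbuf_size, up to 1024 pages here */
    for (unsigned long nr_pages = 1; nr_pages <= 1024; nr_pages++)
        assert(get_order_from_pages(nr_pages) ==
               get_order_from_bytes(nr_pages * PAGE_SIZE));
    return 0;
}
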
diff -r c2705e74efba -r e3fd0fa58364 xen/common/xmalloc.c
--- a/xen/common/xmalloc.c      Thu Sep  8 15:22:01 2005
+++ b/xen/common/xmalloc.c      Thu Sep  8 17:25:52 2005
@@ -86,7 +86,7 @@
 static void *xmalloc_whole_pages(size_t size)
 {
     struct xmalloc_hdr *hdr;
-    unsigned int pageorder = get_order(size);
+    unsigned int pageorder = get_order_from_bytes(size);
 
     hdr = alloc_xenheap_pages(pageorder);
     if ( hdr == NULL )
@@ -159,7 +159,7 @@
     /* Big allocs free directly. */
     if ( hdr->size >= PAGE_SIZE )
     {
-        free_xenheap_pages(hdr, get_order(hdr->size));
+        free_xenheap_pages(hdr, get_order_from_bytes(hdr->size));
         return;
     }
 
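In xmalloc the order is computed twice: from the requested size at allocation and from hdr->size at free. The two call sites stay consistent provided the header records the size the order was derived from, as the free path implies. A minimal sketch of that invariant, assuming 4KB pages:

#include <assert.h>

#define PAGE_SHIFT 12

static int get_order_from_bytes(unsigned long size)
{
    int order;
    size = (size - 1) >> PAGE_SHIFT;
    for (order = 0; size; order++)
        size >>= 1;
    return order;
}

int main(void)
{
    /* Hypothetical big allocation, just over three pages. */
    unsigned long size = 3 * 4096 + 100;

    int alloc_order = get_order_from_bytes(size);   /* 4 pages -> order 2 */
    assert(alloc_order == 2);

    /* If hdr->size stores the same value, the free path recomputes
     * the identical order and releases exactly what was allocated. */
    unsigned long hdr_size = size;
    assert(get_order_from_bytes(hdr_size) == alloc_order);
    return 0;
}
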
diff -r c2705e74efba -r e3fd0fa58364 xen/drivers/char/console.c
--- a/xen/drivers/char/console.c        Thu Sep  8 15:22:01 2005
+++ b/xen/drivers/char/console.c        Thu Sep  8 17:25:52 2005
@@ -627,7 +627,7 @@
     if ( bytes == 0 )
         return 0;
 
-    order = get_order(bytes);
+    order = get_order_from_bytes(bytes);
     debugtrace_buf = alloc_xenheap_pages(order);
     ASSERT(debugtrace_buf != NULL);
 
diff -r c2705e74efba -r e3fd0fa58364 xen/drivers/char/serial.c
--- a/xen/drivers/char/serial.c Thu Sep  8 15:22:01 2005
+++ b/xen/drivers/char/serial.c Thu Sep  8 17:25:52 2005
@@ -366,8 +366,9 @@
 void serial_async_transmit(struct serial_port *port)
 {
     BUG_ON(!port->driver->tx_empty);
-    if ( !port->txbuf )
-        port->txbuf = alloc_xenheap_pages(get_order(SERIAL_TXBUFSZ));
+    if ( port->txbuf == NULL )
+        port->txbuf = alloc_xenheap_pages(
+            get_order_from_bytes(SERIAL_TXBUFSZ));
 }
 
 /*
diff -r c2705e74efba -r e3fd0fa58364 xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h        Thu Sep  8 15:22:01 2005
+++ b/xen/include/asm-x86/page.h        Thu Sep  8 17:25:52 2005
@@ -280,12 +280,21 @@
 
 #ifndef __ASSEMBLY__
 
-static __inline__ int get_order(unsigned long size)
+static inline int get_order_from_bytes(physaddr_t size)
 {
     int order;
     size = (size-1) >> PAGE_SHIFT;
     for ( order = 0; size; order++ )
         size >>= 1;
+    return order;
+}
+
+static inline int get_order_from_pages(unsigned long nr_pages)
+{
+    int order;
+    nr_pages--;
+    for ( order = 0; nr_pages; order++ )
+        nr_pages >>= 1;
     return order;
 }
 

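Putting the two new helpers side by side: get_order_from_bytes() rounds a byte count up to a power-of-two number of pages, while get_order_from_pages() does the same for a page count with no intermediate shift. A standalone sketch with sample values, assuming PAGE_SHIFT == 12 and a plain integer stand-in for Xen's physaddr_t:

#include <assert.h>

#define PAGE_SHIFT 12

typedef unsigned long long physaddr_t;   /* stand-in for Xen's typedef */

/* Smallest order such that (1 << order) pages cover 'size' bytes. */
static int get_order_from_bytes(physaddr_t size)
{
    int order;
    size = (size - 1) >> PAGE_SHIFT;
    for (order = 0; size; order++)
        size >>= 1;
    return order;
}

/* Smallest order such that (1 << order) >= nr_pages. */
static int get_order_from_pages(unsigned long nr_pages)
{
    int order;
    nr_pages--;
    for (order = 0; nr_pages; order++)
        nr_pages >>= 1;
    return order;
}

int main(void)
{
    assert(get_order_from_bytes(0x5000) == 3);   /* 5 pages round up to 8 */
    assert(get_order_from_pages(5)      == 3);   /* same result, no shift */
    assert(get_order_from_pages(512)    == 9);   /* 2MB of 4KB pages      */
    return 0;
}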
