# HG changeset patch
# User iap10@xxxxxxxxxxxxxxxxxxxxx
# Node ID 3bde4219c6813b35eebd9fa436fa5d5ab281f486
# Parent 5db85ba1c4e034a84aaea053052719320cefebfb
# Parent 5321e0858b0d8ab14b42cda79db23a303f95ee73
manual merge
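
The merged changes below replace calls to the generic get_order() macro with the explicit helpers get_order_from_bytes() and get_order_from_pages(), so each caller states whether it is sizing an allocation from a byte count or from a page count. As a rough sketch of the intended semantics (the exact bodies live in xen/include/asm-x86/page.h and are only partly visible in this diff), each helper returns the smallest order such that (1 << order) pages covers the request:

    /* Illustrative sketch only -- not copied verbatim from page.h. */
    static inline int get_order_from_bytes(unsigned long size)
    {
        int order;
        size = (size - 1) >> PAGE_SHIFT;     /* round up to whole pages */
        for ( order = 0; size; order++ )
            size >>= 1;
        return order;
    }

    static inline int get_order_from_pages(unsigned long nr_pages)
    {
        int order;
        nr_pages--;                          /* order 0 covers a single page */
        for ( order = 0; nr_pages; order++ )
            nr_pages >>= 1;
        return order;
    }

With these definitions, get_order_from_bytes(0x1000) is 0 (one 4kB page, assuming PAGE_SHIFT of 12) and get_order_from_pages(512) is 9, which matches how the converted call sites below use them.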
diff -r 5db85ba1c4e0 -r 3bde4219c681 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Thu Sep 8 17:36:23 2005
+++ b/xen/arch/x86/domain.c Thu Sep 8 17:40:37 2005
@@ -381,11 +381,13 @@
out:
free_vmcs(vmcs);
if(v->arch.arch_vmx.io_bitmap_a != 0) {
- free_xenheap_pages(v->arch.arch_vmx.io_bitmap_a, get_order(0x1000));
+ free_xenheap_pages(
+ v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000));
v->arch.arch_vmx.io_bitmap_a = 0;
}
if(v->arch.arch_vmx.io_bitmap_b != 0) {
- free_xenheap_pages(v->arch.arch_vmx.io_bitmap_b, get_order(0x1000));
+ free_xenheap_pages(
+ v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000));
v->arch.arch_vmx.io_bitmap_b = 0;
}
v->arch.arch_vmx.vmcs = 0;
@@ -972,11 +974,13 @@
BUG_ON(v->arch.arch_vmx.vmcs == NULL);
free_vmcs(v->arch.arch_vmx.vmcs);
if(v->arch.arch_vmx.io_bitmap_a != 0) {
- free_xenheap_pages(v->arch.arch_vmx.io_bitmap_a, get_order(0x1000));
+ free_xenheap_pages(
+ v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000));
v->arch.arch_vmx.io_bitmap_a = 0;
}
if(v->arch.arch_vmx.io_bitmap_b != 0) {
- free_xenheap_pages(v->arch.arch_vmx.io_bitmap_b, get_order(0x1000));
+ free_xenheap_pages(
+ v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000));
v->arch.arch_vmx.io_bitmap_b = 0;
}
v->arch.arch_vmx.vmcs = 0;
diff -r 5db85ba1c4e0 -r 3bde4219c681 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c Thu Sep 8 17:36:23 2005
+++ b/xen/arch/x86/domain_build.c Thu Sep 8 17:40:37 2005
@@ -75,15 +75,12 @@
struct pfn_info *page;
unsigned int order;
/*
- * Allocate up to 2MB at a time:
- * 1. This prevents overflow of get_order() when allocating more than
- * 4GB to domain 0 on a PAE machine.
- * 2. It prevents allocating very large chunks from DMA pools before
- * the >4GB pool is fully depleted.
+ * Allocate up to 2MB at a time: It prevents allocating very large chunks
+ * from DMA pools before the >4GB pool is fully depleted.
*/
if ( max_pages > (2UL << (20 - PAGE_SHIFT)) )
max_pages = 2UL << (20 - PAGE_SHIFT);
- order = get_order(max_pages << PAGE_SHIFT);
+ order = get_order_from_pages(max_pages);
if ( (max_pages & (max_pages-1)) != 0 )
order--;
while ( (page = alloc_domheap_pages(d, order, 0)) == NULL )
@@ -252,7 +249,7 @@
#endif
}
- order = get_order(v_end - dsi.v_start);
+ order = get_order_from_bytes(v_end - dsi.v_start);
if ( (1UL << order) > nr_pages )
panic("Domain 0 allocation is too small for kernel image.\n");
diff -r 5db85ba1c4e0 -r 3bde4219c681 xen/arch/x86/vmx_vmcs.c
--- a/xen/arch/x86/vmx_vmcs.c Thu Sep 8 17:36:23 2005
+++ b/xen/arch/x86/vmx_vmcs.c Thu Sep 8 17:40:37 2005
@@ -44,7 +44,7 @@
rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
vmcs_size = vmx_msr_high & 0x1fff;
- vmcs = alloc_xenheap_pages(get_order(vmcs_size));
+ vmcs = alloc_xenheap_pages(get_order_from_bytes(vmcs_size));
memset((char *)vmcs, 0, vmcs_size); /* don't remove this */
vmcs->vmcs_revision_id = vmx_msr_low;
@@ -55,7 +55,7 @@
{
int order;
- order = get_order(vmcs_size);
+ order = get_order_from_bytes(vmcs_size);
free_xenheap_pages(vmcs, order);
}
@@ -76,8 +76,8 @@
error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
/* need to use 0x1000 instead of PAGE_SIZE */
- io_bitmap_a = (void*) alloc_xenheap_pages(get_order(0x1000));
- io_bitmap_b = (void*) alloc_xenheap_pages(get_order(0x1000));
+ io_bitmap_a = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000));
+ io_bitmap_b = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000));
memset(io_bitmap_a, 0xff, 0x1000);
/* don't bother debug port access */
clear_bit(PC_DEBUG_PORT, io_bitmap_a);
diff -r 5db85ba1c4e0 -r 3bde4219c681 xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c Thu Sep 8 17:36:23 2005
+++ b/xen/arch/x86/x86_32/mm.c Thu Sep 8 17:40:37 2005
@@ -118,7 +118,8 @@
}
/* Set up mapping cache for domain pages. */
- mapcache_order = get_order(MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
+ mapcache_order = get_order_from_bytes(
+ MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
mapcache = alloc_xenheap_pages(mapcache_order);
memset(mapcache, 0, PAGE_SIZE << mapcache_order);
for ( i = 0; i < (MAPCACHE_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
diff -r 5db85ba1c4e0 -r 3bde4219c681 xen/common/grant_table.c
--- a/xen/common/grant_table.c Thu Sep 8 17:36:23 2005
+++ b/xen/common/grant_table.c Thu Sep 8 17:40:37 2005
@@ -399,7 +399,7 @@
{
int i;
grant_mapping_t *new_mt;
- grant_table_t *lgt = ld->grant_table;
+ grant_table_t *lgt = ld->grant_table;
if ( (lgt->maptrack_limit << 1) > MAPTRACK_MAX_ENTRIES )
{
@@ -437,9 +437,8 @@
ref, dom, dev_hst_ro_flags);
#endif
- if ( 0 <= ( rc = __gnttab_activate_grant_ref( ld, led, rd, ref,
- dev_hst_ro_flags,
- addr, &frame)))
+ if ( (rc = __gnttab_activate_grant_ref(ld, led, rd, ref, dev_hst_ro_flags,
+ addr, &frame)) >= 0 )
{
/*
* Only make the maptrack live _after_ writing the pte, in case we
@@ -807,7 +806,8 @@
int i;
int result = GNTST_okay;
- for (i = 0; i < count; i++) {
+ for ( i = 0; i < count; i++ )
+ {
gnttab_donate_t *gop = &uop[i];
#if GRANT_DEBUG
printk("gnttab_donate: i=%d mfn=%lx domid=%d gref=%08x\n",
@@ -881,30 +881,6 @@
* headroom. Also, a domain mustn't have PGC_allocated
* pages when it is dying.
*/
-#ifdef GRANT_DEBUG
- if (unlikely(e->tot_pages >= e->max_pages)) {
- printk("gnttab_dontate: no headroom tot_pages=%d max_pages=%d\n",
- e->tot_pages, e->max_pages);
- spin_unlock(&e->page_alloc_lock);
- put_domain(e);
- gop->status = result = GNTST_general_error;
- break;
- }
- if (unlikely(test_bit(DOMFLAGS_DYING, &e->domain_flags))) {
- printk("gnttab_donate: target domain is dying\n");
- spin_unlock(&e->page_alloc_lock);
- put_domain(e);
- gop->status = result = GNTST_general_error;
- break;
- }
- if (unlikely(!gnttab_prepare_for_transfer(e, d, gop->handle))) {
- printk("gnttab_donate: gnttab_prepare_for_transfer fails.\n");
- spin_unlock(&e->page_alloc_lock);
- put_domain(e);
- gop->status = result = GNTST_general_error;
- break;
- }
-#else
ASSERT(e->tot_pages <= e->max_pages);
if (unlikely(test_bit(DOMFLAGS_DYING, &e->domain_flags)) ||
unlikely(e->tot_pages == e->max_pages) ||
@@ -914,11 +890,10 @@
e->tot_pages, e->max_pages, gop->handle, e->d_flags);
spin_unlock(&e->page_alloc_lock);
put_domain(e);
- /* XXX SMH: better error return here would be useful */
gop->status = result = GNTST_general_error;
break;
}
-#endif
+
/* Okay, add the page to 'e'. */
if (unlikely(e->tot_pages++ == 0)) {
get_knownalive_domain(e);
@@ -957,38 +932,38 @@
rc = -EFAULT;
switch ( cmd )
- {
- case GNTTABOP_map_grant_ref:
- if ( unlikely(!array_access_ok(
- uop, count, sizeof(gnttab_map_grant_ref_t))) )
- goto out;
- rc = gnttab_map_grant_ref((gnttab_map_grant_ref_t *)uop, count);
- break;
- case GNTTABOP_unmap_grant_ref:
- if ( unlikely(!array_access_ok(
- uop, count, sizeof(gnttab_unmap_grant_ref_t))) )
- goto out;
- rc = gnttab_unmap_grant_ref((gnttab_unmap_grant_ref_t *)uop,
- count);
- break;
- case GNTTABOP_setup_table:
- rc = gnttab_setup_table((gnttab_setup_table_t *)uop, count);
- break;
+ {
+ case GNTTABOP_map_grant_ref:
+ if ( unlikely(!array_access_ok(
+ uop, count, sizeof(gnttab_map_grant_ref_t))) )
+ goto out;
+ rc = gnttab_map_grant_ref((gnttab_map_grant_ref_t *)uop, count);
+ break;
+ case GNTTABOP_unmap_grant_ref:
+ if ( unlikely(!array_access_ok(
+ uop, count, sizeof(gnttab_unmap_grant_ref_t))) )
+ goto out;
+ rc = gnttab_unmap_grant_ref(
+ (gnttab_unmap_grant_ref_t *)uop, count);
+ break;
+ case GNTTABOP_setup_table:
+ rc = gnttab_setup_table((gnttab_setup_table_t *)uop, count);
+ break;
#if GRANT_DEBUG
- case GNTTABOP_dump_table:
- rc = gnttab_dump_table((gnttab_dump_table_t *)uop);
- break;
+ case GNTTABOP_dump_table:
+ rc = gnttab_dump_table((gnttab_dump_table_t *)uop);
+ break;
#endif
- case GNTTABOP_donate:
- if (unlikely(!array_access_ok(uop, count,
- sizeof(gnttab_donate_t))))
- goto out;
- rc = gnttab_donate(uop, count);
- break;
- default:
- rc = -ENOSYS;
- break;
- }
+ case GNTTABOP_donate:
+ if (unlikely(!array_access_ok(
+ uop, count, sizeof(gnttab_donate_t))))
+ goto out;
+ rc = gnttab_donate(uop, count);
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
+ }
out:
UNLOCK_BIGLOCK(d);
@@ -1021,17 +996,17 @@
lgt = ld->grant_table;
#if GRANT_DEBUG_VERBOSE
- if ( ld->domain_id != 0 ) {
- DPRINTK("Foreign unref rd(%d) ld(%d) frm(%lx) flgs(%x).\n",
- rd->domain_id, ld->domain_id, frame, readonly);
- }
+ if ( ld->domain_id != 0 )
+ DPRINTK("Foreign unref rd(%d) ld(%d) frm(%lx) flgs(%x).\n",
+ rd->domain_id, ld->domain_id, frame, readonly);
#endif
/* Fast exit if we're not mapping anything using grant tables */
if ( lgt->map_count == 0 )
return 0;
- if ( get_domain(rd) == 0 ) {
+ if ( get_domain(rd) == 0 )
+ {
DPRINTK("gnttab_check_unmap: couldn't get_domain rd(%d)\n",
rd->domain_id);
return 0;
@@ -1268,8 +1243,11 @@
for ( i = 0; i < NR_GRANT_FRAMES; i++ )
{
SHARE_PFN_WITH_DOMAIN(
- virt_to_page((char *)(t->shared)+(i*PAGE_SIZE)), d);
- set_pfn_from_mfn((virt_to_phys(t->shared) >> PAGE_SHIFT) + i,
- INVALID_M2P_ENTRY);
+ virt_to_page((char *)t->shared + (i * PAGE_SIZE)),
+ d);
+ set_pfn_from_mfn(
+ (virt_to_phys(t->shared) >> PAGE_SHIFT) + i,
+ INVALID_M2P_ENTRY);
}
/* Okay, install the structure. */
@@ -1306,57 +1284,53 @@
{
map = &gt->maptrack[handle];
- if ( map->ref_and_flags & GNTMAP_device_map )
- {
- dom = map->domid;
- ref = map->ref_and_flags >> MAPTRACK_REF_SHIFT;
-
- DPRINTK("Grant release (%hu) ref:(%hu) flags:(%x) dom:(%hu)\n",
- handle, ref,
- map->ref_and_flags & MAPTRACK_GNTMAP_MASK, dom);
-
- if ( unlikely((rd = find_domain_by_id(dom)) == NULL) ||
- unlikely(ld == rd) )
+ if ( !(map->ref_and_flags & GNTMAP_device_map) )
+ continue;
+
+ dom = map->domid;
+ ref = map->ref_and_flags >> MAPTRACK_REF_SHIFT;
+
+ DPRINTK("Grant release (%hu) ref:(%hu) flags:(%x) dom:(%hu)\n",
+ handle, ref, map->ref_and_flags & MAPTRACK_GNTMAP_MASK, dom);
+
+ if ( unlikely((rd = find_domain_by_id(dom)) == NULL) ||
+ unlikely(ld == rd) )
+ {
+ if ( rd != NULL )
+ put_domain(rd);
+ printk(KERN_WARNING "Grant release: No dom%d\n", dom);
+ continue;
+ }
+
+ act = &rd->grant_table->active[ref];
+ sha = &rd->grant_table->shared[ref];
+
+ spin_lock(&rd->grant_table->lock);
+
+ if ( act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask) )
+ {
+ frame = act->frame;
+
+ if ( ( (act->pin & GNTPIN_hstw_mask) == 0 ) &&
+ ( (act->pin & GNTPIN_devw_mask) > 0 ) )
{
- if ( rd != NULL )
- put_domain(rd);
-
- printk(KERN_WARNING "Grant release: No dom%d\n", dom);
- continue;
+ clear_bit(_GTF_writing, &sha->flags);
+ put_page_type(&frame_table[frame]);
}
- act = &rd->grant_table->active[ref];
- sha = &rd->grant_table->shared[ref];
-
- spin_lock(&rd->grant_table->lock);
-
- if ( act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask) )
+ map->ref_and_flags &= ~GNTMAP_device_map;
+ act->pin &= ~(GNTPIN_devw_mask | GNTPIN_devr_mask);
+ if ( act->pin == 0 )
{
- frame = act->frame;
-
- if ( ( (act->pin & GNTPIN_hstw_mask) == 0 ) &&
- ( (act->pin & GNTPIN_devw_mask) > 0 ) )
- {
- clear_bit(_GTF_writing, &sha->flags);
- put_page_type(&frame_table[frame]);
- }
-
- act->pin &= ~(GNTPIN_devw_mask | GNTPIN_devr_mask);
-
- if ( act->pin == 0 )
- {
- clear_bit(_GTF_reading, &sha->flags);
- map->ref_and_flags = 0;
- put_page(&frame_table[frame]);
- }
- else
- map->ref_and_flags &= ~GNTMAP_device_map;
+ clear_bit(_GTF_reading, &sha->flags);
+ map->ref_and_flags = 0;
+ put_page(&frame_table[frame]);
}
-
- spin_unlock(&rd->grant_table->lock);
-
- put_domain(rd);
- }
+ }
+
+ spin_unlock(&rd->grant_table->lock);
+
+ put_domain(rd);
}
}
diff -r 5db85ba1c4e0 -r 3bde4219c681 xen/common/trace.c
--- a/xen/common/trace.c Thu Sep 8 17:36:23 2005
+++ b/xen/common/trace.c Thu Sep 8 17:40:37 2005
@@ -66,7 +66,7 @@
}
nr_pages = num_online_cpus() * opt_tbuf_size;
- order = get_order(nr_pages * PAGE_SIZE);
+ order = get_order_from_pages(nr_pages);
if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
{
diff -r 5db85ba1c4e0 -r 3bde4219c681 xen/common/xmalloc.c
--- a/xen/common/xmalloc.c Thu Sep 8 17:36:23 2005
+++ b/xen/common/xmalloc.c Thu Sep 8 17:40:37 2005
@@ -86,7 +86,7 @@
static void *xmalloc_whole_pages(size_t size)
{
struct xmalloc_hdr *hdr;
- unsigned int pageorder = get_order(size);
+ unsigned int pageorder = get_order_from_bytes(size);
hdr = alloc_xenheap_pages(pageorder);
if ( hdr == NULL )
@@ -159,7 +159,7 @@
/* Big allocs free directly. */
if ( hdr->size >= PAGE_SIZE )
{
- free_xenheap_pages(hdr, get_order(hdr->size));
+ free_xenheap_pages(hdr, get_order_from_bytes(hdr->size));
return;
}
diff -r 5db85ba1c4e0 -r 3bde4219c681 xen/drivers/char/console.c
--- a/xen/drivers/char/console.c Thu Sep 8 17:36:23 2005
+++ b/xen/drivers/char/console.c Thu Sep 8 17:40:37 2005
@@ -627,7 +627,7 @@
if ( bytes == 0 )
return 0;
- order = get_order(bytes);
+ order = get_order_from_bytes(bytes);
debugtrace_buf = alloc_xenheap_pages(order);
ASSERT(debugtrace_buf != NULL);
diff -r 5db85ba1c4e0 -r 3bde4219c681 xen/drivers/char/serial.c
--- a/xen/drivers/char/serial.c Thu Sep 8 17:36:23 2005
+++ b/xen/drivers/char/serial.c Thu Sep 8 17:40:37 2005
@@ -366,8 +366,9 @@
void serial_async_transmit(struct serial_port *port)
{
BUG_ON(!port->driver->tx_empty);
- if ( !port->txbuf )
- port->txbuf = alloc_xenheap_pages(get_order(SERIAL_TXBUFSZ));
+ if ( port->txbuf == NULL )
+ port->txbuf = alloc_xenheap_pages(
+ get_order_from_bytes(SERIAL_TXBUFSZ));
}
/*
diff -r 5db85ba1c4e0 -r 3bde4219c681 xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h Thu Sep 8 17:36:23 2005
+++ b/xen/include/asm-x86/page.h Thu Sep 8 17:40:37 2005
@@ -298,8 +298,6 @@
return order;
}
-#define get_order(s) get_order_from_bytes(s)
-
/* Allocator functions for Xen pagetables. */
struct pfn_info *alloc_xen_pagetable(void);
void free_xen_pagetable(struct pfn_info *pg);