WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] Changes so that Xen can be compiled with gcc 4.0.2: (by

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Changes so that Xen can be compiled with gcc 4.0.2: (by Tristan Gingold)
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Sun, 20 Nov 2005 12:58:09 +0000
Delivery-date: Sun, 20 Nov 2005 12:58:45 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User djm@xxxxxxxxxxxxxxx
# Node ID 40fc727dd1c0095d42beaba7b8c2930f907ae58d
# Parent  36cea432bbed8f1405664835e8062646b6a96322
Changes so that Xen can be compiled with gcc 4.0.2: (by Tristan Gingold)
* functions must be declared before being called.
* a cast or a conditional expression is not an lvalue.
* a function cannot be declared static and redeclared extern (or reverse).
* gcc 4.0.2 missed a range optimization (ia64_setreg_unknown_kr).
* ia64_ksyms is not used (removed from Makefile).
* (added by Dan M: since now modified, move gfp.h from linux to linux-xen)

diff -r 36cea432bbed -r 40fc727dd1c0 xen/arch/ia64/Makefile
--- a/xen/arch/ia64/Makefile    Wed Nov 16 22:59:41 2005
+++ b/xen/arch/ia64/Makefile    Wed Nov 16 23:45:36 2005
@@ -2,7 +2,7 @@
 
 VPATH = xen vmx linux linux-xen
 
-OBJS = xensetup.o setup.o time.o irq.o ia64_ksyms.o process.o smp.o \
+OBJS = xensetup.o setup.o time.o irq.o process.o smp.o \
        xenmisc.o acpi.o hypercall.o \
        machvec.o dom0_ops.o domain.o hpsimserial.o pcdp.o \
        idle0_task.o pal.o hpsim.o efi.o efi_stub.o ivt.o mm_contig.o \
@@ -10,7 +10,7 @@
        extable.o linuxextable.o sort.o xenirq.o xentime.o \
        regionreg.o entry.o unaligned.o privop.o vcpu.o \
        irq_ia64.o irq_lsapic.o vhpt.o xenasm.o hyperprivop.o dom_fw.o \
-       grant_table.o sn_console.o
+       grant_table.o sn_console.o # ia64_ksyms.o 
 
 OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o\
        vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
@@ -45,7 +45,7 @@
 
 asm-xsi-offsets.s: asm-xsi-offsets.c 
        $(CC) $(CFLAGS) -S -o $@ $<
-       
+
 $(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h: asm-xsi-offsets.s
        @(set -e; \
          echo "/*"; \
diff -r 36cea432bbed -r 40fc727dd1c0 xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c  Wed Nov 16 22:59:41 2005
+++ b/xen/arch/ia64/vmx/mmio.c  Wed Nov 16 23:45:36 2005
@@ -49,6 +49,8 @@
 #define PIB_OFST_INTA           0x1E0000
 #define PIB_OFST_XTP            0x1E0008
 
+static int write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value);
+
 static void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int 
ma)
 {
     switch (pib_off) {
diff -r 36cea432bbed -r 40fc727dd1c0 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Wed Nov 16 22:59:41 2005
+++ b/xen/arch/ia64/vmx/vmmu.c  Wed Nov 16 23:45:36 2005
@@ -157,11 +157,13 @@
     printk("Allocate domain vhpt at 0x%lx\n", (u64)vbase);
     memset(vbase, 0, VCPU_TLB_SIZE);
     vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
-    vhpt = --((thash_cb_t*)vcur);
+    vcur -= sizeof (thash_cb_t);
+    vhpt = vcur;
     vhpt->ht = THASH_VHPT;
     vhpt->vcpu = d;
     vhpt->hash_func = machine_thash;
-    vs = --((vhpt_special *)vcur);
+    vs -= sizeof (vhpt_special);
+    vs = vcur;
 
     /* Setup guest pta */
     pta_value.val = 0;
@@ -199,10 +201,12 @@
     printk("Allocate domain tlb at 0x%lx\n", (u64)vbase);
     memset(vbase, 0, VCPU_TLB_SIZE);
     vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
-    tlb = --((thash_cb_t*)vcur);
+    vcur -= sizeof (thash_cb_t);
+    tlb = vcur;
     tlb->ht = THASH_TLB;
     tlb->vcpu = d;
-    ts = --((tlb_special_t *)vcur);
+    vcur -= sizeof (tlb_special_t);
+    ts = vcur;
     tlb->ts = ts;
     tlb->ts->vhpt = init_domain_vhpt(d);
     tlb->hash_func = machine_thash;
diff -r 36cea432bbed -r 40fc727dd1c0 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Wed Nov 16 22:59:41 2005
+++ b/xen/arch/ia64/xen/vcpu.c  Wed Nov 16 23:45:36 2005
@@ -146,6 +146,9 @@
        ia64_set_kr(6, VCPU(vcpu, krs[6]));
        ia64_set_kr(7, VCPU(vcpu, krs[7]));
 }
+
+/* GCC 4.0.2 seems not to be able to suppress this call!.  */
+#define ia64_setreg_unknown_kr() return IA64_ILLOP_FAULT
 
 IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
 {
diff -r 36cea432bbed -r 40fc727dd1c0 
xen/include/asm-ia64/linux-xen/linux/README.origin
--- a/xen/include/asm-ia64/linux-xen/linux/README.origin        Wed Nov 16 
22:59:41 2005
+++ b/xen/include/asm-ia64/linux-xen/linux/README.origin        Wed Nov 16 
23:45:36 2005
@@ -6,5 +6,6 @@
 # easily updated to future versions of the corresponding Linux files.
 
 cpumask.h              -> linux/include/linux/cpumask.h
+gfp.h                  -> linux/include/linux/gfp.h
 hardirq.h              -> linux/include/linux/hardirq.h
 interrupt.h            -> linux/include/linux/interrupt.h
diff -r 36cea432bbed -r 40fc727dd1c0 
xen/include/asm-ia64/linux-xen/linux/interrupt.h
--- a/xen/include/asm-ia64/linux-xen/linux/interrupt.h  Wed Nov 16 22:59:41 2005
+++ b/xen/include/asm-ia64/linux-xen/linux/interrupt.h  Wed Nov 16 23:45:36 2005
@@ -131,7 +131,9 @@
 extern void softirq_init(void);
 #define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << 
(nr); } while (0)
 extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
+#ifndef XEN
 extern void FASTCALL(raise_softirq(unsigned int nr));
+#endif
 
 
 /* Tasklets --- multithreaded analogue of BHs.
diff -r 36cea432bbed -r 40fc727dd1c0 xen/include/asm-ia64/linux/README.origin
--- a/xen/include/asm-ia64/linux/README.origin  Wed Nov 16 22:59:41 2005
+++ b/xen/include/asm-ia64/linux/README.origin  Wed Nov 16 23:45:36 2005
@@ -10,7 +10,6 @@
 dma-mapping.h          ->linux/include/linux/dma-mapping.h
 efi.h                  ->linux/include/linux/efi.h
 err.h                  ->linux/include/linux/err.h
-gfp.h                  ->linux/include/linux/gfp.h
 initrd.h               ->linux/include/linux/initrd.h
 jiffies.h              ->linux/include/linux/jiffies.h
 kmalloc_sizes.h                ->linux/include/linux/kmalloc_sizes.h
diff -r 36cea432bbed -r 40fc727dd1c0 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Wed Nov 16 22:59:41 2005
+++ b/xen/include/asm-ia64/mm.h Wed Nov 16 23:45:36 2005
@@ -3,7 +3,7 @@
 
 #include <xen/config.h>
 #ifdef LINUX_2_6
-#include <xen/gfp.h>
+#include <linux/gfp.h>
 #endif
 #include <xen/list.h>
 #include <xen/spinlock.h>
diff -r 36cea432bbed -r 40fc727dd1c0 xen/include/asm-ia64/vmmu.h
--- a/xen/include/asm-ia64/vmmu.h       Wed Nov 16 22:59:41 2005
+++ b/xen/include/asm-ia64/vmmu.h       Wed Nov 16 23:45:36 2005
@@ -222,7 +222,10 @@
 #define ITR(hcb,id)             ((hcb)->ts->itr[id])
 #define DTR(hcb,id)             ((hcb)->ts->dtr[id])
 #define INVALIDATE_HASH(hcb,hash)           {   \
-           INVALID_ENTRY(hcb, hash) = 1;        \
+           if ((hcb)->ht==THASH_TLB)            \
+             INVALID_TLB(hash) = 1;             \
+           else                                 \
+             INVALID_VHPT(hash) = 1;            \
            hash->next = NULL; }
 
 #define PURGABLE_ENTRY(hcb,en)  1
diff -r 36cea432bbed -r 40fc727dd1c0 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h   Wed Nov 16 22:59:41 2005
+++ b/xen/include/asm-ia64/vmx_vcpu.h   Wed Nov 16 23:45:36 2005
@@ -71,7 +71,9 @@
 extern thash_cb_t *vmx_vcpu_get_vhpt(VCPU *vcpu);
 extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
 extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
+#if 0
 extern IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
+#endif
 extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
 IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
 extern IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 
ifa);
diff -r 36cea432bbed -r 40fc727dd1c0 xen/include/asm-ia64/linux-xen/linux/gfp.h
--- /dev/null   Wed Nov 16 22:59:41 2005
+++ b/xen/include/asm-ia64/linux-xen/linux/gfp.h        Wed Nov 16 23:45:36 2005
@@ -0,0 +1,145 @@
+#ifndef __LINUX_GFP_H
+#define __LINUX_GFP_H
+
+#ifdef XEN
+#include <asm/bitops.h>
+#endif
+#include <linux/mmzone.h>
+#include <linux/stddef.h>
+#include <linux/linkage.h>
+#include <linux/config.h>
+
+struct vm_area_struct;
+
+/*
+ * GFP bitmasks..
+ */
+/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
+#define __GFP_DMA      0x01u
+#define __GFP_HIGHMEM  0x02u
+
+/*
+ * Action modifiers - doesn't change the zoning
+ *
+ * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
+ * _might_ fail.  This depends upon the particular VM implementation.
+ *
+ * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
+ * cannot handle allocation failures.
+ *
+ * __GFP_NORETRY: The VM implementation must not retry indefinitely.
+ */
+#define __GFP_WAIT     0x10u   /* Can wait and reschedule? */
+#define __GFP_HIGH     0x20u   /* Should access emergency pools? */
+#define __GFP_IO       0x40u   /* Can start physical IO? */
+#define __GFP_FS       0x80u   /* Can call down to low-level FS? */
+#define __GFP_COLD     0x100u  /* Cache-cold page required */
+#define __GFP_NOWARN   0x200u  /* Suppress page allocation failure warning */
+#define __GFP_REPEAT   0x400u  /* Retry the allocation.  Might fail */
+#define __GFP_NOFAIL   0x800u  /* Retry for ever.  Cannot fail */
+#define __GFP_NORETRY  0x1000u /* Do not retry.  Might fail */
+#define __GFP_NO_GROW  0x2000u /* Slab internal usage */
+#define __GFP_COMP     0x4000u /* Add compound page metadata */
+#define __GFP_ZERO     0x8000u /* Return zeroed page on success */
+#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */
+#define __GFP_NORECLAIM  0x20000u /* No realy zone reclaim during allocation */
+
+#define __GFP_BITS_SHIFT 20    /* Room for 20 __GFP_FOO bits */
+#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
+
+/* if you forget to add the bitmask here kernel will crash, period */
+#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
+                       __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
+                       __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
+                       __GFP_NOMEMALLOC|__GFP_NORECLAIM)
+
+#define GFP_ATOMIC     (__GFP_HIGH)
+#define GFP_NOIO       (__GFP_WAIT)
+#define GFP_NOFS       (__GFP_WAIT | __GFP_IO)
+#define GFP_KERNEL     (__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_USER       (__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_HIGHUSER   (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)
+
+/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
+   platforms, used as appropriate on others */
+
+#define GFP_DMA                __GFP_DMA
+
+
+/*
+ * There is only one page-allocator function, and two main namespaces to
+ * it. The alloc_page*() variants return 'struct page *' and as such
+ * can allocate highmem pages, the *get*page*() variants return
+ * virtual kernel addresses to the allocated page(s).
+ */
+
+/*
+ * We get the zone list from the current node and the gfp_mask.
+ * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
+ *
+ * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
+ * optimized to &contig_page_data at compile-time.
+ */
+
+#ifndef HAVE_ARCH_FREE_PAGE
+static inline void arch_free_page(struct page *page, int order) { }
+#endif
+
+extern struct page *
+FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));
+
+static inline struct page *alloc_pages_node(int nid, unsigned int __nocast 
gfp_mask,
+                                               unsigned int order)
+{
+       if (unlikely(order >= MAX_ORDER))
+               return NULL;
+
+       return __alloc_pages(gfp_mask, order,
+               NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
+}
+
+#ifdef CONFIG_NUMA
+extern struct page *alloc_pages_current(unsigned int __nocast gfp_mask, 
unsigned order);
+
+static inline struct page *
+alloc_pages(unsigned int __nocast gfp_mask, unsigned int order)
+{
+       if (unlikely(order >= MAX_ORDER))
+               return NULL;
+
+       return alloc_pages_current(gfp_mask, order);
+}
+extern struct page *alloc_page_vma(unsigned __nocast gfp_mask,
+                       struct vm_area_struct *vma, unsigned long addr);
+#else
+#define alloc_pages(gfp_mask, order) \
+               alloc_pages_node(numa_node_id(), gfp_mask, order)
+#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
+#endif
+#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
+
+extern unsigned long FASTCALL(__get_free_pages(unsigned int __nocast gfp_mask, 
unsigned int order));
+extern unsigned long FASTCALL(get_zeroed_page(unsigned int __nocast gfp_mask));
+
+#define __get_free_page(gfp_mask) \
+               __get_free_pages((gfp_mask),0)
+
+#define __get_dma_pages(gfp_mask, order) \
+               __get_free_pages((gfp_mask) | GFP_DMA,(order))
+
+extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
+extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
+extern void FASTCALL(free_hot_page(struct page *page));
+extern void FASTCALL(free_cold_page(struct page *page));
+
+#define __free_page(page) __free_pages((page), 0)
+#define free_page(addr) free_pages((addr),0)
+
+void page_alloc_init(void);
+#ifdef CONFIG_NUMA
+void drain_remote_pages(void);
+#else
+static inline void drain_remote_pages(void) { };
+#endif
+
+#endif /* __LINUX_GFP_H */
diff -r 36cea432bbed -r 40fc727dd1c0 xen/include/asm-ia64/linux/gfp.h
--- a/xen/include/asm-ia64/linux/gfp.h  Wed Nov 16 22:59:41 2005
+++ /dev/null   Wed Nov 16 23:45:36 2005
@@ -1,142 +0,0 @@
-#ifndef __LINUX_GFP_H
-#define __LINUX_GFP_H
-
-#include <linux/mmzone.h>
-#include <linux/stddef.h>
-#include <linux/linkage.h>
-#include <linux/config.h>
-
-struct vm_area_struct;
-
-/*
- * GFP bitmasks..
- */
-/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
-#define __GFP_DMA      0x01u
-#define __GFP_HIGHMEM  0x02u
-
-/*
- * Action modifiers - doesn't change the zoning
- *
- * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
- * _might_ fail.  This depends upon the particular VM implementation.
- *
- * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures.
- *
- * __GFP_NORETRY: The VM implementation must not retry indefinitely.
- */
-#define __GFP_WAIT     0x10u   /* Can wait and reschedule? */
-#define __GFP_HIGH     0x20u   /* Should access emergency pools? */
-#define __GFP_IO       0x40u   /* Can start physical IO? */
-#define __GFP_FS       0x80u   /* Can call down to low-level FS? */
-#define __GFP_COLD     0x100u  /* Cache-cold page required */
-#define __GFP_NOWARN   0x200u  /* Suppress page allocation failure warning */
-#define __GFP_REPEAT   0x400u  /* Retry the allocation.  Might fail */
-#define __GFP_NOFAIL   0x800u  /* Retry for ever.  Cannot fail */
-#define __GFP_NORETRY  0x1000u /* Do not retry.  Might fail */
-#define __GFP_NO_GROW  0x2000u /* Slab internal usage */
-#define __GFP_COMP     0x4000u /* Add compound page metadata */
-#define __GFP_ZERO     0x8000u /* Return zeroed page on success */
-#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */
-#define __GFP_NORECLAIM  0x20000u /* No realy zone reclaim during allocation */
-
-#define __GFP_BITS_SHIFT 20    /* Room for 20 __GFP_FOO bits */
-#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
-
-/* if you forget to add the bitmask here kernel will crash, period */
-#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
-                       __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
-                       __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
-                       __GFP_NOMEMALLOC|__GFP_NORECLAIM)
-
-#define GFP_ATOMIC     (__GFP_HIGH)
-#define GFP_NOIO       (__GFP_WAIT)
-#define GFP_NOFS       (__GFP_WAIT | __GFP_IO)
-#define GFP_KERNEL     (__GFP_WAIT | __GFP_IO | __GFP_FS)
-#define GFP_USER       (__GFP_WAIT | __GFP_IO | __GFP_FS)
-#define GFP_HIGHUSER   (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)
-
-/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
-   platforms, used as appropriate on others */
-
-#define GFP_DMA                __GFP_DMA
-
-
-/*
- * There is only one page-allocator function, and two main namespaces to
- * it. The alloc_page*() variants return 'struct page *' and as such
- * can allocate highmem pages, the *get*page*() variants return
- * virtual kernel addresses to the allocated page(s).
- */
-
-/*
- * We get the zone list from the current node and the gfp_mask.
- * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
- *
- * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
- * optimized to &contig_page_data at compile-time.
- */
-
-#ifndef HAVE_ARCH_FREE_PAGE
-static inline void arch_free_page(struct page *page, int order) { }
-#endif
-
-extern struct page *
-FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));
-
-static inline struct page *alloc_pages_node(int nid, unsigned int __nocast 
gfp_mask,
-                                               unsigned int order)
-{
-       if (unlikely(order >= MAX_ORDER))
-               return NULL;
-
-       return __alloc_pages(gfp_mask, order,
-               NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
-}
-
-#ifdef CONFIG_NUMA
-extern struct page *alloc_pages_current(unsigned int __nocast gfp_mask, 
unsigned order);
-
-static inline struct page *
-alloc_pages(unsigned int __nocast gfp_mask, unsigned int order)
-{
-       if (unlikely(order >= MAX_ORDER))
-               return NULL;
-
-       return alloc_pages_current(gfp_mask, order);
-}
-extern struct page *alloc_page_vma(unsigned __nocast gfp_mask,
-                       struct vm_area_struct *vma, unsigned long addr);
-#else
-#define alloc_pages(gfp_mask, order) \
-               alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
-#endif
-#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-
-extern unsigned long FASTCALL(__get_free_pages(unsigned int __nocast gfp_mask, 
unsigned int order));
-extern unsigned long FASTCALL(get_zeroed_page(unsigned int __nocast gfp_mask));
-
-#define __get_free_page(gfp_mask) \
-               __get_free_pages((gfp_mask),0)
-
-#define __get_dma_pages(gfp_mask, order) \
-               __get_free_pages((gfp_mask) | GFP_DMA,(order))
-
-extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
-extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
-extern void FASTCALL(free_hot_page(struct page *page));
-extern void FASTCALL(free_cold_page(struct page *page));
-
-#define __free_page(page) __free_pages((page), 0)
-#define free_page(addr) free_pages((addr),0)
-
-void page_alloc_init(void);
-#ifdef CONFIG_NUMA
-void drain_remote_pages(void);
-#else
-static inline void drain_remote_pages(void) { };
-#endif
-
-#endif /* __LINUX_GFP_H */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] Changes so that Xen can be compiled with gcc 4.0.2: (by Tristan Gingold), Xen patchbot -unstable <=