# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 1055f276cc4dd5b43ff9e9938a53c8b56c47c1fc
# Parent e3aa5b2387ac479a55ed3754e6c4db51a0f916b3
[IA64] Remove header files where "page" is used
This patch is "step2" which we showed by the following mail.
http://lists.xensource.com/archives/html/xen-ia64-devel/2006-03/msg00305.html
Signed-off-by: Akio Takebe <takebe_akio@xxxxxxxxxxxxxx>
Signed-off-by: Masaki Kanno <kanno.masaki@xxxxxxxxxxxxxx>
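
For context: the linux-xen headers touched below are shared with the Linux origin tree, and the patch relies on the XEN preprocessor symbol to compile the Linux-only struct page helpers out of Xen builds while leaving the Linux side untouched. A minimal, self-contained sketch of that guard pattern follows (illustrative only; the -DXEN command-line define and the helper name are assumptions, not part of this changeset):

/* guard_sketch.c - hypothetical illustration of the #ifndef XEN pattern.
 * "Linux" build:  cc guard_sketch.c -o guard && ./guard
 * "Xen" build:    cc -DXEN guard_sketch.c -o guard && ./guard
 */
#include <stdio.h>

#ifndef XEN
/* Linux-only helper: present only when XEN is not defined. */
static int linux_only_page_helper(void)
{
	return 42;
}
#endif

int main(void)
{
#ifndef XEN
	printf("Linux build: helper returns %d\n", linux_only_page_helper());
#else
	printf("Xen build: the Linux-only helpers are compiled out\n");
#endif
	return 0;
}
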
diff -r e3aa5b2387ac -r 1055f276cc4d xen/arch/ia64/xen/mm_init.c
--- a/xen/arch/ia64/xen/mm_init.c Thu Mar 16 19:22:37 2006
+++ b/xen/arch/ia64/xen/mm_init.c Fri Mar 17 20:23:01 2006
@@ -69,7 +69,7 @@
struct page_info *zero_page_memmap_ptr; /* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);
-#ifdef XEN
+#ifndef XEN
void *high_memory;
EXPORT_SYMBOL(high_memory);
diff -r e3aa5b2387ac -r 1055f276cc4d xen/arch/ia64/xen/xenmisc.c
--- a/xen/arch/ia64/xen/xenmisc.c Thu Mar 16 19:22:37 2006
+++ b/xen/arch/ia64/xen/xenmisc.c Fri Mar 17 20:23:01 2006
@@ -112,23 +112,6 @@
return frame;
}
#endif
-
-///////////////////////////////
-// from arch/ia64/page_alloc.c
-///////////////////////////////
-DEFINE_PER_CPU(struct page_state, page_states) = {0};
-unsigned long totalram_pages;
-
-void __mod_page_state(unsigned long offset, unsigned long delta)
-{
- unsigned long flags;
- void* ptr;
-
- local_irq_save(flags);
- ptr = &__get_cpu_var(page_states);
- *(unsigned long*)(ptr + offset) += delta;
- local_irq_restore(flags);
-}
///////////////////////////////
// from arch/x86/flushtlb.c
diff -r e3aa5b2387ac -r 1055f276cc4d xen/include/asm-ia64/linux-xen/asm/page.h
--- a/xen/include/asm-ia64/linux-xen/asm/page.h Thu Mar 16 19:22:37 2006
+++ b/xen/include/asm-ia64/linux-xen/asm/page.h Fri Mar 17 20:23:01 2006
@@ -75,7 +75,7 @@
flush_dcache_page(page); \
} while (0)
-
+#ifndef XEN
#define alloc_zeroed_user_highpage(vma, vaddr) \
({ \
struct page *page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr); \
@@ -83,6 +83,7 @@
flush_dcache_page(page); \
page; \
})
+#endif
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
diff -r e3aa5b2387ac -r 1055f276cc4d xen/include/asm-ia64/linux-xen/asm/pgalloc.h
--- a/xen/include/asm-ia64/linux-xen/asm/pgalloc.h Thu Mar 16 19:22:37 2006
+++ b/xen/include/asm-ia64/linux-xen/asm/pgalloc.h Fri Mar 17 20:23:01 2006
@@ -106,11 +106,13 @@
#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
+#ifndef XEN
static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
{
pmd_val(*pmd_entry) = page_to_maddr(pte);
}
+#endif
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
@@ -118,11 +120,13 @@
pmd_val(*pmd_entry) = __pa(pte);
}
+#ifndef XEN
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long addr)
{
return virt_to_page(pgtable_quicklist_alloc());
}
+#endif
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long addr)
@@ -130,6 +134,7 @@
return pgtable_quicklist_alloc();
}
+#ifndef XEN
static inline void pte_free(struct page *pte)
{
pgtable_quicklist_free(page_address(pte));
@@ -141,6 +146,7 @@
}
#define __pte_free_tlb(tlb, pte) pte_free(pte)
+#endif
extern void check_pgt_cache(void);
diff -r e3aa5b2387ac -r 1055f276cc4d xen/include/asm-ia64/linux-xen/asm/pgtable.h
--- a/xen/include/asm-ia64/linux-xen/asm/pgtable.h Thu Mar 16 19:22:37 2006
+++ b/xen/include/asm-ia64/linux-xen/asm/pgtable.h Fri Mar 17 20:23:01 2006
@@ -467,8 +467,10 @@
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+#ifndef XEN
extern struct page *zero_page_memmap_ptr;
#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)
+#endif
/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA
diff -r e3aa5b2387ac -r 1055f276cc4d xen/include/asm-ia64/linux-xen/linux/gfp.h
--- a/xen/include/asm-ia64/linux-xen/linux/gfp.h Thu Mar 16 19:22:37 2006
+++ b/xen/include/asm-ia64/linux-xen/linux/gfp.h Fri Mar 17 20:23:01 2006
@@ -82,11 +82,11 @@
* optimized to &contig_page_data at compile-time.
*/
+#ifndef XEN
#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
-#ifndef XEN
extern struct page *
FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));
@@ -119,7 +119,6 @@
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-#endif /* XEN */
extern unsigned long FASTCALL(__get_free_pages(unsigned int __nocast gfp_mask,
unsigned int order));
extern unsigned long FASTCALL(get_zeroed_page(unsigned int __nocast gfp_mask));
@@ -137,6 +136,7 @@
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)
+#endif /* XEN */
void page_alloc_init(void);
#ifdef CONFIG_NUMA
diff -r e3aa5b2387ac -r 1055f276cc4d xen/include/asm-ia64/linux/README.origin
--- a/xen/include/asm-ia64/linux/README.origin Thu Mar 16 19:22:37 2006
+++ b/xen/include/asm-ia64/linux/README.origin Fri Mar 17 20:23:01 2006
@@ -16,10 +16,8 @@
linkage.h -> linux/include/linux/linkage.h
notifier.h -> linux/include/linux/notifier.h
numa.h -> linux/include/linux/numa.h
-page-flags.h -> linux/include/linux/page-flags.h
percpu.h -> linux/include/linux/percpu.h
preempt.h -> linux/include/linux/preempt.h
-rbtree.h -> linux/include/linux/rbtree.h
rwsem.h -> linux/include/linux/rwsem.h
seqlock.h -> linux/include/linux/seqlock.h
sort.h -> linux/include/linux/sort.h
diff -r e3aa5b2387ac -r 1055f276cc4d xen/include/asm-ia64/linux/asm-generic/README.origin
--- a/xen/include/asm-ia64/linux/asm-generic/README.origin Thu Mar 16 19:22:37 2006
+++ b/xen/include/asm-ia64/linux/asm-generic/README.origin Fri Mar 17 20:23:01 2006
@@ -10,7 +10,6 @@
errno.h -> linux/include/asm-generic/errno.h
ide_iops.h -> linux/include/asm-generic/ide_iops.h
iomap.h -> linux/include/asm-generic/iomap.h
-pci-dma-compat.h -> linux/include/asm-generic/pci-dma-compat.h
pci.h -> linux/include/asm-generic/pci.h
pgtable.h -> linux/include/asm-generic/pgtable.h
pgtable-nopud.h -> linux/include/asm-generic/pgtable-nopud.h
diff -r e3aa5b2387ac -r 1055f276cc4d xen/include/asm-ia64/linux/asm/README.origin
--- a/xen/include/asm-ia64/linux/asm/README.origin Thu Mar 16 19:22:37 2006
+++ b/xen/include/asm-ia64/linux/asm/README.origin Fri Mar 17 20:23:01 2006
@@ -42,7 +42,6 @@
rse.h -> linux/include/asm-ia64/rse.h
rwsem.h -> linux/include/asm-ia64/rwsem.h
sal.h -> linux/include/asm-ia64/sal.h
-scatterlist.h -> linux/include/asm-ia64/scatterlist.h
sections.h -> linux/include/asm-ia64/sections.h
semaphore.h -> linux/include/asm-ia64/semaphore.h
setup.h -> linux/include/asm-ia64/setup.h
diff -r e3aa5b2387ac -r 1055f276cc4d xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Thu Mar 16 19:22:37 2006
+++ b/xen/include/asm-ia64/mm.h Fri Mar 17 20:23:01 2006
@@ -9,8 +9,6 @@
#include <xen/spinlock.h>
#include <xen/perfc.h>
#include <xen/sched.h>
-
-#include <linux/rbtree.h>
#include <asm/processor.h>
#include <asm/atomic.h>
diff -r e3aa5b2387ac -r 1055f276cc4d xen/include/asm-ia64/linux/asm-generic/pci-dma-compat.h
--- a/xen/include/asm-ia64/linux/asm-generic/pci-dma-compat.h Thu Mar 16 19:22:37 2006
+++ /dev/null Fri Mar 17 20:23:01 2006
@@ -1,107 +0,0 @@
-/* include this file if the platform implements the dma_ DMA Mapping API
- * and wants to provide the pci_ DMA Mapping API in terms of it */
-
-#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
-#define _ASM_GENERIC_PCI_DMA_COMPAT_H
-
-#include <linux/dma-mapping.h>
-
-/* note pci_set_dma_mask isn't here, since it's a public function
- * exported from drivers/pci, use dma_supported instead */
-
-static inline int
-pci_dma_supported(struct pci_dev *hwdev, u64 mask)
-{
- return dma_supported(hwdev == NULL ? NULL : &hwdev->dev, mask);
-}
-
-static inline void *
-pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle)
-{
- return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void
-pci_free_consistent(struct pci_dev *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t
-pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
-{
- return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
-}
-
-static inline dma_addr_t
-pci_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction)
-{
- return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
- size_t size, int direction)
-{
- dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
-}
-
-static inline int
-pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
- int nelems, int direction)
-{
- dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
- int nelems, int direction)
-{
- dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
-}
-
-static inline int
-pci_dma_mapping_error(dma_addr_t dma_addr)
-{
- return dma_mapping_error(dma_addr);
-}
-
-#endif
diff -r e3aa5b2387ac -r 1055f276cc4d xen/include/asm-ia64/linux/asm/scatterlist.h
--- a/xen/include/asm-ia64/linux/asm/scatterlist.h Thu Mar 16 19:22:37 2006
+++ /dev/null Fri Mar 17 20:23:01 2006
@@ -1,28 +0,0 @@
-#ifndef _ASM_IA64_SCATTERLIST_H
-#define _ASM_IA64_SCATTERLIST_H
-
-/*
- * Modified 1998-1999, 2001-2002, 2004
- * David Mosberger-Tang <davidm@xxxxxxxxxx>, Hewlett-Packard Co
- */
-
-struct scatterlist {
- struct page *page;
- unsigned int offset;
- unsigned int length; /* buffer length */
-
- dma_addr_t dma_address;
- unsigned int dma_length;
-};
-
-/*
- * It used to be that ISA_DMA_THRESHOLD had something to do with the
- * DMA-limits of ISA-devices. Nowadays, its only remaining use (apart
- * from the aha1542.c driver, which isn't 64-bit clean anyhow) is to
- * tell the block-layer (via BLK_BOUNCE_ISA) what the max. physical
- * address of a page is that is allocated with GFP_DMA. On IA-64,
- * that's 4GB - 1.
- */
-#define ISA_DMA_THRESHOLD 0xffffffff
-
-#endif /* _ASM_IA64_SCATTERLIST_H */
diff -r e3aa5b2387ac -r 1055f276cc4d xen/include/asm-ia64/linux/page-flags.h
--- a/xen/include/asm-ia64/linux/page-flags.h Thu Mar 16 19:22:37 2006
+++ /dev/null Fri Mar 17 20:23:01 2006
@@ -1,324 +0,0 @@
-/*
- * Macros for manipulating and testing page->flags
- */
-
-#ifndef PAGE_FLAGS_H
-#define PAGE_FLAGS_H
-
-#include <linux/percpu.h>
-#include <linux/cache.h>
-#include <asm/pgtable.h>
-
-/*
- * Various page->flags bits:
- *
- * PG_reserved is set for special pages, which can never be swapped out. Some
- * of them might not even exist (eg empty_bad_page)...
- *
- * The PG_private bitflag is set if page->private contains a valid value.
- *
- * During disk I/O, PG_locked is used. This bit is set before I/O and
- * reset when I/O completes. page_waitqueue(page) is a wait queue of all tasks
- * waiting for the I/O on this page to complete.
- *
- * PG_uptodate tells whether the page's contents is valid. When a read
- * completes, the page becomes uptodate, unless a disk I/O error happened.
- *
- * For choosing which pages to swap out, inode pages carry a PG_referenced bit,
- * which is set any time the system accesses that page through the (mapping,
- * index) hash table. This referenced bit, together with the referenced bit
- * in the page tables, is used to manipulate page->age and move the page across
- * the active, inactive_dirty and inactive_clean lists.
- *
- * Note that the referenced bit, the page->lru list_head and the active,
- * inactive_dirty and inactive_clean lists are protected by the
- * zone->lru_lock, and *NOT* by the usual PG_locked bit!
- *
- * PG_error is set to indicate that an I/O error occurred on this page.
- *
- * PG_arch_1 is an architecture specific page state bit. The generic code
- * guarantees that this bit is cleared for a page when it first is entered into
- * the page cache.
- *
- * PG_highmem pages are not permanently mapped into the kernel virtual address
- * space, they need to be kmapped separately for doing IO on the pages. The
- * struct page (these bits with information) are always mapped into kernel
- * address space...
- */
-
-/*
- * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break
- * locked- and dirty-page accounting. The top eight bits of page->flags are
- * used for page->zone, so putting flag bits there doesn't work.
- */
-#define PG_locked 0 /* Page is locked. Don't touch. */
-#define PG_error 1
-#define PG_referenced 2
-#define PG_uptodate 3
-
-#define PG_dirty 4
-#define PG_lru 5
-#define PG_active 6
-#define PG_slab 7 /* slab debug (Suparna wants this) */
-
-#define PG_checked 8 /* kill me in 2.5.<early>. */
-#define PG_arch_1 9
-#define PG_reserved 10
-#define PG_private 11 /* Has something at ->private */
-
-#define PG_writeback 12 /* Page is under writeback */
-#define PG_nosave 13 /* Used for system suspend/resume */
-#define PG_compound 14 /* Part of a compound page */
-#define PG_swapcache 15 /* Swap page: swp_entry_t in private */
-
-#define PG_mappedtodisk 16 /* Has blocks allocated on-disk */
-#define PG_reclaim 17 /* To be reclaimed asap */
-#define PG_nosave_free 18 /* Free, should not be written */
-#define PG_uncached 19 /* Page has been mapped as uncached */
-
-/*
- * Global page accounting. One instance per CPU. Only unsigned longs are
- * allowed.
- */
-struct page_state {
- unsigned long nr_dirty; /* Dirty writeable pages */
- unsigned long nr_writeback; /* Pages under writeback */
- unsigned long nr_unstable; /* NFS unstable pages */
- unsigned long nr_page_table_pages;/* Pages used for pagetables */
- unsigned long nr_mapped; /* mapped into pagetables */
- unsigned long nr_slab; /* In slab */
-#define GET_PAGE_STATE_LAST nr_slab
-
- /*
- * The below are zeroed by get_page_state(). Use get_full_page_state()
- * to add up all these.
- */
- unsigned long pgpgin; /* Disk reads */
- unsigned long pgpgout; /* Disk writes */
- unsigned long pswpin; /* swap reads */
- unsigned long pswpout; /* swap writes */
- unsigned long pgalloc_high; /* page allocations */
-
- unsigned long pgalloc_normal;
- unsigned long pgalloc_dma;
- unsigned long pgfree; /* page freeings */
- unsigned long pgactivate; /* pages moved inactive->active */
- unsigned long pgdeactivate; /* pages moved active->inactive */
-
- unsigned long pgfault; /* faults (major+minor) */
- unsigned long pgmajfault; /* faults (major only) */
- unsigned long pgrefill_high; /* inspected in refill_inactive_zone */
- unsigned long pgrefill_normal;
- unsigned long pgrefill_dma;
-
- unsigned long pgsteal_high; /* total highmem pages reclaimed */
- unsigned long pgsteal_normal;
- unsigned long pgsteal_dma;
- unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
- unsigned long pgscan_kswapd_normal;
-
- unsigned long pgscan_kswapd_dma;
- unsigned long pgscan_direct_high;/* total highmem pages scanned */
- unsigned long pgscan_direct_normal;
- unsigned long pgscan_direct_dma;
- unsigned long pginodesteal; /* pages reclaimed via inode freeing */
-
- unsigned long slabs_scanned; /* slab objects scanned */
- unsigned long kswapd_steal; /* pages reclaimed by kswapd */
- unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
- unsigned long pageoutrun; /* kswapd's calls to page reclaim */
- unsigned long allocstall; /* direct reclaim calls */
-
- unsigned long pgrotated; /* pages rotated to tail of the LRU */
- unsigned long nr_bounce; /* pages for bounce buffers */
-};
-
-extern void get_page_state(struct page_state *ret);
-extern void get_full_page_state(struct page_state *ret);
-extern unsigned long __read_page_state(unsigned long offset);
-extern void __mod_page_state(unsigned long offset, unsigned long delta);
-
-#define read_page_state(member) \
- __read_page_state(offsetof(struct page_state, member))
-
-#define mod_page_state(member, delta) \
- __mod_page_state(offsetof(struct page_state, member), (delta))
-
-#define inc_page_state(member) mod_page_state(member, 1UL)
-#define dec_page_state(member) mod_page_state(member, 0UL - 1)
-#define add_page_state(member,delta) mod_page_state(member, (delta))
-#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))
-
-#define mod_page_state_zone(zone, member, delta) \
- do { \
- unsigned offset; \
- if (is_highmem(zone)) \
- offset = offsetof(struct page_state, member##_high); \
- else if (is_normal(zone)) \
- offset = offsetof(struct page_state, member##_normal); \
- else \
- offset = offsetof(struct page_state, member##_dma); \
- __mod_page_state(offset, (delta)); \
- } while (0)
-
-/*
- * Manipulation of page state flags
- */
-#define PageLocked(page) \
- test_bit(PG_locked, &(page)->flags)
-#define SetPageLocked(page) \
- set_bit(PG_locked, &(page)->flags)
-#define TestSetPageLocked(page) \
- test_and_set_bit(PG_locked, &(page)->flags)
-#define ClearPageLocked(page) \
- clear_bit(PG_locked, &(page)->flags)
-#define TestClearPageLocked(page) \
- test_and_clear_bit(PG_locked, &(page)->flags)
-
-#define PageError(page) test_bit(PG_error, &(page)->flags)
-#define SetPageError(page) set_bit(PG_error, &(page)->flags)
-#define ClearPageError(page) clear_bit(PG_error, &(page)->flags)
-
-#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags)
-#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
-#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
-#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
-
-#define PageUptodate(page) test_bit(PG_uptodate, &(page)->flags)
-#ifndef SetPageUptodate
-#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
-#endif
-#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
-
-#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
-#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
-#define TestSetPageDirty(page) test_and_set_bit(PG_dirty, &(page)->flags)
-#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags)
-#define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)
-
-#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags)
-#define PageLRU(page) test_bit(PG_lru, &(page)->flags)
-#define TestSetPageLRU(page) test_and_set_bit(PG_lru, &(page)->flags)
-#define TestClearPageLRU(page) test_and_clear_bit(PG_lru, &(page)->flags)
-
-#define PageActive(page) test_bit(PG_active, &(page)->flags)
-#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
-#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
-#define TestClearPageActive(page) test_and_clear_bit(PG_active, &(page)->flags)
-#define TestSetPageActive(page) test_and_set_bit(PG_active, &(page)->flags)
-
-#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
-#define SetPageSlab(page) set_bit(PG_slab, &(page)->flags)
-#define ClearPageSlab(page) clear_bit(PG_slab, &(page)->flags)
-#define TestClearPageSlab(page) test_and_clear_bit(PG_slab, &(page)->flags)
-#define TestSetPageSlab(page) test_and_set_bit(PG_slab, &(page)->flags)
-
-#ifdef CONFIG_HIGHMEM
-#define PageHighMem(page) is_highmem(page_zone(page))
-#else
-#define PageHighMem(page) 0 /* needed to optimize away at compile time */
-#endif
-
-#define PageChecked(page) test_bit(PG_checked, &(page)->flags)
-#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
-#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
-
-#define PageReserved(page) test_bit(PG_reserved, &(page)->flags)
-#define SetPageReserved(page) set_bit(PG_reserved, &(page)->flags)
-#define ClearPageReserved(page) clear_bit(PG_reserved, &(page)->flags)
-#define __ClearPageReserved(page) __clear_bit(PG_reserved, &(page)->flags)
-
-#define SetPagePrivate(page) set_bit(PG_private, &(page)->flags)
-#define ClearPagePrivate(page) clear_bit(PG_private, &(page)->flags)
-#define PagePrivate(page) test_bit(PG_private, &(page)->flags)
-#define __SetPagePrivate(page) __set_bit(PG_private, &(page)->flags)
-#define __ClearPagePrivate(page) __clear_bit(PG_private, &(page)->flags)
-
-#define PageWriteback(page) test_bit(PG_writeback, &(page)->flags)
-#define SetPageWriteback(page) \
- do { \
- if (!test_and_set_bit(PG_writeback, \
- &(page)->flags)) \
- inc_page_state(nr_writeback); \
- } while (0)
-#define TestSetPageWriteback(page) \
- ({ \
- int ret; \
- ret = test_and_set_bit(PG_writeback, \
- &(page)->flags); \
- if (!ret) \
- inc_page_state(nr_writeback); \
- ret; \
- })
-#define ClearPageWriteback(page) \
- do { \
- if (test_and_clear_bit(PG_writeback, \
- &(page)->flags)) \
- dec_page_state(nr_writeback); \
- } while (0)
-#define TestClearPageWriteback(page) \
- ({ \
- int ret; \
- ret = test_and_clear_bit(PG_writeback, \
- &(page)->flags); \
- if (ret) \
- dec_page_state(nr_writeback); \
- ret; \
- })
-
-#define PageNosave(page) test_bit(PG_nosave, &(page)->flags)
-#define SetPageNosave(page) set_bit(PG_nosave, &(page)->flags)
-#define TestSetPageNosave(page) test_and_set_bit(PG_nosave, &(page)->flags)
-#define ClearPageNosave(page) clear_bit(PG_nosave, &(page)->flags)
-#define TestClearPageNosave(page) test_and_clear_bit(PG_nosave, &(page)->flags)
-
-#define PageNosaveFree(page) test_bit(PG_nosave_free, &(page)->flags)
-#define SetPageNosaveFree(page) set_bit(PG_nosave_free, &(page)->flags)
-#define ClearPageNosaveFree(page) clear_bit(PG_nosave_free, &(page)->flags)
-
-#define PageMappedToDisk(page) test_bit(PG_mappedtodisk, &(page)->flags)
-#define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
-#define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)
-
-#define PageReclaim(page) test_bit(PG_reclaim, &(page)->flags)
-#define SetPageReclaim(page) set_bit(PG_reclaim, &(page)->flags)
-#define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags)
-#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
-
-#ifdef CONFIG_HUGETLB_PAGE
-#define PageCompound(page) test_bit(PG_compound, &(page)->flags)
-#else
-#define PageCompound(page) 0
-#endif
-#define SetPageCompound(page) set_bit(PG_compound, &(page)->flags)
-#define ClearPageCompound(page) clear_bit(PG_compound, &(page)->flags)
-
-#ifdef CONFIG_SWAP
-#define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags)
-#define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags)
-#define ClearPageSwapCache(page) clear_bit(PG_swapcache, &(page)->flags)
-#else
-#define PageSwapCache(page) 0
-#endif
-
-#define PageUncached(page) test_bit(PG_uncached, &(page)->flags)
-#define SetPageUncached(page) set_bit(PG_uncached, &(page)->flags)
-#define ClearPageUncached(page) clear_bit(PG_uncached, &(page)->flags)
-
-struct page; /* forward declaration */
-
-int test_clear_page_dirty(struct page *page);
-int test_clear_page_writeback(struct page *page);
-int test_set_page_writeback(struct page *page);
-
-static inline void clear_page_dirty(struct page *page)
-{
- test_clear_page_dirty(page);
-}
-
-static inline void set_page_writeback(struct page *page)
-{
- test_set_page_writeback(page);
-}
-
-#endif /* PAGE_FLAGS_H */
diff -r e3aa5b2387ac -r 1055f276cc4d xen/include/asm-ia64/linux/rbtree.h
--- a/xen/include/asm-ia64/linux/rbtree.h Thu Mar 16 19:22:37 2006
+++ /dev/null Fri Mar 17 20:23:01 2006
@@ -1,141 +0,0 @@
-/*
- Red Black Trees
- (C) 1999 Andrea Arcangeli <andrea@xxxxxxx>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- linux/include/linux/rbtree.h
-
- To use rbtrees you'll have to implement your own insert and search cores.
- This will avoid us to use callbacks and to drop drammatically performances.
- I know it's not the cleaner way, but in C (not in C++) to get
- performances and genericity...
-
- Some example of insert and search follows here. The search is a plain
- normal search over an ordered tree. The insert instead must be implemented
- int two steps: as first thing the code must insert the element in
- order as a red leaf in the tree, then the support library function
- rb_insert_color() must be called. Such function will do the
- not trivial work to rebalance the rbtree if necessary.
-
------------------------------------------------------------------------
-static inline struct page * rb_search_page_cache(struct inode * inode,
- unsigned long offset)
-{
- struct rb_node * n = inode->i_rb_page_cache.rb_node;
- struct page * page;
-
- while (n)
- {
- page = rb_entry(n, struct page, rb_page_cache);
-
- if (offset < page->offset)
- n = n->rb_left;
- else if (offset > page->offset)
- n = n->rb_right;
- else
- return page;
- }
- return NULL;
-}
-
-static inline struct page * __rb_insert_page_cache(struct inode * inode,
- unsigned long offset,
- struct rb_node * node)
-{
- struct rb_node ** p = &inode->i_rb_page_cache.rb_node;
- struct rb_node * parent = NULL;
- struct page * page;
-
- while (*p)
- {
- parent = *p;
- page = rb_entry(parent, struct page, rb_page_cache);
-
- if (offset < page->offset)
- p = &(*p)->rb_left;
- else if (offset > page->offset)
- p = &(*p)->rb_right;
- else
- return page;
- }
-
- rb_link_node(node, parent, p);
-
- return NULL;
-}
-
-static inline struct page * rb_insert_page_cache(struct inode * inode,
- unsigned long offset,
- struct rb_node * node)
-{
- struct page * ret;
- if ((ret = __rb_insert_page_cache(inode, offset, node)))
- goto out;
- rb_insert_color(node, &inode->i_rb_page_cache);
- out:
- return ret;
-}
------------------------------------------------------------------------
-*/
-
-#ifndef _LINUX_RBTREE_H
-#define _LINUX_RBTREE_H
-
-#include <linux/kernel.h>
-#include <linux/stddef.h>
-
-struct rb_node
-{
- struct rb_node *rb_parent;
- int rb_color;
-#define RB_RED 0
-#define RB_BLACK 1
- struct rb_node *rb_right;
- struct rb_node *rb_left;
-};
-
-struct rb_root
-{
- struct rb_node *rb_node;
-};
-
-#define RB_ROOT (struct rb_root) { NULL, }
-#define rb_entry(ptr, type, member) container_of(ptr, type, member)
-
-extern void rb_insert_color(struct rb_node *, struct rb_root *);
-extern void rb_erase(struct rb_node *, struct rb_root *);
-
-/* Find logical next and previous nodes in a tree */
-extern struct rb_node *rb_next(struct rb_node *);
-extern struct rb_node *rb_prev(struct rb_node *);
-extern struct rb_node *rb_first(struct rb_root *);
-extern struct rb_node *rb_last(struct rb_root *);
-
-/* Fast replacement of a single node without remove/rebalance/add/rebalance */
-extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
- struct rb_root *root);
-
-static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
- struct rb_node ** rb_link)
-{
- node->rb_parent = parent;
- node->rb_color = RB_RED;
- node->rb_left = node->rb_right = NULL;
-
- *rb_link = node;
-}
-
-#endif /* _LINUX_RBTREE_H */
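
The xenmisc.c hunk near the top drops __mod_page_state(), the offsetof-based counter update that the removed page-flags.h expected; with that header gone from the Xen tree, nothing references it any more. For readers unfamiliar with the idiom, here is a stand-alone sketch of that accounting scheme as a plain userspace program (the struct and function names are illustrative, not the kernel's; the per-CPU copies and interrupt masking of the original are omitted):

/* counter_sketch.c - illustrative offsetof-based counter update, modelled
 * loosely on the removed __mod_page_state()/mod_page_state() pair. A single
 * global instance stands in for the kernel's per-CPU page_states copies.
 */
#include <stdio.h>
#include <stddef.h>

struct page_stats {
	unsigned long nr_dirty;
	unsigned long nr_writeback;
};

static struct page_stats stats;

/* Bump the counter that lives 'offset' bytes into struct page_stats. */
static void mod_stat(size_t offset, unsigned long delta)
{
	char *base = (char *)&stats;

	*(unsigned long *)(base + offset) += delta;
}

#define mod_stat_member(member, delta) \
	mod_stat(offsetof(struct page_stats, member), (delta))

int main(void)
{
	mod_stat_member(nr_dirty, 3);
	mod_stat_member(nr_writeback, 1);
	printf("dirty=%lu writeback=%lu\n", stats.nr_dirty, stats.nr_writeback);
	return 0;
}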