[IA64] Use page_list_head and the page_list_* accessors.
Convert struct list_head uses to struct page_list_head and the page_list_* accessors, for consistency with the x86 code.
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
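
For reference, the patch moves the ia64 code onto the page_list_* accessors
(page_list_first, page_list_next, page_list_is_eol, page_list_add_tail, ...)
that the common and x86 code already use. The stand-alone sketch below models
only the iteration idiom being adopted; the types and function bodies are
illustrative stand-ins, not the real definitions from xen/include/xen/mm.h.

/* Toy model of the page_list iteration idiom; all names here are
 * illustrative stand-ins, not Xen's definitions. */
#include <stddef.h>
#include <stdio.h>

struct page_info {
    struct page_info *next;     /* per-page link (stand-in) */
    unsigned long mfn;
};

struct page_list_head {
    struct page_info *first;
};

static struct page_info *page_list_first(struct page_list_head *h)
{
    return h->first;
}

static struct page_info *page_list_next(struct page_info *p,
                                        struct page_list_head *h)
{
    (void)h;                    /* the real API takes the head, too */
    return p->next;
}

static int page_list_is_eol(struct page_info *p, struct page_list_head *h)
{
    (void)h;
    return p == NULL;           /* toy lists are NULL-terminated */
}

int main(void)
{
    struct page_info pages[3] = {
        { &pages[1], 10 }, { &pages[2], 11 }, { NULL, 12 }
    };
    struct page_list_head list = { &pages[0] };
    struct page_info *page;

    /* Same shape as the rewritten relinquish_memory() loop below. */
    for ( page = page_list_first(&list);
          !page_list_is_eol(page, &list);
          page = page_list_next(page, &list) )
        printf("mfn %lu\n", page->mfn);

    return 0;
}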
diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c
+++ b/xen/arch/ia64/xen/domain.c
@@ -608,7 +608,7 @@ int arch_domain_create(struct domain *d,
memset(&d->arch.mm, 0, sizeof(d->arch.mm));
d->arch.relres = RELRES_not_started;
d->arch.mm_teardown_offset = 0;
- INIT_LIST_HEAD(&d->arch.relmem_list);
+ INIT_PAGE_LIST_HEAD(&d->arch.relmem_list);
if ((d->arch.mm.pgd = pgd_alloc(&d->arch.mm)) == NULL)
goto fail_nomem;
@@ -1626,10 +1626,10 @@ int arch_set_info_guest(struct vcpu *v,
return rc;
}
-static int relinquish_memory(struct domain *d, struct list_head *list)
+static int relinquish_memory(struct domain *d, struct page_list_head *list)
{
- struct list_head *ent;
struct page_info *page;
+ struct page_info *cur;
#ifndef __ia64__
unsigned long x, y;
#endif
@@ -1637,16 +1637,17 @@ static int relinquish_memory(struct doma
/* Use a recursive lock, as we may enter 'free_domheap_page'. */
spin_lock_recursive(&d->page_alloc_lock);
- ent = list->next;
- while ( ent != list )
+
+ page = page_list_first(list);
+ while ( !page_list_is_eol(page, list) )
{
- page = list_entry(ent, struct page_info, list);
/* Grab a reference to the page so it won't disappear from under us. */
if ( unlikely(!get_page(page, d)) )
{
/* Couldn't get a reference -- someone is freeing this page. */
- ent = ent->next;
- list_move_tail(&page->list, &d->arch.relmem_list);
+ cur = page;
+ page = page_list_next(page, list);
+ page_list_move_tail(cur, list, &d->arch.relmem_list);
continue;
}
@@ -1681,10 +1682,11 @@ static int relinquish_memory(struct doma
#endif
/* Follow the list chain and /then/ potentially free the page. */
- ent = ent->next;
BUG_ON(get_gpfn_from_mfn(page_to_mfn(page)) != INVALID_M2P_ENTRY);
- list_move_tail(&page->list, &d->arch.relmem_list);
- put_page(page);
+ cur = page;
+ page = page_list_next(page, list);
+ page_list_move_tail(cur, list, &d->arch.relmem_list);
+ put_page(cur);
if (hypercall_preempt_check()) {
ret = -EAGAIN;
@@ -1692,7 +1694,7 @@ static int relinquish_memory(struct doma
}
}
- list_splice_init(&d->arch.relmem_list, list);
+ page_list_splice_init(&d->arch.relmem_list, list);
out:
spin_unlock_recursive(&d->page_alloc_lock);
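
A note on the loop above: the iterator is advanced (page = page_list_next(...))
before page_list_move_tail() unlinks the current element, since once the
element has been moved its link fields belong to relmem_list. A minimal
stand-alone sketch of this save-then-advance pattern, with toy types that are
not Xen's:

/* Save-then-advance idiom as used in relinquish_memory(); the types
 * here are stand-ins, not Xen's. */
#include <stddef.h>
#include <stdio.h>

struct node { struct node *next; int id; };

int main(void)
{
    struct node n3 = { NULL, 3 }, n2 = { &n3, 2 }, n1 = { &n2, 1 };
    struct node *moved = NULL;          /* stand-in for relmem_list */
    struct node *p = &n1, *cur;

    while ( p != NULL )
    {
        cur = p;
        p = p->next;                    /* advance first ... */
        cur->next = moved;              /* ... then move: this rewrites
                                         * cur's link, so iterating via
                                         * cur would walk the wrong list */
        moved = cur;
    }

    for ( p = moved; p != NULL; p = p->next )
        printf("moved node %d\n", p->id);   /* prints 3 2 1 */
    return 0;
}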
diff --git a/xen/arch/ia64/xen/mm.c b/xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c
+++ b/xen/arch/ia64/xen/mm.c
@@ -474,7 +474,7 @@ share_xen_page_with_guest(struct page_in
page->count_info |= PGC_allocated | 1;
if ( unlikely(d->xenheap_pages++ == 0) )
get_knownalive_domain(d);
- list_add_tail(&page->list, &d->xenpage_list);
+ page_list_add_tail(page, &d->xenpage_list);
}
// grant_table_destroy() releases these pages.
@@ -2856,7 +2856,7 @@ steal_page(struct domain *d, struct page
/* Unlink from original owner. */
if ( !(memflags & MEMF_no_refcount) )
d->tot_pages--;
- list_del(&page->list);
+ page_list_del(page, &d->page_list);
spin_unlock(&d->page_alloc_lock);
perfc_incr(steal_page);
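
Note the interface difference visible in this hunk: list_del() needs only the
node, while page_list_del() also takes the list head. With a NULL-terminated
head/tail representation (rather than a circular list_head), unlinking the
first or last element must update the head itself. A toy model, again with
illustrative types only:

/* Why deletion needs the head once lists are NULL-terminated head/tail
 * structures; types are illustrative, not Xen's. */
#include <stddef.h>
#include <stdio.h>

struct page_info { struct page_info *prev, *next; int id; };
struct page_list_head { struct page_info *first, *last; };

static void toy_page_list_del(struct page_info *p, struct page_list_head *h)
{
    if ( p->prev != NULL )
        p->prev->next = p->next;
    else
        h->first = p->next;     /* deleting the first element */
    if ( p->next != NULL )
        p->next->prev = p->prev;
    else
        h->last = p->prev;      /* deleting the last element */
}

int main(void)
{
    struct page_info a = { NULL, NULL, 1 }, b = { NULL, NULL, 2 };
    struct page_list_head h = { &a, &b };

    a.next = &b;
    b.prev = &a;

    toy_page_list_del(&a, &h);  /* head must be updated: h.first = &b */
    printf("first is now %d\n", h.first->id);
    return 0;
}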
diff --git a/xen/arch/ia64/xen/tlb_track.c b/xen/arch/ia64/xen/tlb_track.c
--- a/xen/arch/ia64/xen/tlb_track.c
+++ b/xen/arch/ia64/xen/tlb_track.c
@@ -56,7 +56,7 @@ tlb_track_allocate_entries(struct tlb_tr
return -ENOMEM;
}
- list_add(&entry_page->list, &tlb_track->page_list);
+ page_list_add(entry_page, &tlb_track->page_list);
track_entries = (struct tlb_track_entry*)page_to_virt(entry_page);
allocated = PAGE_SIZE / sizeof(track_entries[0]);
tlb_track->num_entries += allocated;
@@ -93,7 +93,7 @@ tlb_track_create(struct domain* d)
tlb_track->limit = TLB_TRACK_LIMIT_ENTRIES;
tlb_track->num_entries = 0;
tlb_track->num_free = 0;
- INIT_LIST_HEAD(&tlb_track->page_list);
+ INIT_PAGE_LIST_HEAD(&tlb_track->page_list);
if (tlb_track_allocate_entries(tlb_track) < 0)
goto out;
@@ -136,8 +136,8 @@ tlb_track_destroy(struct domain* d)
spin_lock(&tlb_track->free_list_lock);
BUG_ON(tlb_track->num_free != tlb_track->num_entries);
- list_for_each_entry_safe(page, next, &tlb_track->page_list, list) {
- list_del(&page->list);
+ page_list_for_each_safe(page, next, &tlb_track->page_list) {
+ page_list_del(page, &tlb_track->page_list);
free_domheap_page(page);
}
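
page_list_for_each_safe() plays the role of list_for_each_entry_safe(): it
caches the successor so the loop body may free the current page. A
hypothetical macro of the same shape (not Xen's actual definition) shows why
the cached successor makes freeing safe:

/* Hypothetical _safe iteration macro, same shape as
 * page_list_for_each_safe() but not Xen's definition. */
#include <stdio.h>
#include <stdlib.h>

struct page_info { struct page_info *next; };
struct page_list_head { struct page_info *first; };

#define toy_list_for_each_safe(pos, tmp, head)                  \
    for ( (pos) = (head)->first;                                \
          (pos) != NULL && ((tmp) = (pos)->next, 1);            \
          (pos) = (tmp) )

int main(void)
{
    struct page_list_head list = { NULL };
    struct page_info *page, *next;
    int i;

    for ( i = 0; i < 4; i++ )           /* build a small list */
    {
        page = malloc(sizeof(*page));
        if ( page == NULL )
            return 1;
        page->next = list.first;
        list.first = page;
    }

    /* Freeing 'page' in the body is safe: 'next' was cached first. */
    toy_list_for_each_safe(page, next, &list)
        free(page);
    list.first = NULL;

    puts("list torn down");
    return 0;
}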
diff --git a/xen/include/asm-ia64/domain.h b/xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h
+++ b/xen/include/asm-ia64/domain.h
@@ -10,6 +10,7 @@
#include <asm/vmx_platform.h>
#include <xen/list.h>
#include <xen/cpumask.h>
+#include <xen/mm.h>
#include <asm/fpswa.h>
#include <xen/rangeset.h>
@@ -224,7 +225,7 @@ struct arch_domain {
/* Continuable mm_teardown() */
unsigned long mm_teardown_offset;
/* Continuable domain_relinquish_resources() */
- struct list_head relmem_list;
+ struct page_list_head relmem_list;
};
#define INT_ENABLE_OFFSET(v) \
(sizeof(vcpu_info_t) * (v)->vcpu_id + \
diff --git a/xen/include/asm-ia64/mm.h b/xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h
+++ b/xen/include/asm-ia64/mm.h
@@ -39,10 +39,23 @@ typedef unsigned long page_flags_t;
#define PRtype_info "016lx"
+#if 0
+/*
+ * See include/xen/mm.h.
+ * For now, give up on compressing struct page_info,
+ * given IA64_MAX_PHYS_BITS and the page size.
+ */
+#undef page_list_entry
+struct page_list_entry
+{
+ unsigned long next, prev;
+};
+#endif
+
struct page_info
{
/* Each frame can be threaded onto a doubly-linked list. */
- struct list_head list;
+ struct page_list_entry list;
/* Reference count and various PGC_xxx flags and fields. */
unsigned long count_info;
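
The #if 0 block above records why a compressed page_list_entry was abandoned
on ia64: frame numbers derived from IA64_MAX_PHYS_BITS and the page size need
more than 32 bits, so 32-bit link fields of the kind x86 can use would not
hold them. A back-of-the-envelope check, assuming IA64_MAX_PHYS_BITS = 50 and
PAGE_SHIFT = 14 (16KB pages):

/* Back-of-the-envelope check; the two constants below are assumptions
 * about the ia64 configuration, not values read from the tree. */
#include <stdio.h>

int main(void)
{
    unsigned int phys_bits  = 50;   /* assumed IA64_MAX_PHYS_BITS */
    unsigned int page_shift = 14;   /* assumed 16KB pages */
    unsigned int pfn_bits   = phys_bits - page_shift;

    /* 36 bits of frame number exceed a 32-bit link field, so the
     * x86-style compressed links cannot be used as-is. */
    printf("PFN bits: %u (32-bit links are %s)\n", pfn_bits,
           pfn_bits > 32 ? "insufficient" : "sufficient");
    return 0;
}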
diff --git a/xen/include/asm-ia64/tlb_track.h b/xen/include/asm-ia64/tlb_track.h
--- a/xen/include/asm-ia64/tlb_track.h
+++ b/xen/include/asm-ia64/tlb_track.h
@@ -72,7 +72,7 @@ struct tlb_track {
unsigned int limit;
unsigned int num_entries;
unsigned int num_free;
- struct list_head page_list;
+ struct page_list_head page_list;
/* XXX hash table size */
spinlock_t hash_lock;
--
yamahata