[IA64] Widen page counts and domain pointer

This is the ia64 counterpart of 19089:39517e863cc8.
This patch is a preliminary step toward removing the xenheap.

Signed-off-by: Isaku Yamahata

diff --git a/xen/arch/ia64/xen/machine_kexec.c b/xen/arch/ia64/xen/machine_kexec.c
--- a/xen/arch/ia64/xen/machine_kexec.c
+++ b/xen/arch/ia64/xen/machine_kexec.c
@@ -200,7 +200,6 @@ void arch_crash_save_vmcoreinfo(void)
     VMCOREINFO_SYMBOL(dom_io);
     VMCOREINFO_SYMBOL(xen_pstart);
     VMCOREINFO_SYMBOL(frametable_pg_dir);
-    VMCOREINFO_SYMBOL_ALIAS(xen_heap_start, xen_pickle_offset);
 }
 
 /*
diff --git a/xen/arch/ia64/xen/mm.c b/xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c
+++ b/xen/arch/ia64/xen/mm.c
@@ -1261,17 +1261,18 @@ adjust_page_count_info(struct page_info*
         int ret = get_page(page, d);
         BUG_ON(ret == 0);
     } else {
-        u64 x, nx, y;
+        unsigned long x, nx, y;
 
-        y = *((u64*)&page->count_info);
+        y = page->count_info;
         do {
             x = y;
             nx = x + 1;
 
             BUG_ON((x >> 32) != 0);
             BUG_ON((nx & PGC_count_mask) != 2);
-            y = cmpxchg((u64*)&page->count_info, x, nx);
+            y = cmpxchg(&page->count_info, x, nx);
         } while (unlikely(y != x));
+        BUG_ON(page_get_owner(page) != NULL);
     }
 }
 
@@ -2748,7 +2749,7 @@ steal_page(struct domain *d, struct page
 #if 0 /* if big endian */
 # error "implement big endian version of steal_page()"
 #endif
-    u32 x, y;
+    unsigned long x, y;
 
     if (page_get_owner(page) != d) {
         gdprintk(XENLOG_INFO, "%s d 0x%p owner 0x%p\n",
@@ -2808,7 +2809,6 @@ steal_page(struct domain *d, struct page
     y = page->count_info;
     do {
         x = y;
-        // page->count_info: untouched
         if (unlikely(((x & (PGC_count_mask | PGC_allocated)) !=
                       (1 | PGC_allocated)))) {
@@ -2817,7 +2817,7 @@ steal_page(struct domain *d, struct page
             gdprintk(XENLOG_INFO, "gnttab_transfer: "
                      "Bad page %p: ed=%p(%u), "
                      "sd=%p,"
-                     " caf=%016x, taf=%" PRtype_info
+                     " caf=%016lx, taf=%" PRtype_info
                      " memflags 0x%x\n",
                      (void *) page_to_mfn(page),
                      d, d->domain_id,
@@ -2829,7 +2829,7 @@ steal_page(struct domain *d, struct page
             gdprintk(XENLOG_WARNING, "gnttab_transfer: "
                      "Bad page %p: ed=%p(%u), "
                      "sd=%p(%u),"
-                     " caf=%016x, taf=%" PRtype_info
+                     " caf=%016lx, taf=%" PRtype_info
                      " memflags 0x%x\n",
                      (void *) page_to_mfn(page),
                      d, d->domain_id,
@@ -2864,7 +2864,7 @@ steal_page(struct domain *d, struct page
 
 fail:
     spin_unlock(&d->page_alloc_lock);
-    MEM_LOG("Bad page %p: ed=%p(%u), sd=%p, caf=%08x, taf=%" PRtype_info,
+    MEM_LOG("Bad page %p: ed=%p(%u), sd=%p, caf=%016lx, taf=%" PRtype_info,
             (void *)page_to_mfn(page), d, d->domain_id,
             page_get_owner(page), page->count_info, page->u.inuse.type_info);
     return -1;
@@ -3055,11 +3055,11 @@ void domain_cache_flush (struct domain *
     //printk ("domain_cache_flush: %d %d pages\n", d->domain_id, nbr_page);
 }
 
-static void free_page_type(struct page_info *page, u32 type)
+static void free_page_type(struct page_info *page, unsigned long type)
 {
 }
 
-static int alloc_page_type(struct page_info *page, u32 type)
+static int alloc_page_type(struct page_info *page, unsigned long type)
 {
     return 1;
 }
@@ -3151,7 +3151,7 @@ static int get_page_from_pagenr(unsigned
 }
 
 
-int get_page_type(struct page_info *page, u32 type)
+int get_page_type(struct page_info *page, unsigned long type)
 {
     u64 nx, x, y = page->u.inuse.type_info;
 
@@ -3199,7 +3199,7 @@ int get_page_type(struct page_info *page
         {
             if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
                  (type != PGT_l1_page_table) )
-                MEM_LOG("Bad type (saw %08lx != exp %08x) "
+                MEM_LOG("Bad type (saw %08lx != exp %08lx) "
                         "for mfn %016lx (pfn %016lx)",
                         x, type, page_to_mfn(page),
                         get_gpfn_from_mfn(page_to_mfn(page)));
@@ -3220,8 +3220,8 @@ int get_page_type(struct page_info *page
     /* Try to validate page type; drop the new reference on failure. */
     if ( unlikely(!alloc_page_type(page, type)) )
     {
-        MEM_LOG("Error while validating mfn %lx (pfn %lx) for type %08x"
-                ": caf=%08x taf=%" PRtype_info,
+        MEM_LOG("Error while validating mfn %lx (pfn %lx) for type %08lx"
+                ": caf=%016lx taf=%" PRtype_info,
                 page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)), type,
                 page->count_info, page->u.inuse.type_info);
         /* Noone else can get a reference. We hold the only ref. */
diff --git a/xen/arch/ia64/xen/xensetup.c b/xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c
+++ b/xen/arch/ia64/xen/xensetup.c
@@ -74,7 +74,6 @@ boolean_param("xencons_poll", opt_xencon
 unsigned long xenheap_size = XENHEAP_DEFAULT_SIZE;
 unsigned long xen_pstart;
-void *xen_pickle_offset __read_mostly;
 
 static void __init parse_xenheap_megabytes(char *s)
 {
@@ -84,9 +83,7 @@ static void __init parse_xenheap_megabyt
     if (megabytes < XENHEAP_MEGABYTES_MIN)
         megabytes = XENHEAP_MEGABYTES_MIN;
 
-#define XENHEAP_MEGABYTES_MAX 4096UL /* need more? If so,
-                                        __pickle()/__unpickle() must be
-                                        revised. */
+#define XENHEAP_MEGABYTES_MAX 4096UL /* need more? */
 
     if (megabytes > XENHEAP_MEGABYTES_MAX)
         megabytes = XENHEAP_MEGABYTES_MAX;
@@ -530,14 +527,6 @@ skip_move:
     printk("find_memory: efi_memmap_walk returns max_page=%lx\n",max_page);
     efi_print();
 
-    /*
-     * later [__init_begin, __init_end) will be freed up as xen heap
-     * so that struct domain might be allocated from the init area
-     * which is < xen_heap_start. so we can't simply set
-     * xen_pickle_offset = xen_heap_start.
-     */
-    xen_pickle_offset = ia64_imva(__init_begin);
-
     xen_heap_start = memguard_init(ia64_imva(&_end));
     printk("Before xen_heap_start: %p\n", xen_heap_start);
     xen_heap_start = __va(init_boot_allocator(__pa(xen_heap_start)));
diff --git a/xen/include/asm-ia64/mm.h b/xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h
+++ b/xen/include/asm-ia64/mm.h
@@ -46,7 +46,7 @@ struct page_info
     struct list_head list;
 
     /* Reference count and various PGC_xxx flags and fields. */
-    u32 count_info;
+    unsigned long count_info;
 
     /* Context-dependent fields follow... */
     union {
@@ -54,10 +54,10 @@ struct page_info
         /* Page is in use: ((count_info & PGC_count_mask) != 0). */
         struct {
             /* Owner of this page (NULL if page is anonymous). */
-            u32 _domain; /* pickled format */
+            unsigned long _domain; /* pickled format */
             /* Type reference count and various PGT_xxx flags and fields. */
             unsigned long type_info;
-        } __attribute__ ((packed)) inuse;
+        } inuse;
 
         /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
         struct {
@@ -65,7 +65,7 @@ struct page_info
             u32 order;
             /* Mask of possibly-tainted TLBs. */
             cpumask_t cpumask;
-        } __attribute__ ((packed)) free;
+        } free;
 
     } u;
 
@@ -86,50 +86,47 @@ struct page_info
  * Still small set of flags defined by far on IA-64.
  * IA-64 should make it a definition same as x86_64.
  */
+#define PG_shift(idx)   (BITS_PER_LONG - (idx))
+#define PG_mask(x, idx) (x ## UL << PG_shift(idx))
+
 /* The following page types are MUTUALLY EXCLUSIVE. */
-#define PGT_none            (0UL<<29) /* no special uses of this page */
-#define PGT_l1_page_table   (1UL<<29) /* using this page as an L1 page table? */
-#define PGT_l2_page_table   (2UL<<29) /* using this page as an L2 page table? */
-#define PGT_l3_page_table   (3UL<<29) /* using this page as an L3 page table? */
-#define PGT_l4_page_table   (4UL<<29) /* using this page as an L4 page table? */
+#define PGT_none          PG_mask(0, 3) /* no special uses of this page */
+#define PGT_l1_page_table PG_mask(1, 3) /* using as an L1 page table? */
+#define PGT_l2_page_table PG_mask(2, 3) /* using as an L2 page table? */
+#define PGT_l3_page_table PG_mask(3, 3) /* using as an L3 page table? */
+#define PGT_l4_page_table PG_mask(4, 3) /* using as an L4 page table? */
 /* Value 5 reserved. See asm-x86/mm.h */
 /* Value 6 reserved. See asm-x86/mm.h */
-#define PGT_writable_page   (7UL<<29) /* has writable mappings of this page? */
-#define PGT_type_mask       (7UL<<29) /* Bits 29-31. */
+#define PGT_writable_page PG_mask(7, 3) /* has writable mappings? */
+#define PGT_type_mask     PG_mask(7, 3) /* Bits 29-31. */
 
+ /* Owning guest has pinned this page to its current type? */
+#define _PGT_pinned       PG_shift(4)
+#define PGT_pinned        PG_mask(1, 4)
  /* Has this page been validated for use as its current type? */
-#define _PGT_validated      28
-#define PGT_validated       (1UL<<_PGT_validated)
- /* Owning guest has pinned this page to its current type? */
-#define _PGT_pinned         27
-#define PGT_pinned          (1UL<<_PGT_pinned)
+#define _PGT_validated    PG_shift(5)
+#define PGT_validated     PG_mask(1, 5)
 
- /* 16-bit count of uses of this frame as its current type. */
-#define PGT_count_mask      ((1UL<<16)-1)
+ /* Count of uses of this frame as its current type. */
+#define PGT_count_width   PG_shift(7)
+#define PGT_count_mask    ((1UL<<PGT_count_width)-1)
= paddr_to_pfn(xen_pstart)))
 #define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
 
-extern void* xen_pickle_offset;
-#define __pickle(a)     ((unsigned long)a - (unsigned long)xen_pickle_offset)
-#define __unpickle(a)   (void *)(a + xen_pickle_offset)
-
-static inline struct domain *unpickle_domptr(u64 _d)
-{ return (_d == 0) ? NULL : __unpickle(_d); }
-static inline u32 pickle_domptr(struct domain *_d)
-{ return (_d == NULL) ? 0 : (u32)__pickle(_d); }
-
-#define page_get_owner(_p)     (unpickle_domptr((_p)->u.inuse._domain))
-#define page_set_owner(_p, _d) ((_p)->u.inuse._domain = pickle_domptr(_d))
+#define page_get_owner(_p)     ((struct domain *)(_p)->u.inuse._domain)
+#define page_set_owner(_p, _d) ((_p)->u.inuse._domain = (unsigned long)(_d))
 
 #define XENSHARE_writable 0
 #define XENSHARE_readonly 1
@@ -151,23 +148,23 @@ void add_to_domain_alloc_list(unsigned l
 
 static inline void put_page(struct page_info *page)
 {
-    u32 nx, x, y = page->count_info;
+    unsigned long nx, x, y = page->count_info;
 
     do {
-	x = y;
-	nx = x - 1;
+        x = y;
+        nx = x - 1;
     } while (unlikely((y = cmpxchg_rel(&page->count_info, x, nx)) != x));
 
     if (unlikely((nx & PGC_count_mask) == 0))
-	free_domheap_page(page);
+        free_domheap_page(page);
 }
 
 /* count_info and ownership are checked atomically. */
 static inline int get_page(struct page_info *page,
                            struct domain *domain)
 {
-    u32 x, y = page->count_info;
+    unsigned long x, y = page->count_info;
 
     do {
         x = y;
@@ -185,7 +182,7 @@ static inline int get_page(struct page_i
 fail:
     /* if (!domain->is_dying) */ /* XXX: header inclusion hell */
     gdprintk(XENLOG_INFO,
-             "Error pfn %lx: rd=%p, od=%p, caf=%016x, taf=%" PRtype_info "\n",
+             "Error pfn %lx: rd=%p, od=%p, caf=%016lx, taf=%" PRtype_info "\n",
              page_to_mfn(page), domain, page_get_owner(page), y,
              page->u.inuse.type_info);
     return 0;
@@ -194,7 +191,7 @@ fail:
 int is_iomem_page(unsigned long mfn);
 
 extern void put_page_type(struct page_info *page);
-extern int get_page_type(struct page_info *page, u32 type);
+extern int get_page_type(struct page_info *page, unsigned long type);
 
 static inline void put_page_and_type(struct page_info *page)
 {
@@ -205,7 +202,7 @@ static inline int get_page_and_type(str
 
 static inline int get_page_and_type(struct page_info *page,
                                     struct domain *domain,
-                                    u32 type)
+                                    unsigned long type)
 {
     int rc = get_page(page, domain);
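
For illustration, here is a minimal stand-alone sketch (not Xen code; every identifier in it is a made-up stand-in) of the before/after that the asm-ia64/mm.h hunks implement: previously _domain was a u32 offset from xen_pickle_offset (a "pickled" pointer), which is why XENHEAP_MEGABYTES_MAX carried the "__pickle()/__unpickle() must be revised" caveat; after the patch _domain is an unsigned long and simply holds the owner pointer, so pickle_domptr()/unpickle_domptr() and xen_pickle_offset go away.

/*
 * Illustrative sketch only -- not Xen code.  fake_domain, old_page_info,
 * new_page_info, pickle, unpickle and pickle_base are invented names.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_domain { int domain_id; };

/* ---- old scheme: owner kept as a 32-bit "pickled" offset ---- */
static char *pickle_base;                 /* stand-in for xen_pickle_offset */

struct old_page_info {
    uint32_t count_info;
    uint32_t _domain;                     /* offset from pickle_base, 0 == NULL */
};

static uint32_t pickle(struct fake_domain *d)
{
    return d ? (uint32_t)((char *)d - pickle_base) : 0;
}

static struct fake_domain *unpickle(uint32_t v)
{
    return v ? (struct fake_domain *)(pickle_base + v) : NULL;
}

/* ---- new scheme: owner pointer stored directly ---- */
struct new_page_info {
    unsigned long count_info;             /* widened from u32 */
    unsigned long _domain;                /* the pointer value itself */
};

int main(void)
{
    static struct fake_domain heap[2];    /* pretend slot 1 holds the domain */
    struct fake_domain *d = &heap[1];
    d->domain_id = 7;
    pickle_base = (char *)&heap[0];       /* base the old offsets are relative to */

    struct old_page_info op = { .count_info = 1, ._domain = pickle(d) };
    struct new_page_info np = { .count_info = 1,
                                ._domain   = (unsigned long)d };

    /* Both recover the same owner; only the representation differs. */
    printf("old scheme owner id: %d\n", unpickle(op._domain)->domain_id);
    printf("new scheme owner id: %d\n",
           ((struct fake_domain *)np._domain)->domain_id);
    return 0;
}

The trade-off is a slightly larger struct page_info (count_info and _domain each grow to unsigned long and the packed attributes are dropped) in exchange for removing xen_pickle_offset and the size assumption behind the old 32-bit encoding, mirroring what the x86 side did in 19089:39517e863cc8.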