# HG changeset patch
# User djm@xxxxxxxxxxxxxxx
# Node ID d34925e4144bcdadb020ee2deef766a994bf7b04
# Parent 23217792aa3b3cb87036411a68134ffeb5fe105a
# Still more cleanup and moving to 2.6.13 base
diff -r 23217792aa3b -r d34925e4144b xen/arch/ia64/hpsimserial.c
--- a/xen/arch/ia64/hpsimserial.c Wed Aug 31 23:21:24 2005
+++ b/xen/arch/ia64/hpsimserial.c Thu Sep 1 17:09:27 2005
@@ -8,7 +8,7 @@
#include <linux/config.h>
#include <xen/sched.h>
#include <xen/serial.h>
-#include <asm/hpsim_ssc.h>
+#include "hpsim_ssc.h"
static void hp_ski_putc(struct serial_port *port, char c)
{
diff -r 23217792aa3b -r d34925e4144b xen/arch/ia64/linux-xen/README.origin
--- a/xen/arch/ia64/linux-xen/README.origin Wed Aug 31 23:21:24 2005
+++ b/xen/arch/ia64/linux-xen/README.origin Thu Sep 1 17:09:27 2005
@@ -7,6 +7,7 @@
efi.c -> linux/arch/ia64/kernel/efi.c
entry.h -> linux/arch/ia64/kernel/entry.h
entry.S -> linux/arch/ia64/kernel/entry.S
+hpsim_ssc.h -> linux/arch/ia64/hp/sim/hpsim_ssc.h
irq_ia64.c -> linux/arch/ia64/kernel/irq_ia64.c
minstate.h -> linux/arch/ia64/kernel/minstate.h
mm_contig.c -> linux/arch/ia64/mm/contig.c
diff -r 23217792aa3b -r d34925e4144b xen/arch/ia64/process.c
--- a/xen/arch/ia64/process.c Wed Aug 31 23:21:24 2005
+++ b/xen/arch/ia64/process.c Thu Sep 1 17:09:27 2005
@@ -28,8 +28,8 @@
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
-#include <asm/hpsim_ssc.h>
#include <asm/dom_fw.h>
+#include "hpsim_ssc.h"
extern unsigned long vcpu_get_itir_on_fault(struct vcpu *, UINT64);
extern struct ia64_sal_retval pal_emulator_static(UINT64);
diff -r 23217792aa3b -r d34925e4144b xen/arch/ia64/xenmisc.c
--- a/xen/arch/ia64/xenmisc.c Wed Aug 31 23:21:24 2005
+++ b/xen/arch/ia64/xenmisc.c Thu Sep 1 17:09:27 2005
@@ -174,6 +174,34 @@
void free_page_type(struct pfn_info *page, unsigned int type)
{
dummy();
+}
+
+///////////////////////////////
+//// misc memory stuff
+///////////////////////////////
+
+unsigned long __get_free_pages(unsigned int mask, unsigned int order)
+{
+ void *p = alloc_xenheap_pages(order);
+
+ memset(p,0,PAGE_SIZE<<order);
+ return (unsigned long)p;
+}
+
+void __free_pages(struct page *page, unsigned int order)
+{
+ if (order) BUG();
+ free_xenheap_page(page);
+}
+
+void *pgtable_quicklist_alloc(void)
+{
+ return alloc_xenheap_pages(0);
+}
+
+void pgtable_quicklist_free(void *pgtable_entry)
+{
+ free_xenheap_page(pgtable_entry);
}
///////////////////////////////
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/config.h Thu Sep 1 17:09:27 2005
@@ -168,7 +168,9 @@
#define ____cacheline_aligned_in_smp
#define ____cacheline_maxaligned_in_smp
+#ifndef __ASSEMBLY__
#include "asm/types.h" // for u64
+#endif
// warning: unless search_extable is declared, the return value gets
// truncated to 32-bits, causing a very strange error in privop handling
diff -r 23217792aa3b -r d34925e4144b
xen/include/asm-ia64/linux-xen/asm/gcc_intrin.h
--- a/xen/include/asm-ia64/linux-xen/asm/gcc_intrin.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/gcc_intrin.h Thu Sep 1 17:09:27 2005
@@ -133,13 +133,17 @@
ia64_intri_res;
\
})
-#define ia64_popcnt(x) \
-({ \
+#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
+# define ia64_popcnt(x) __builtin_popcountl(x)
+#else
+# define ia64_popcnt(x) \
+ ({ \
__u64 ia64_intri_res; \
asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
\
ia64_intri_res; \
-})
+ })
+#endif
#define ia64_getf_exp(x) \
({ \
@@ -367,66 +371,6 @@
#define ia64_mf() asm volatile ("mf" ::: "memory")
#define ia64_mfa() asm volatile ("mf.a" ::: "memory")
-
-#ifdef CONFIG_VTI
-/*
- * Flushrs instruction stream.
- */
-#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
-
-#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
-
-#define ia64_get_rsc() \
-({ \
- unsigned long val; \
- asm volatile ("mov %0=ar.rsc;;" : "=r"(val) :: "memory"); \
- val; \
-})
-
-#define ia64_set_rsc(val) \
- asm volatile ("mov ar.rsc=%0;;" :: "r"(val) : "memory")
-
-#define ia64_get_bspstore() \
-({ \
- unsigned long val; \
- asm volatile ("mov %0=ar.bspstore;;" : "=r"(val) :: "memory"); \
- val; \
-})
-
-#define ia64_set_bspstore(val) \
- asm volatile ("mov ar.bspstore=%0;;" :: "r"(val) : "memory")
-
-#define ia64_get_rnat() \
-({ \
- unsigned long val; \
- asm volatile ("mov %0=ar.rnat;" : "=r"(val) :: "memory"); \
- val; \
-})
-
-#define ia64_set_rnat(val) \
- asm volatile ("mov ar.rnat=%0;;" :: "r"(val) : "memory")
-
-#define ia64_ttag(addr)
\
-({
\
- __u64 ia64_intri_res;
\
- asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));
\
- ia64_intri_res;
\
-})
-
-#define ia64_get_dcr() \
-({ \
- __u64 result; \
- asm volatile ("mov %0=cr.dcr" : "=r"(result) : ); \
- result; \
-})
-
-#define ia64_set_dcr(val) \
-({ \
- asm volatile ("mov cr.dcr=%0" :: "r"(val) ); \
-})
-
-#endif // CONFIG_VTI
-
#define ia64_invala() asm volatile ("invala" ::: "memory")
@@ -654,4 +598,8 @@
:: "r"((x)) : "p6", "p7", "memory"); \
} while (0)
+#ifdef XEN
+#include <asm/xengcc_intrin.h>
+#endif
+
#endif /* _ASM_IA64_GCC_INTRIN_H */
diff -r 23217792aa3b -r d34925e4144b
xen/include/asm-ia64/linux-xen/asm/ia64regs.h
--- a/xen/include/asm-ia64/linux-xen/asm/ia64regs.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/ia64regs.h Thu Sep 1 17:09:27 2005
@@ -87,35 +87,6 @@
#define _IA64_REG_CR_LRR0 4176
#define _IA64_REG_CR_LRR1 4177
-#ifdef CONFIG_VTI
-#define IA64_REG_CR_DCR 0
-#define IA64_REG_CR_ITM 1
-#define IA64_REG_CR_IVA 2
-#define IA64_REG_CR_PTA 8
-#define IA64_REG_CR_IPSR 16
-#define IA64_REG_CR_ISR 17
-#define IA64_REG_CR_IIP 19
-#define IA64_REG_CR_IFA 20
-#define IA64_REG_CR_ITIR 21
-#define IA64_REG_CR_IIPA 22
-#define IA64_REG_CR_IFS 23
-#define IA64_REG_CR_IIM 24
-#define IA64_REG_CR_IHA 25
-#define IA64_REG_CR_LID 64
-#define IA64_REG_CR_IVR 65
-#define IA64_REG_CR_TPR 66
-#define IA64_REG_CR_EOI 67
-#define IA64_REG_CR_IRR0 68
-#define IA64_REG_CR_IRR1 69
-#define IA64_REG_CR_IRR2 70
-#define IA64_REG_CR_IRR3 71
-#define IA64_REG_CR_ITV 72
-#define IA64_REG_CR_PMV 73
-#define IA64_REG_CR_CMCV 74
-#define IA64_REG_CR_LRR0 80
-#define IA64_REG_CR_LRR1 81
-#endif // CONFIG_VTI
-
/* Indirect Registers for getindreg() and setindreg() */
#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */
@@ -126,4 +97,8 @@
#define _IA64_REG_INDR_PMD 9005
#define _IA64_REG_INDR_RR 9006
+#ifdef XEN
+#include <asm/xenia64regs.h>
+#endif
+
#endif /* _ASM_IA64_IA64REGS_H */
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/linux-xen/asm/io.h
--- a/xen/include/asm-ia64/linux-xen/asm/io.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/io.h Thu Sep 1 17:09:27 2005
@@ -122,14 +122,6 @@
static inline void ___ia64_mmiowb(void)
{
ia64_mfa();
-}
-
-static inline const unsigned long
-__ia64_get_io_port_base (void)
-{
- extern unsigned long ia64_iobase;
-
- return ia64_iobase;
}
static inline void*
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/linux-xen/asm/kregs.h
--- a/xen/include/asm-ia64/linux-xen/asm/kregs.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/kregs.h Thu Sep 1 17:09:27 2005
@@ -29,21 +29,8 @@
*/
#define IA64_TR_KERNEL 0 /* itr0, dtr0: maps kernel image (code
& data) */
#define IA64_TR_PALCODE 1 /* itr1: maps PALcode as
required by EFI */
-#ifdef CONFIG_VTI
-#define IA64_TR_XEN_IN_DOM 6 /* itr6, dtr6: Double mapping for xen
image in domain space */
-#endif // CONFIG_VTI
#define IA64_TR_PERCPU_DATA 1 /* dtr1: percpu data */
#define IA64_TR_CURRENT_STACK 2 /* dtr2: maps kernel's memory- &
register-stacks */
-#ifdef XEN
-#define IA64_TR_SHARED_INFO 3 /* dtr3: page shared with domain */
-#define IA64_TR_VHPT 4 /* dtr4: vhpt */
-#define IA64_TR_ARCH_INFO 5
-#ifdef CONFIG_VTI
-#define IA64_TR_VHPT_IN_DOM 5 /* dtr5: Double mapping for vhpt table
in domain space */
-#define IA64_TR_RR7_SWITCH_STUB 7 /* dtr7: mapping for rr7 switch
stub */
-#define IA64_TEMP_PHYSICAL 8 /* itr8, dtr8: temp mapping for guest
physical memory 256M */
-#endif // CONFIG_VTI
-#endif
/* Processor status register bits: */
#define IA64_PSR_BE_BIT 1
@@ -79,9 +66,6 @@
#define IA64_PSR_ED_BIT 43
#define IA64_PSR_BN_BIT 44
#define IA64_PSR_IA_BIT 45
-#ifdef CONFIG_VTI
-#define IA64_PSR_VM_BIT 46
-#endif // CONFIG_VTI
/* A mask of PSR bits that we generally don't want to inherit across a
clone2() or an
execve(). Only list flags here that need to be cleared/set for BOTH
clone2() and
@@ -123,9 +107,6 @@
#define IA64_PSR_ED (__IA64_UL(1) << IA64_PSR_ED_BIT)
#define IA64_PSR_BN (__IA64_UL(1) << IA64_PSR_BN_BIT)
#define IA64_PSR_IA (__IA64_UL(1) << IA64_PSR_IA_BIT)
-#ifdef CONFIG_VTI
-#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT)
-#endif // CONFIG_VTI
/* User mask bits: */
#define IA64_PSR_UM (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL
| IA64_PSR_MFH)
@@ -180,20 +161,7 @@
#define IA64_ISR_CODE_PROBEF 5
#ifdef XEN
-/* Interruption Function State */
-#define IA64_IFS_V_BIT 63
-#define IA64_IFS_V (__IA64_UL(1) << IA64_IFS_V_BIT)
-
-/* Page Table Address */
-#define IA64_PTA_VE_BIT 0
-#define IA64_PTA_SIZE_BIT 2
-#define IA64_PTA_VF_BIT 8
-#define IA64_PTA_BASE_BIT 15
-
-#define IA64_PTA_VE (__IA64_UL(1) << IA64_PTA_VE_BIT)
-#define IA64_PTA_SIZE (__IA64_UL(0x3f) << IA64_PTA_SIZE_BIT)
-#define IA64_PTA_VF (__IA64_UL(1) << IA64_PTA_VF_BIT)
-#define IA64_PTA_BASE (__IA64_UL(0) - ((__IA64_UL(1) << IA64_PTA_BASE_BIT)))
+#include <asm/xenkregs.h>
#endif
#endif /* _ASM_IA64_kREGS_H */
diff -r 23217792aa3b -r d34925e4144b
xen/include/asm-ia64/linux-xen/asm/mca_asm.h
--- a/xen/include/asm-ia64/linux-xen/asm/mca_asm.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/mca_asm.h Thu Sep 1 17:09:27 2005
@@ -29,10 +29,10 @@
#ifdef XEN
#define INST_VA_TO_PA(addr)
\
dep addr = 0, addr, 60, 4
-#else // XEN
+#else
#define INST_VA_TO_PA(addr)
\
dep addr = 0, addr, 61, 3
-#endif // XEN
+#endif
/*
* This macro converts a data virtual address to a physical address
* Right now for simulation purposes the virtual addresses are
@@ -51,15 +51,19 @@
#define DATA_PA_TO_VA(addr,temp)
\
mov temp = 0xf ;;
\
dep addr = temp, addr, 60, 4
-#else // XEN
+#else
#define DATA_PA_TO_VA(addr,temp)
\
mov temp = 0x7 ;;
\
dep addr = temp, addr, 61, 3
-#endif // XEN
-
+#endif
+
+#ifdef XEN
+//FIXME LATER
+#else
#define GET_THIS_PADDR(reg, var) \
mov reg = IA64_KR(PER_CPU_DATA);; \
addl reg = THIS_CPU(var), reg
+#endif
/*
* This macro jumps to the instruction at the given virtual address
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/linux-xen/asm/page.h
--- a/xen/include/asm-ia64/linux-xen/asm/page.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/page.h Thu Sep 1 17:09:27 2005
@@ -32,7 +32,6 @@
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
#define PERCPU_PAGE_SHIFT 16 /* log2() of max. size of per-CPU area
*/
-
#define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
#define RGN_MAP_LIMIT ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE) /* per
region addr limit */
@@ -96,15 +95,9 @@
#endif
#ifndef CONFIG_DISCONTIGMEM
-#ifdef XEN
-# define pfn_valid(pfn) (0)
-# define page_to_pfn(_page) ((unsigned long)((_page) - frame_table))
-# define pfn_to_page(_pfn) (frame_table + (_pfn))
-#else
# define pfn_valid(pfn) (((pfn) < max_mapnr) &&
ia64_pfn_valid(pfn))
# define page_to_pfn(page) ((unsigned long) (page - mem_map))
# define pfn_to_page(pfn) (mem_map + (pfn))
-#endif
#else
extern struct page *vmem_map;
extern unsigned long max_low_pfn;
@@ -115,11 +108,6 @@
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-
-#ifdef XEN
-#define page_to_virt(_page) phys_to_virt(page_to_phys(_page))
-#define phys_to_page(kaddr) pfn_to_page(((kaddr) >> PAGE_SHIFT))
-#endif
typedef union ia64_va {
struct {
@@ -136,23 +124,8 @@
* expressed in this way to ensure they result in a single "dep"
* instruction.
*/
-#ifdef XEN
-typedef union xen_va {
- struct {
- unsigned long off : 60;
- unsigned long reg : 4;
- } f;
- unsigned long l;
- void *p;
-} xen_va;
-
-// xen/drivers/console.c uses __va in a declaration (should be fixed!)
-#define __pa(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = 0;
_v.l;})
-#define __va(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1;
_v.p;})
-#else
#define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0;
_v.l;})
#define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1;
_v.p;})
-#endif
#define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
#define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
@@ -164,9 +137,9 @@
# define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61)
\
| (REGION_OFFSET(x) >>
(HPAGE_SHIFT-PAGE_SHIFT)))
# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-# define is_hugepage_only_range(addr, len) \
+# define is_hugepage_only_range(mm, addr, len) \
(REGION_NUMBER(addr) == REGION_HPAGE && \
- REGION_NUMBER((addr)+(len)) == REGION_HPAGE)
+ REGION_NUMBER((addr)+(len)-1) == REGION_HPAGE)
extern unsigned int hpage_shift;
#endif
@@ -224,15 +197,15 @@
# define __pgprot(x) (x)
#endif /* !STRICT_MM_TYPECHECKS */
-#ifdef XEN
-#define PAGE_OFFSET __IA64_UL_CONST(0xf000000000000000)
-#else
#define PAGE_OFFSET __IA64_UL_CONST(0xe000000000000000)
-#endif
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE |
\
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC
| \
(((current->personality &
READ_IMPLIES_EXEC) != 0) \
? VM_EXEC : 0))
+#ifdef XEN
+#include <asm/xenpage.h>
+#endif
+
#endif /* _ASM_IA64_PAGE_H */
diff -r 23217792aa3b -r d34925e4144b
xen/include/asm-ia64/linux-xen/asm/pgalloc.h
--- a/xen/include/asm-ia64/linux-xen/asm/pgalloc.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/pgalloc.h Thu Sep 1 17:09:27 2005
@@ -21,176 +21,127 @@
#include <linux/threads.h>
#include <asm/mmu_context.h>
-#include <asm/processor.h>
-/*
- * Very stupidly, we used to get new pgd's and pmd's, init their contents
- * to point to the NULL versions of the next level page table, later on
- * completely re-init them the same way, then free them up. This wasted
- * a lot of work and caused unnecessary memory traffic. How broken...
- * We fix this by caching them.
- */
-#define pgd_quicklist (local_cpu_data->pgd_quick)
-#define pmd_quicklist (local_cpu_data->pmd_quick)
-#define pgtable_cache_size (local_cpu_data->pgtable_cache_sz)
+#ifndef XEN
+DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
+#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
+DECLARE_PER_CPU(long, __pgtable_quicklist_size);
+#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)
-static inline pgd_t*
-pgd_alloc_one_fast (struct mm_struct *mm)
+static inline long pgtable_quicklist_total_size(void)
+{
+ long ql_size = 0;
+ int cpuid;
+
+ for_each_online_cpu(cpuid) {
+ ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
+ }
+ return ql_size;
+}
+
+static inline void *pgtable_quicklist_alloc(void)
{
unsigned long *ret = NULL;
preempt_disable();
- ret = pgd_quicklist;
+ ret = pgtable_quicklist;
if (likely(ret != NULL)) {
- pgd_quicklist = (unsigned long *)(*ret);
+ pgtable_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
- --pgtable_cache_size;
- } else
- ret = NULL;
+ --pgtable_quicklist_size;
+ preempt_enable();
+ } else {
+ preempt_enable();
+ ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ }
- preempt_enable();
-
- return (pgd_t *) ret;
+ return ret;
}
-static inline pgd_t*
-pgd_alloc (struct mm_struct *mm)
+static inline void pgtable_quicklist_free(void *pgtable_entry)
{
- /* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
- pgd_t *pgd = pgd_alloc_one_fast(mm);
+#ifdef CONFIG_NUMA
+ unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));
- if (unlikely(pgd == NULL)) {
-#ifdef XEN
- pgd = (pgd_t *)alloc_xenheap_page();
- memset(pgd,0,PAGE_SIZE);
-#else
- pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
+ if (unlikely(nid != numa_node_id())) {
+ free_page((unsigned long)pgtable_entry);
+ return;
+ }
#endif
- }
- return pgd;
+
+ preempt_disable();
+ *(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
+ pgtable_quicklist = (unsigned long *)pgtable_entry;
+ ++pgtable_quicklist_size;
+ preempt_enable();
+}
+#endif
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return pgtable_quicklist_alloc();
+}
+
+static inline void pgd_free(pgd_t * pgd)
+{
+ pgtable_quicklist_free(pgd);
}
static inline void
-pgd_free (pgd_t *pgd)
-{
- preempt_disable();
- *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
- pgd_quicklist = (unsigned long *) pgd;
- ++pgtable_cache_size;
- preempt_enable();
-}
-
-static inline void
-pud_populate (struct mm_struct *mm, pud_t *pud_entry, pmd_t *pmd)
+pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
{
pud_val(*pud_entry) = __pa(pmd);
}
-static inline pmd_t*
-pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- unsigned long *ret = NULL;
-
- preempt_disable();
-
- ret = (unsigned long *)pmd_quicklist;
- if (likely(ret != NULL)) {
- pmd_quicklist = (unsigned long *)(*ret);
- ret[0] = 0;
- --pgtable_cache_size;
- }
-
- preempt_enable();
-
- return (pmd_t *)ret;
+ return pgtable_quicklist_alloc();
}
-static inline pmd_t*
-pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
+static inline void pmd_free(pmd_t * pmd)
{
-#ifdef XEN
- pmd_t *pmd = (pmd_t *)alloc_xenheap_page();
- memset(pmd,0,PAGE_SIZE);
-#else
- pmd_t *pmd = (pmd_t
*)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-#endif
-
- return pmd;
-}
-
-static inline void
-pmd_free (pmd_t *pmd)
-{
- preempt_disable();
- *(unsigned long *)pmd = (unsigned long) pmd_quicklist;
- pmd_quicklist = (unsigned long *) pmd;
- ++pgtable_cache_size;
- preempt_enable();
+ pgtable_quicklist_free(pmd);
}
#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
static inline void
-pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, struct page *pte)
+pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
{
pmd_val(*pmd_entry) = page_to_phys(pte);
}
static inline void
-pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
+pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
{
pmd_val(*pmd_entry) = __pa(pte);
}
-static inline struct page *
-pte_alloc_one (struct mm_struct *mm, unsigned long addr)
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+ unsigned long addr)
{
-#ifdef XEN
- struct page *pte = alloc_xenheap_page();
- memset(pte,0,PAGE_SIZE);
-#else
- struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-#endif
-
- return pte;
+ return virt_to_page(pgtable_quicklist_alloc());
}
-static inline pte_t *
-pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long addr)
{
-#ifdef XEN
- pte_t *pte = (pte_t *)alloc_xenheap_page();
- memset(pte,0,PAGE_SIZE);
-#else
- pte_t *pte = (pte_t
*)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-#endif
-
- return pte;
+ return pgtable_quicklist_alloc();
}
-static inline void
-pte_free (struct page *pte)
+static inline void pte_free(struct page *pte)
{
-#ifdef XEN
- free_xenheap_page(pte);
-#else
- __free_page(pte);
-#endif
+ pgtable_quicklist_free(page_address(pte));
}
-static inline void
-pte_free_kernel (pte_t *pte)
+static inline void pte_free_kernel(pte_t * pte)
{
-#ifdef XEN
- free_xenheap_page((unsigned long) pte);
-#else
- free_page((unsigned long) pte);
-#endif
+ pgtable_quicklist_free(pte);
}
-#define __pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte))
+#define __pte_free_tlb(tlb, pte) pte_free(pte)
-extern void check_pgt_cache (void);
+extern void check_pgt_cache(void);
-#endif /* _ASM_IA64_PGALLOC_H */
+#endif /* _ASM_IA64_PGALLOC_H */
diff -r 23217792aa3b -r d34925e4144b
xen/include/asm-ia64/linux-xen/asm/processor.h
--- a/xen/include/asm-ia64/linux-xen/asm/processor.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/processor.h Thu Sep 1 17:09:27 2005
@@ -43,14 +43,6 @@
#define TASK_SIZE (current->thread.task_size)
/*
- * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a
mapping for
- * address-space MM. Note that with 32-bit tasks, this is still
DEFAULT_TASK_SIZE,
- * because the kernel may have installed helper-mappings above TASK_SIZE. For
example,
- * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
- */
-#define MM_VM_SIZE(mm) DEFAULT_TASK_SIZE
-
-/*
* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
@@ -94,11 +86,10 @@
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif
+
#ifdef XEN
#include <asm/xenprocessor.h>
-#endif
-
-#ifndef XEN
+#else
/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
__u64 reserved0 : 1;
@@ -150,9 +141,6 @@
__u64 nsec_per_cyc; /*
(1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
__u64 unimpl_va_mask; /* mask of unimplemented virtual address bits
(from PAL) */
__u64 unimpl_pa_mask; /* mask of unimplemented physical address bits
(from PAL) */
- __u64 *pgd_quick;
- __u64 *pmd_quick;
- __u64 pgtable_cache_sz;
__u64 itc_freq; /* frequency of ITC counter */
__u64 proc_freq; /* frequency of processor */
__u64 cyc_per_usec; /* itc_freq/1000000 */
@@ -189,22 +177,6 @@
};
DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
-
-typedef union {
- struct {
- __u64 kr0;
- __u64 kr1;
- __u64 kr2;
- __u64 kr3;
- __u64 kr4;
- __u64 kr5;
- __u64 kr6;
- __u64 kr7;
- };
- __u64 _kr[8];
-} cpu_kr_ia64_t;
-
-DECLARE_PER_CPU(cpu_kr_ia64_t, cpu_kr);
/*
* The "local" data variable. It refers to the per-CPU data of the currently
executing
@@ -435,7 +407,10 @@
* task_struct at this point.
*/
-/* Return TRUE if task T owns the fph partition of the CPU we're running on. */
+/*
+ * Return TRUE if task T owns the fph partition of the CPU we're running on.
+ * Must be called from code that has preemption disabled.
+ */
#ifndef XEN
#define ia64_is_local_fpu_owner(t)
\
({
\
@@ -445,7 +420,10 @@
})
#endif
-/* Mark task T as owning the fph partition of the CPU we're running on. */
+/*
+ * Mark task T as owning the fph partition of the CPU we're running on.
+ * Must be called from code that has preemption disabled.
+ */
#define ia64_set_local_fpu_owner(t) do {
\
struct task_struct *__ia64_slfo_task = (t);
\
__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();
\
diff -r 23217792aa3b -r d34925e4144b
xen/include/asm-ia64/linux-xen/asm/spinlock.h
--- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h Thu Sep 1 17:09:27 2005
@@ -120,35 +120,6 @@
#define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
-#ifdef XEN
-/*
- * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
- * reentered recursively on the same CPU. All critical regions that may form
- * part of a recursively-nested set must be protected by these forms. If there
- * are any critical regions that cannot form part of such a set, they can use
- * standard spin_[un]lock().
- */
-#define _raw_spin_lock_recursive(_lock) \
- do { \
- int cpu = smp_processor_id(); \
- if ( likely((_lock)->recurse_cpu != cpu) ) \
- { \
- spin_lock(_lock); \
- (_lock)->recurse_cpu = cpu; \
- } \
- (_lock)->recurse_cnt++; \
- } while ( 0 )
-
-#define _raw_spin_unlock_recursive(_lock) \
- do { \
- if ( likely(--(_lock)->recurse_cnt == 0) ) \
- { \
- (_lock)->recurse_cpu = -1; \
- spin_unlock(_lock); \
- } \
- } while ( 0 )
-#endif
-
typedef struct {
volatile unsigned int read_counter : 31;
volatile unsigned int write_lock : 1;
@@ -238,4 +209,7 @@
clear_bit(31, (x));
\
})
+#ifdef XEN
+#include <asm/xenspinlock.h>
+#endif
#endif /* _ASM_IA64_SPINLOCK_H */
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/linux-xen/asm/system.h
--- a/xen/include/asm-ia64/linux-xen/asm/system.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/system.h Thu Sep 1 17:09:27 2005
@@ -18,19 +18,14 @@
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/percpu.h>
-#ifdef XEN
-#include <asm/xensystem.h>
-#endif
#define GATE_ADDR __IA64_UL_CONST(0xa000000000000000)
/*
* 0xa000000000000000+2*PERCPU_PAGE_SIZE
* - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
*/
-#ifndef XEN
#define KERNEL_START __IA64_UL_CONST(0xa000000100000000)
#define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
-#endif
#ifndef __ASSEMBLY__
@@ -188,8 +183,6 @@
#ifdef __KERNEL__
-#define prepare_to_switch() do { } while(0)
-
#ifdef CONFIG_IA32_SUPPORT
# define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0)
#else
@@ -223,7 +216,6 @@
# define PERFMON_IS_SYSWIDE() (0)
#endif
-#ifndef XEN
#define IA64_HAS_EXTRA_STATE(t)
\
((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)
\
|| IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
@@ -236,7 +228,6 @@
ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);
\
(last) = ia64_switch_to((next));
\
} while (0)
-#endif
#ifdef CONFIG_SMP
/*
@@ -247,9 +238,9 @@
*/
# define switch_to(prev,next,last) do {
\
if (ia64_psr(ia64_task_regs(prev))->mfh &&
ia64_is_local_fpu_owner(prev)) { \
- /* ia64_psr(ia64_task_regs(prev))->mfh = 0; */
\
- /* (prev)->thread.flags |= IA64_THREAD_FPH_VALID; */
\
- /* __ia64_save_fpu((prev)->thread.fph); */
\
+ ia64_psr(ia64_task_regs(prev))->mfh = 0;
\
+ (prev)->thread.flags |= IA64_THREAD_FPH_VALID;
\
+ __ia64_save_fpu((prev)->thread.fph);
\
}
\
__switch_to(prev, next, last);
\
} while (0)
@@ -281,19 +272,20 @@
* of that CPU which will not be released, because there we wait for the
* tasklist_lock to become available.
*/
-#define prepare_arch_switch(rq, next) \
-do { \
- spin_lock(&(next)->switch_lock); \
- spin_unlock(&(rq)->lock); \
-} while (0)
-#define finish_arch_switch(rq, prev) spin_unlock_irq(&(prev)->switch_lock)
-#define task_running(rq, p) ((rq)->curr == (p) ||
spin_is_locked(&(p)->switch_lock))
+#define __ARCH_WANT_UNLOCKED_CTXSW
#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
void cpu_idle_wait(void);
+
+#define arch_align_stack(x) (x)
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
+#ifdef XEN
+#include <asm/xensystem.h>
+#endif
+
#endif /* _ASM_IA64_SYSTEM_H */
diff -r 23217792aa3b -r d34925e4144b
xen/include/asm-ia64/linux-xen/asm/tlbflush.h
--- a/xen/include/asm-ia64/linux-xen/asm/tlbflush.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/tlbflush.h Thu Sep 1 17:09:27 2005
@@ -37,6 +37,7 @@
local_finish_flush_tlb_mm (struct mm_struct *mm)
{
#ifndef XEN
+// FIXME SMP?
if (mm == current->active_mm)
activate_context(mm);
#endif
@@ -54,6 +55,7 @@
return;
#ifndef XEN
+// FIXME SMP?
mm->context = 0;
#endif
@@ -81,6 +83,7 @@
if (vma->vm_mm == current->active_mm)
ia64_ptcl(addr, (PAGE_SHIFT << 2));
#ifndef XEN
+// FIXME SMP?
else
vma->vm_mm->context = 0;
#endif
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/linux-xen/asm/types.h
--- a/xen/include/asm-ia64/linux-xen/asm/types.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/types.h Thu Sep 1 17:09:27 2005
@@ -1,12 +1,5 @@
#ifndef _ASM_IA64_TYPES_H
#define _ASM_IA64_TYPES_H
-#ifdef XEN
-#ifndef __ASSEMBLY__
-typedef unsigned long ssize_t;
-typedef unsigned long size_t;
-typedef long long loff_t;
-#endif
-#endif
/*
* This file is never included by application software unless explicitly
requested (e.g.,
@@ -68,28 +61,6 @@
typedef __s64 s64;
typedef __u64 u64;
-#ifdef XEN
-/*
- * Below are truly Linux-specific types that should never collide with
- * any application/library that wants linux/types.h.
- */
-
-#ifdef __CHECKER__
-#define __bitwise __attribute__((bitwise))
-#else
-#define __bitwise
-#endif
-
-typedef __u16 __bitwise __le16;
-typedef __u16 __bitwise __be16;
-typedef __u32 __bitwise __le32;
-typedef __u32 __bitwise __be32;
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
-typedef __u64 __bitwise __le64;
-typedef __u64 __bitwise __be64;
-#endif
-#endif
-
#define BITS_PER_LONG 64
/* DMA addresses are 64-bits wide, in general. */
@@ -101,4 +72,8 @@
# endif /* __KERNEL__ */
#endif /* !__ASSEMBLY__ */
+#ifdef XEN
+#include <asm/xentypes.h>
+#endif
+
#endif /* _ASM_IA64_TYPES_H */
diff -r 23217792aa3b -r d34925e4144b
xen/include/asm-ia64/linux-xen/asm/uaccess.h
--- a/xen/include/asm-ia64/linux-xen/asm/uaccess.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/uaccess.h Thu Sep 1 17:09:27 2005
@@ -32,16 +32,15 @@
* David Mosberger-Tang <davidm@xxxxxxxxxx>
*/
-#ifdef CONFIG_VTI
-#include <asm/vmx_uaccess.h>
-#else // CONFIG_VTI
-
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
#include <asm/intrinsics.h>
#include <asm/pgtable.h>
+#include <asm/io.h>
/*
* For historical reasons, the following macros are grossly misnamed:
@@ -65,7 +64,6 @@
* point inside the virtually mapped linear page table.
*/
#ifdef XEN
-/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */
#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
#define __access_ok(addr, size, segment) (!IS_VMM_ADDRESS((unsigned
long)(addr)))
#else
@@ -79,7 +77,8 @@
#endif
#define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
-static inline int
+/* this function will go away soon - use access_ok() instead */
+static inline int __deprecated
verify_area (int type, const void __user *addr, unsigned long size)
{
return access_ok(type, addr, size) ? 0 : -EFAULT;
@@ -353,7 +352,6 @@
__su_ret; \
})
-#endif // CONFIG_VTI
/* Generic code can't deal with the location-relative format that we use for
compactness. */
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE
@@ -378,4 +376,40 @@
return 0;
}
+#ifndef XEN
+#define ARCH_HAS_TRANSLATE_MEM_PTR 1
+static __inline__ char *
+xlate_dev_mem_ptr (unsigned long p)
+{
+ struct page *page;
+ char * ptr;
+
+ page = pfn_to_page(p >> PAGE_SHIFT);
+ if (PageUncached(page))
+ ptr = (char *)p + __IA64_UNCACHED_OFFSET;
+ else
+ ptr = __va(p);
+
+ return ptr;
+}
+
+/*
+ * Convert a virtual cached kernel memory pointer to an uncached pointer
+ */
+static __inline__ char *
+xlate_dev_kmem_ptr (char * p)
+{
+ struct page *page;
+ char * ptr;
+
+ page = virt_to_page((unsigned long)p >> PAGE_SHIFT);
+ if (PageUncached(page))
+ ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET;
+ else
+ ptr = p;
+
+ return ptr;
+}
+#endif
+
#endif /* _ASM_IA64_UACCESS_H */
diff -r 23217792aa3b -r d34925e4144b
xen/include/asm-ia64/linux-xen/linux/cpumask.h
--- a/xen/include/asm-ia64/linux-xen/linux/cpumask.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/linux-xen/linux/cpumask.h Thu Sep 1 17:09:27 2005
@@ -10,6 +10,8 @@
*
* For details of cpumask_scnprintf() and cpumask_parse(),
* see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
+ * For details of cpulist_scnprintf() and cpulist_parse(), see
+ * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
*
* The available cpumask operations are:
*
@@ -46,6 +48,8 @@
*
* int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
* int cpumask_parse(ubuf, ulen, mask) Parse ascii string as cpumask
+ * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
+ * int cpulist_parse(buf, map) Parse ascii string as cpulist
*
* for_each_cpu_mask(cpu, mask) for-loop cpu over mask
*
@@ -268,12 +272,26 @@
return bitmap_scnprintf(buf, len, srcp->bits, nbits);
}
-#define cpumask_parse(ubuf, ulen, src) \
- __cpumask_parse((ubuf), (ulen), &(src), NR_CPUS)
+#define cpumask_parse(ubuf, ulen, dst) \
+ __cpumask_parse((ubuf), (ulen), &(dst), NR_CPUS)
static inline int __cpumask_parse(const char __user *buf, int len,
cpumask_t *dstp, int nbits)
{
return bitmap_parse(buf, len, dstp->bits, nbits);
+}
+
+#define cpulist_scnprintf(buf, len, src) \
+ __cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
+static inline int __cpulist_scnprintf(char *buf, int len,
+ const cpumask_t *srcp, int nbits)
+{
+ return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
+}
+
+#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
+static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
+{
+ return bitmap_parselist(buf, dstp->bits, nbits);
}
#if NR_CPUS > 1
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/linux-xen/linux/hardirq.h
--- a/xen/include/asm-ia64/linux-xen/linux/hardirq.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/linux-xen/linux/hardirq.h Thu Sep 1 17:09:27 2005
@@ -2,6 +2,7 @@
#define LINUX_HARDIRQ_H
#include <linux/config.h>
+#include <linux/preempt.h>
#include <linux/smp_lock.h>
#include <asm/hardirq.h>
#include <asm/system.h>
@@ -43,12 +44,18 @@
#define __IRQ_MASK(x) ((1UL << (x))-1)
#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
-#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+
+#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
+#ifndef XEN
+#error PREEMPT_ACTIVE is too low!
+#endif
+#endif
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
@@ -60,10 +67,10 @@
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
-#ifndef XEN
+#ifdef XEN
+#define in_interrupt() 0 // FIXME SMP LATER
+#else
#define in_interrupt() (irq_count())
-#else
-#define in_interrupt() 0 // FIXME LATER
#endif
#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/linux-xen/linux/interrupt.h
--- a/xen/include/asm-ia64/linux-xen/linux/interrupt.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/linux-xen/linux/interrupt.h Thu Sep 1 17:09:27 2005
@@ -123,7 +123,9 @@
};
asmlinkage void do_softirq(void);
-//extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
+#ifndef XEN
+extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
+#endif
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/xenprocessor.h
--- a/xen/include/asm-ia64/xenprocessor.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/xenprocessor.h Thu Sep 1 17:09:27 2005
@@ -213,4 +213,20 @@
ret; \
})
+typedef union {
+ struct {
+ __u64 kr0;
+ __u64 kr1;
+ __u64 kr2;
+ __u64 kr3;
+ __u64 kr4;
+ __u64 kr5;
+ __u64 kr6;
+ __u64 kr7;
+ };
+ __u64 _kr[8];
+} cpu_kr_ia64_t;
+
+DECLARE_PER_CPU(cpu_kr_ia64_t, cpu_kr);
+
#endif // _ASM_IA64_XENPROCESSOR_H
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/xensystem.h
--- a/xen/include/asm-ia64/xensystem.h Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/xensystem.h Thu Sep 1 17:09:27 2005
@@ -22,7 +22,9 @@
#endif // CONFIG_VTI
#define XEN_START_ADDR 0xf000000000000000
+#undef KERNEL_START
#define KERNEL_START 0xf000000004000000
+#undef PERCPU_ADDR
#define PERCPU_ADDR 0xf100000000000000-PERCPU_PAGE_SIZE
#define SHAREDINFO_ADDR 0xf100000000000000
#define VHPT_ADDR 0xf200000000000000
@@ -31,8 +33,10 @@
#ifndef __ASSEMBLY__
+#undef IA64_HAS_EXTRA_STATE
#define IA64_HAS_EXTRA_STATE(t) 0
+#undef __switch_to
#ifdef CONFIG_VTI
extern struct task_struct *vmx_ia64_switch_to (void *next_task);
#define __switch_to(prev,next,last) do { \
diff -r 23217792aa3b -r d34925e4144b xen/arch/ia64/linux-xen/hpsim_ssc.h
--- /dev/null Wed Aug 31 23:21:24 2005
+++ b/xen/arch/ia64/linux-xen/hpsim_ssc.h Thu Sep 1 17:09:27 2005
@@ -0,0 +1,55 @@
+/*
+ * Platform dependent support for HP simulator.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@xxxxxxxxxx>
+ * Copyright (C) 1999 Vijay Chander <vijay@xxxxxxxxxxxx>
+ */
+#ifndef _IA64_PLATFORM_HPSIM_SSC_H
+#define _IA64_PLATFORM_HPSIM_SSC_H
+
+/* Simulator system calls: */
+
+#define SSC_CONSOLE_INIT 20
+#define SSC_GETCHAR 21
+#define SSC_PUTCHAR 31
+#define SSC_CONNECT_INTERRUPT 58
+#define SSC_GENERATE_INTERRUPT 59
+#define SSC_SET_PERIODIC_INTERRUPT 60
+#define SSC_GET_RTC 65
+#define SSC_EXIT 66
+#define SSC_LOAD_SYMBOLS 69
+#define SSC_GET_TOD 74
+#define SSC_CTL_TRACE 76
+
+#define SSC_NETDEV_PROBE 100
+#define SSC_NETDEV_SEND 101
+#define SSC_NETDEV_RECV 102
+#define SSC_NETDEV_ATTACH 103
+#define SSC_NETDEV_DETACH 104
+
+/*
+ * Simulator system call.
+ */
+extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
+
+#ifdef XEN
+/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong
+ * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */
+#define SSC_OPEN 50
+#define SSC_CLOSE 51
+#define SSC_READ 52
+#define SSC_WRITE 53
+#define SSC_GET_COMPLETION 54
+#define SSC_WAIT_COMPLETION 55
+
+#define SSC_WRITE_ACCESS 2
+#define SSC_READ_ACCESS 1
+
+struct ssc_disk_req {
+ unsigned long addr;
+ unsigned long len;
+};
+#endif
+
+#endif /* _IA64_PLATFORM_HPSIM_SSC_H */
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/xengcc_intrin.h
--- /dev/null Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/xengcc_intrin.h Thu Sep 1 17:09:27 2005
@@ -0,0 +1,59 @@
+#ifndef _ASM_IA64_XENGCC_INTRIN_H
+#define _ASM_IA64_XENGCC_INTRIN_H
+/*
+ * Flushrs instruction stream.
+ */
+#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
+
+#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
+
+#define ia64_get_rsc() \
+({ \
+ unsigned long val; \
+ asm volatile ("mov %0=ar.rsc;;" : "=r"(val) :: "memory"); \
+ val; \
+})
+
+#define ia64_set_rsc(val) \
+ asm volatile ("mov ar.rsc=%0;;" :: "r"(val) : "memory")
+
+#define ia64_get_bspstore() \
+({ \
+ unsigned long val; \
+ asm volatile ("mov %0=ar.bspstore;;" : "=r"(val) :: "memory"); \
+ val; \
+})
+
+#define ia64_set_bspstore(val) \
+ asm volatile ("mov ar.bspstore=%0;;" :: "r"(val) : "memory")
+
+#define ia64_get_rnat() \
+({ \
+ unsigned long val; \
+ asm volatile ("mov %0=ar.rnat;" : "=r"(val) :: "memory"); \
+ val; \
+})
+
+#define ia64_set_rnat(val) \
+ asm volatile ("mov ar.rnat=%0;;" :: "r"(val) : "memory")
+
+#define ia64_ttag(addr)							\
+({									\
+	__u64 ia64_intri_res;						\
+	asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
+	ia64_intri_res;							\
+})
+
+#define ia64_get_dcr() \
+({ \
+ __u64 result; \
+ asm volatile ("mov %0=cr.dcr" : "=r"(result) : ); \
+ result; \
+})
+
+#define ia64_set_dcr(val) \
+({ \
+ asm volatile ("mov cr.dcr=%0" :: "r"(val) ); \
+})
+
+#endif /* _ASM_IA64_XENGCC_INTRIN_H */
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/xenia64regs.h
--- /dev/null Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/xenia64regs.h Thu Sep 1 17:09:27 2005
@@ -0,0 +1,31 @@
+#ifndef _ASM_IA64_XENIA64REGS_H
+#define _ASM_IA64_XENIA64REGS_H
+
+#define IA64_REG_CR_DCR 0
+#define IA64_REG_CR_ITM 1
+#define IA64_REG_CR_IVA 2
+#define IA64_REG_CR_PTA 8
+#define IA64_REG_CR_IPSR 16
+#define IA64_REG_CR_ISR 17
+#define IA64_REG_CR_IIP 19
+#define IA64_REG_CR_IFA 20
+#define IA64_REG_CR_ITIR 21
+#define IA64_REG_CR_IIPA 22
+#define IA64_REG_CR_IFS 23
+#define IA64_REG_CR_IIM 24
+#define IA64_REG_CR_IHA 25
+#define IA64_REG_CR_LID 64
+#define IA64_REG_CR_IVR 65
+#define IA64_REG_CR_TPR 66
+#define IA64_REG_CR_EOI 67
+#define IA64_REG_CR_IRR0 68
+#define IA64_REG_CR_IRR1 69
+#define IA64_REG_CR_IRR2 70
+#define IA64_REG_CR_IRR3 71
+#define IA64_REG_CR_ITV 72
+#define IA64_REG_CR_PMV 73
+#define IA64_REG_CR_CMCV 74
+#define IA64_REG_CR_LRR0 80
+#define IA64_REG_CR_LRR1 81
+
+#endif /* _ASM_IA64_XENIA64REGS_H */
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/xenkregs.h
--- /dev/null Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/xenkregs.h Thu Sep 1 17:09:27 2005
@@ -0,0 +1,37 @@
+#ifndef _ASM_IA64_XENKREGS_H
+#define _ASM_IA64_XENKREGS_H
+
+/*
+ * Translation registers:
+ */
+#define IA64_TR_SHARED_INFO 3 /* dtr3: page shared with domain */
+#define IA64_TR_VHPT 4 /* dtr4: vhpt */
+#define IA64_TR_ARCH_INFO 5
+
+#ifdef CONFIG_VTI
+#define IA64_TR_VHPT_IN_DOM	5	/* dtr5: Double mapping for vhpt table in domain space */
+#define IA64_TR_XEN_IN_DOM	6	/* itr6, dtr6: Double mapping for xen image in domain space */
+#define IA64_TR_RR7_SWITCH_STUB	7	/* dtr7: mapping for rr7 switch stub */
+#define IA64_TEMP_PHYSICAL	8	/* itr8, dtr8: temp mapping for guest physical memory 256M */
+#endif // CONFIG_VTI
+
+/* Processor status register bits: */
+#define IA64_PSR_VM_BIT 46
+#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT)
+
+/* Interruption Function State */
+#define IA64_IFS_V_BIT 63
+#define IA64_IFS_V (__IA64_UL(1) << IA64_IFS_V_BIT)
+
+/* Page Table Address */
+#define IA64_PTA_VE_BIT 0
+#define IA64_PTA_SIZE_BIT 2
+#define IA64_PTA_VF_BIT 8
+#define IA64_PTA_BASE_BIT 15
+
+#define IA64_PTA_VE (__IA64_UL(1) << IA64_PTA_VE_BIT)
+#define IA64_PTA_SIZE (__IA64_UL(0x3f) << IA64_PTA_SIZE_BIT)
+#define IA64_PTA_VF (__IA64_UL(1) << IA64_PTA_VF_BIT)
+#define IA64_PTA_BASE (__IA64_UL(0) - ((__IA64_UL(1) << IA64_PTA_BASE_BIT)))
+
+#endif /* _ASM_IA64_XENKREGS_H */
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/xenpage.h
--- /dev/null Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/xenpage.h Thu Sep 1 17:09:27 2005
@@ -0,0 +1,42 @@
+#ifndef _ASM_IA64_XENPAGE_H
+#define _ASM_IA64_XENPAGE_H
+
+#ifdef CONFIG_DISCONTIGMEM
+#error "xenpage.h: page macros need to be defined for CONFIG_DISCONTIGMEM"
+#endif
+
+#undef pfn_valid
+#undef page_to_pfn
+#undef pfn_to_page
+# define pfn_valid(pfn) (0)
+# define page_to_pfn(_page) ((unsigned long) ((_page) - frame_table))
+# define pfn_to_page(_pfn) (frame_table + (_pfn))
+
+#undef page_to_phys
+#undef virt_to_page
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+#define page_to_virt(_page) phys_to_virt(page_to_phys(_page))
+#define phys_to_page(kaddr) pfn_to_page(((kaddr) >> PAGE_SHIFT))
+
+#ifndef __ASSEMBLY__
+typedef union xen_va {
+ struct {
+ unsigned long off : 60;
+ unsigned long reg : 4;
+ } f;
+ unsigned long l;
+ void *p;
+} xen_va;
+#endif
+
+#undef __pa
+#undef __va
+#define __pa(x)		({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
+#define __va(x)		({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
+
+#undef PAGE_OFFSET
+#define PAGE_OFFSET __IA64_UL_CONST(0xf000000000000000)
+
+#endif /* _ASM_IA64_XENPAGE_H */
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/xenspinlock.h
--- /dev/null Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/xenspinlock.h Thu Sep 1 17:09:27 2005
@@ -0,0 +1,30 @@
+#ifndef _ASM_IA64_XENSPINLOCK_H
+#define _ASM_IA64_XENSPINLOCK_H
+
+/*
+ * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
+ * reentered recursively on the same CPU. All critical regions that may form
+ * part of a recursively-nested set must be protected by these forms. If there
+ * are any critical regions that cannot form part of such a set, they can use
+ * standard spin_[un]lock().
+ */
+#define _raw_spin_lock_recursive(_lock) \
+ do { \
+ int cpu = smp_processor_id(); \
+ if ( likely((_lock)->recurse_cpu != cpu) ) \
+ { \
+ spin_lock(_lock); \
+ (_lock)->recurse_cpu = cpu; \
+ } \
+ (_lock)->recurse_cnt++; \
+ } while ( 0 )
+
+#define _raw_spin_unlock_recursive(_lock) \
+ do { \
+ if ( likely(--(_lock)->recurse_cnt == 0) ) \
+ { \
+ (_lock)->recurse_cpu = -1; \
+ spin_unlock(_lock); \
+ } \
+ } while ( 0 )
+#endif /* _ASM_IA64_XENSPINLOCK_H */
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/xentypes.h
--- /dev/null Wed Aug 31 23:21:24 2005
+++ b/xen/include/asm-ia64/xentypes.h Thu Sep 1 17:09:27 2005
@@ -0,0 +1,29 @@
+#ifndef _ASM_IA64_XENTYPES_H
+#define _ASM_IA64_XENTYPES_H
+
+#ifndef __ASSEMBLY__
+typedef unsigned long ssize_t;
+typedef unsigned long size_t;
+typedef long long loff_t;
+
+#ifdef __KERNEL__
+/* these lines taken from linux/types.h. they belong in xen/types.h */
+#ifdef __CHECKER__
+#define __bitwise __attribute__((bitwise))
+#else
+#define __bitwise
+#endif
+
+typedef __u16 __bitwise __le16;
+typedef __u16 __bitwise __be16;
+typedef __u32 __bitwise __le32;
+typedef __u32 __bitwise __be32;
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __u64 __bitwise __le64;
+typedef __u64 __bitwise __be64;
+#endif
+
+# endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_XENTYPES_H */
diff -r 23217792aa3b -r d34925e4144b xen/include/asm-ia64/linux-xen/asm/hpsim_ssc.h
--- a/xen/include/asm-ia64/linux-xen/asm/hpsim_ssc.h Wed Aug 31 23:21:24 2005
+++ /dev/null Thu Sep 1 17:09:27 2005
@@ -1,55 +0,0 @@
-/*
- * Platform dependent support for HP simulator.
- *
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@xxxxxxxxxx>
- * Copyright (C) 1999 Vijay Chander <vijay@xxxxxxxxxxxx>
- */
-#ifndef _IA64_PLATFORM_HPSIM_SSC_H
-#define _IA64_PLATFORM_HPSIM_SSC_H
-
-/* Simulator system calls: */
-
-#define SSC_CONSOLE_INIT 20
-#define SSC_GETCHAR 21
-#define SSC_PUTCHAR 31
-#define SSC_CONNECT_INTERRUPT 58
-#define SSC_GENERATE_INTERRUPT 59
-#define SSC_SET_PERIODIC_INTERRUPT 60
-#define SSC_GET_RTC 65
-#define SSC_EXIT 66
-#define SSC_LOAD_SYMBOLS 69
-#define SSC_GET_TOD 74
-#define SSC_CTL_TRACE 76
-
-#define SSC_NETDEV_PROBE 100
-#define SSC_NETDEV_SEND 101
-#define SSC_NETDEV_RECV 102
-#define SSC_NETDEV_ATTACH 103
-#define SSC_NETDEV_DETACH 104
-
-/*
- * Simulator system call.
- */
-extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
-
-#ifdef XEN
-/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong
- * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */
-#define SSC_OPEN 50
-#define SSC_CLOSE 51
-#define SSC_READ 52
-#define SSC_WRITE 53
-#define SSC_GET_COMPLETION 54
-#define SSC_WAIT_COMPLETION 55
-
-#define SSC_WRITE_ACCESS 2
-#define SSC_READ_ACCESS 1
-
-struct ssc_disk_req {
- unsigned long addr;
- unsigned long len;
-};
-#endif
-
-#endif /* _IA64_PLATFORM_HPSIM_SSC_H */
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|