# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 14a34d811e81cce0d224106d631ce009ce01f145
# Parent 259ba45ed77d7d4a78013b311884405c0a865c77
[IA64] introduce P2M conversion
Introduce the P2M conversion functions needed by the dom0vp model.
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
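
In outline, the patch gives each domain a Linux-style three-level page table
keyed by metaphysical address and adds lookup helpers over it. Below is a
simplified sketch of the walk those helpers perform (illustrative only: the
real code is lookup_noalloc_domain_pte() and ____lookup_domain_mpa() in the
diff, and the function name p2m_lookup_sketch is hypothetical):

    /* Illustrative sketch only -- mirrors lookup_noalloc_domain_pte() plus
     * ____lookup_domain_mpa(); not part of the patch. */
    static unsigned long
    p2m_lookup_sketch(struct mm_struct *mm, unsigned long mpaddr)
    {
        pgd_t *pgd = pgd_offset(mm, mpaddr);    /* first level */
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
            return INVALID_MFN;
        pud = pud_offset(pgd, mpaddr);          /* second level */
        if (pud_none(*pud))
            return INVALID_MFN;
        pmd = pmd_offset(pud, mpaddr);          /* third level */
        if (pmd_none(*pmd))
            return INVALID_MFN;
        pte = pte_offset_map(pmd, mpaddr);      /* leaf entry */
        if (!pte_present(*pte))
            return INVALID_MFN;
        return pte_val(*pte) & _PFN_MASK;       /* machine address bits */
    }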
diff -r 259ba45ed77d -r 14a34d811e81 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Tue Apr 25 10:54:45 2006 -0700
+++ b/xen/arch/ia64/xen/domain.c Tue Apr 25 13:06:57 2006 -0600
@@ -54,7 +54,9 @@
#include <asm/regionreg.h>
#include <asm/dom_fw.h>
+#ifndef CONFIG_XEN_IA64_DOM0_VP
#define CONFIG_DOMAIN0_CONTIGUOUS
+#endif
unsigned long dom0_start = -1L;
unsigned long dom0_size = 512*1024*1024;
unsigned long dom0_align = 64*1024*1024;
@@ -503,98 +505,290 @@ void new_thread(struct vcpu *v,
}
}
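+// Return the PTE for mpaddr in the domain's P2M page table, allocating
+// any missing intermediate levels on the way down.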
+static pte_t*
+lookup_alloc_domain_pte(struct domain* d, unsigned long mpaddr)
+{
+ struct page_info *pt;
+ struct mm_struct *mm = d->arch.mm;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ BUG_ON(mm->pgd == NULL);
+ pgd = pgd_offset(mm, mpaddr);
+ if (pgd_none(*pgd)) {
+ pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
+ pt = maddr_to_page(pgd_val(*pgd));
+ list_add_tail(&pt->list, &d->arch.mm->pt_list);
+ }
+
+ pud = pud_offset(pgd, mpaddr);
+ if (pud_none(*pud)) {
+ pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
+ pt = maddr_to_page(pud_val(*pud));
+ list_add_tail(&pt->list, &d->arch.mm->pt_list);
+ }
+
+ pmd = pmd_offset(pud, mpaddr);
+ if (pmd_none(*pmd)) {
+ pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm, mpaddr));
+ pt = maddr_to_page(pmd_val(*pmd));
+ list_add_tail(&pt->list, &d->arch.mm->pt_list);
+ }
+
+ return pte_offset_map(pmd, mpaddr);
+}
+
+//XXX should xxx_none() be used instead of !xxx_present()?
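+// Non-allocating variant: returns NULL if any level of the walk is not present.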
+static pte_t*
+lookup_noalloc_domain_pte(struct domain* d, unsigned long mpaddr)
+{
+ struct mm_struct *mm = d->arch.mm;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ BUG_ON(mm->pgd == NULL);
+ pgd = pgd_offset(mm, mpaddr);
+ if (!pgd_present(*pgd))
+ goto not_present;
+
+ pud = pud_offset(pgd, mpaddr);
+ if (!pud_present(*pud))
+ goto not_present;
+
+ pmd = pmd_offset(pud, mpaddr);
+ if (!pmd_present(*pmd))
+ goto not_present;
+
+ return pte_offset_map(pmd, mpaddr);
+
+not_present:
+ return NULL;
+}
+
+#ifdef CONFIG_XEN_IA64_DOM0_VP
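+// Like lookup_noalloc_domain_pte(), but tests xxx_none() rather than
+// !xxx_present() at each level.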
+static pte_t*
+lookup_noalloc_domain_pte_none(struct domain* d, unsigned long mpaddr)
+{
+ struct mm_struct *mm = d->arch.mm;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ BUG_ON(mm->pgd == NULL);
+ pgd = pgd_offset(mm, mpaddr);
+ if (pgd_none(*pgd))
+ goto not_present;
+
+ pud = pud_offset(pgd, mpaddr);
+ if (pud_none(*pud))
+ goto not_present;
+
+ pmd = pmd_offset(pud, mpaddr);
+ if (pmd_none(*pmd))
+ goto not_present;
+
+ return pte_offset_map(pmd, mpaddr);
+
+not_present:
+ return NULL;
+}
+#endif
/* Allocate a new page for domain and map it to the specified metaphysical
address. */
-static struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr)
-{
- unsigned long maddr;
- struct page_info *p;
+struct page_info *
+__assign_new_domain_page(struct domain *d, unsigned long mpaddr, pte_t* pte)
+{
+ struct page_info *p = NULL;
+ unsigned long maddr;
+
+ BUG_ON(!pte_none(*pte));
#ifdef CONFIG_DOMAIN0_CONTIGUOUS
- if (d == dom0) {
- if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
- /* FIXME: is it true ?
- dom0 memory is not contiguous! */
- printk("assign_new_domain_page: bad domain0 "
- "mpaddr=%lx, start=%lx, end=%lx!\n",
- mpaddr, dom0_start, dom0_start+dom0_size);
- while(1);
- }
- p = mfn_to_page((mpaddr >> PAGE_SHIFT));
- }
- else
-#endif
- {
- p = alloc_domheap_page(d);
- // zero out pages for security reasons
- if (p) memset(__va(page_to_maddr(p)),0,PAGE_SIZE);
- }
- if (unlikely(!p)) {
- printf("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
- return(p);
- }
- maddr = page_to_maddr (p);
- if (unlikely(maddr > __get_cpu_var(vhpt_paddr)
- && maddr < __get_cpu_var(vhpt_pend))) {
- /* FIXME: how can this happen ?
- vhpt is allocated by alloc_domheap_page. */
- printf("assign_new_domain_page: reassigned vhpt page %lx!!\n",
- maddr);
- }
- assign_domain_page (d, mpaddr, maddr);
- return p;
+ if (d == dom0) {
+#if 0
+ if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
+ /* FIXME: is it true ?
+ dom0 memory is not contiguous! */
+ panic("assign_new_domain_page: bad domain0 "
+ "mpaddr=%lx, start=%lx, end=%lx!\n",
+ mpaddr, dom0_start, dom0_start+dom0_size);
+ }
+#endif
+ p = mfn_to_page((mpaddr >> PAGE_SHIFT));
+ return p;
+ }
+ else
+#endif
+ {
+ p = alloc_domheap_page(d);
+ // zero out pages for security reasons
+ if (p)
+ clear_page(page_to_virt(p));
+ }
+ if (unlikely(!p)) {
+ printf("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
+ return(p);
+ }
+ maddr = page_to_maddr (p);
+ if (unlikely(maddr > __get_cpu_var(vhpt_paddr)
+ && maddr < __get_cpu_var(vhpt_pend))) {
+ /* FIXME: how can this happen ?
+ vhpt is allocated by alloc_domheap_page. */
+ printf("assign_new_domain_page: reassigned vhpt page %lx!!\n",
+ maddr);
+ }
+
+ set_pte(pte, pfn_pte(maddr >> PAGE_SHIFT,
+ __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
+
+ //XXX CONFIG_XEN_IA64_DOM0_VP
+ // TODO racy
+ if ((mpaddr & GPFN_IO_MASK) == GPFN_MEM)
+ set_gpfn_from_mfn(page_to_mfn(p), mpaddr >> PAGE_SHIFT);
+ return p;
+}
+
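+// Allocate a new page and map it at mpaddr, unless a mapping is already
+// present there.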
+struct page_info *
+assign_new_domain_page(struct domain *d, unsigned long mpaddr)
+{
+#ifdef CONFIG_DOMAIN0_CONTIGUOUS
+ pte_t dummy_pte = __pte(0);
+ return __assign_new_domain_page(d, mpaddr, &dummy_pte);
+#else
+ struct page_info *p = NULL;
+ pte_t *pte;
+
+ pte = lookup_alloc_domain_pte(d, mpaddr);
+ if (pte_none(*pte)) {
+ p = __assign_new_domain_page(d, mpaddr, pte);
+ } else {
+ DPRINTK("%s: d 0x%p mpaddr %lx already mapped!\n",
+ __func__, d, mpaddr);
+ }
+
+ return p;
+#endif
+}
+
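+// dom0 variant: allocation failure here is fatal, so panic instead of
+// returning NULL.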
+void
+assign_new_domain0_page(struct domain *d, unsigned long mpaddr)
+{
+#ifndef CONFIG_DOMAIN0_CONTIGUOUS
+ pte_t *pte;
+
+ BUG_ON(d != dom0);
+ pte = lookup_alloc_domain_pte(d, mpaddr);
+ if (pte_none(*pte)) {
+ struct page_info *p = __assign_new_domain_page(d, mpaddr, pte);
+ if (p == NULL) {
+ panic("%s: can't allocate page for dom0", __func__);
+ }
+ }
+#endif
}
/* map a physical address to the specified metaphysical addr */
void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
{
- struct mm_struct *mm = d->arch.mm;
- struct page_info *pt;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
pte_t *pte;
- if (!mm->pgd) {
- printk("assign_domain_page: domain pgd must exist!\n");
- return;
- }
- pgd = pgd_offset(mm,mpaddr);
- if (pgd_none(*pgd))
- {
- pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
- pt = maddr_to_page(pgd_val(*pgd));
- list_add_tail(&pt->list, &d->arch.mm->pt_list);
- }
-
- pud = pud_offset(pgd, mpaddr);
- if (pud_none(*pud))
- {
- pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
- pt = maddr_to_page(pud_val(*pud));
- list_add_tail(&pt->list, &d->arch.mm->pt_list);
- }
-
- pmd = pmd_offset(pud, mpaddr);
- if (pmd_none(*pmd))
- {
- pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm,mpaddr));
-// pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
- pt = maddr_to_page(pmd_val(*pmd));
- list_add_tail(&pt->list, &d->arch.mm->pt_list);
- }
-
- pte = pte_offset_map(pmd, mpaddr);
+ pte = lookup_alloc_domain_pte(d, mpaddr);
if (pte_none(*pte)) {
set_pte(pte, pfn_pte(physaddr >> PAGE_SHIFT,
__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
+
+ //XXX CONFIG_XEN_IA64_DOM0_VP
+ // TODO racy
+ if ((mpaddr & GPFN_IO_MASK) == GPFN_MEM)
+ set_gpfn_from_mfn(physaddr >> PAGE_SHIFT, mpaddr >> PAGE_SHIFT);
}
else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
- if((physaddr>>PAGE_SHIFT)<max_page){
- *(mpt_table + (physaddr>>PAGE_SHIFT))=(mpaddr>>PAGE_SHIFT);
+}
+
+#ifdef CONFIG_XEN_IA64_DOM0_VP
+static void
+assign_domain_same_page(struct domain *d,
+ unsigned long mpaddr, unsigned long size)
+{
+ //XXX optimization
+ unsigned long end = mpaddr + size;
+ for (; mpaddr < end; mpaddr += PAGE_SIZE) {
+ assign_domain_page(d, mpaddr, mpaddr);
}
}
+
+unsigned long
+assign_domain_mmio_page(struct domain *d,
+ unsigned long mpaddr, unsigned long size)
+{
+ if (size == 0) {
+ DPRINTK("%s: domain %p mpaddr 0x%lx size = 0x%lx\n",
+ __func__, d, mpaddr, size);
+ }
+ assign_domain_same_page(d, mpaddr, size);
+ return mpaddr;
+}
+
+unsigned long
+assign_domain_mach_page(struct domain *d,
+ unsigned long mpaddr, unsigned long size)
+{
+ assign_domain_same_page(d, mpaddr, size);
+ return mpaddr;
+}
+
+//XXX sledgehammer.
+// flush finer range.
+void
+domain_page_flush(struct domain* d, unsigned long mpaddr,
+ unsigned long old_mfn, unsigned long new_mfn)
+{
+ struct vcpu* v;
+ //XXX SMP
+ for_each_vcpu(d, v) {
+ vcpu_purge_tr_entry(&v->arch.dtlb);
+ vcpu_purge_tr_entry(&v->arch.itlb);
+ }
+
+ // flush vhpt
+ vhpt_flush();
+ // flush tlb
+ flush_tlb_all();
+}
+
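+// Unmap mpaddr: clear the PTE, invalidate the M2P entry if this domain
+// owns the page, flush stale translations, and drop the page reference.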
+static void
+zap_domain_page_one(struct domain *d, unsigned long mpaddr)
+{
+ struct mm_struct *mm = d->arch.mm;
+ pte_t *pte;
+ pte_t old_pte;
+ unsigned long mfn;
+ struct page_info *page;
+
+ pte = lookup_noalloc_domain_pte_none(d, mpaddr);
+ if (pte == NULL)
+ return;
+ if (pte_none(*pte))
+ return;
+
+ // update pte
+ old_pte = ptep_get_and_clear(mm, mpaddr, pte);
+ mfn = pte_pfn(old_pte);
+ page = mfn_to_page(mfn);
+
+ if (page_get_owner(page) == d) {
+ BUG_ON(get_gpfn_from_mfn(mfn) != (mpaddr >> PAGE_SHIFT));
+ set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
+ }
+
+ domain_page_flush(d, mpaddr, mfn, INVALID_MFN);
+
+ put_page(page);
+}
+#endif
void build_physmap_table(struct domain *d)
{
@@ -620,12 +814,42 @@ void mpafoo(unsigned long mpaddr)
privop_trace = 1;
}
+#ifdef CONFIG_XEN_IA64_DOM0_VP
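+// Core P2M query: return the machine address bits mapped at mpaddr,
+// GPFN_INV_MASK for a not-present VMX entry, or INVALID_MFN if unmapped.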
+unsigned long
+____lookup_domain_mpa(struct domain *d, unsigned long mpaddr)
+{
+ pte_t *pte;
+
+ pte = lookup_noalloc_domain_pte(d, mpaddr);
+ if (pte == NULL)
+ goto not_present;
+
+ if (pte_present(*pte))
+ return (pte->pte & _PFN_MASK);
+ else if (VMX_DOMAIN(d->vcpu[0]))
+ return GPFN_INV_MASK;
+
+not_present:
+ return INVALID_MFN;
+}
+
+unsigned long
+__lookup_domain_mpa(struct domain *d, unsigned long mpaddr)
+{
+ unsigned long machine = ____lookup_domain_mpa(d, mpaddr);
+ if (machine != INVALID_MFN)
+ return machine;
+
+ printk("%s: d 0x%p id %d current 0x%p id %d\n",
+ __func__, d, d->domain_id, current, current->vcpu_id);
+ printk("%s: bad mpa 0x%lx (max_pages 0x%lx)\n",
+ __func__, mpaddr, (unsigned long)d->max_pages << PAGE_SHIFT);
+ return INVALID_MFN;
+}
+#endif
+
unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr)
{
- struct mm_struct *mm = d->arch.mm;
- pgd_t *pgd = pgd_offset(mm, mpaddr);
- pud_t *pud;
- pmd_t *pmd;
pte_t *pte;
#ifdef CONFIG_DOMAIN0_CONTIGUOUS
@@ -642,26 +866,23 @@ unsigned long lookup_domain_mpa(struct d
return *(unsigned long *)pte;
}
#endif
- if (pgd_present(*pgd)) {
- pud = pud_offset(pgd,mpaddr);
- if (pud_present(*pud)) {
- pmd = pmd_offset(pud,mpaddr);
- if (pmd_present(*pmd)) {
- pte = pte_offset_map(pmd,mpaddr);
- if (pte_present(*pte)) {
+ pte = lookup_noalloc_domain_pte(d, mpaddr);
+ if (pte != NULL) {
+ if (pte_present(*pte)) {
//printk("lookup_domain_page: found mapping for %lx,
pte=%lx\n",mpaddr,pte_val(*pte));
- return *(unsigned long *)pte;
- } else if (VMX_DOMAIN(d->vcpu[0]))
- return GPFN_INV_MASK;
- }
- }
- }
- if ((mpaddr >> PAGE_SHIFT) < d->max_pages) {
- printk("lookup_domain_mpa: non-allocated mpa 0x%lx (< 0x%lx)\n",
- mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT);
- } else
- printk("lookup_domain_mpa: bad mpa 0x%lx (> 0x%lx)\n",
- mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT);
+ return *(unsigned long *)pte;
+ } else if (VMX_DOMAIN(d->vcpu[0]))
+ return GPFN_INV_MASK;
+ }
+
+ printk("%s: d 0x%p id %d current 0x%p id %d\n",
+ __func__, d, d->domain_id, current, current->vcpu_id);
+ if ((mpaddr >> PAGE_SHIFT) < d->max_pages)
+ printk("%s: non-allocated mpa 0x%lx (< 0x%lx)\n", __func__,
+ mpaddr, (unsigned long)d->max_pages << PAGE_SHIFT);
+ else
+ printk("%s: bad mpa 0x%lx (=> 0x%lx)\n", __func__,
+ mpaddr, (unsigned long)d->max_pages << PAGE_SHIFT);
mpafoo(mpaddr);
return 0;
}
diff -r 259ba45ed77d -r 14a34d811e81 xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S Tue Apr 25 10:54:45 2006 -0700
+++ b/xen/arch/ia64/xen/hyperprivop.S Tue Apr 25 13:06:57 2006 -0600
@@ -26,9 +26,20 @@
#define FAST_REFLECT_CNT
//#define FAST_TICK // mostly working (unat problems) but default off for now
//#define FAST_TLB_MISS_REFLECT // mostly working but default off for now
+#ifdef CONFIG_XEN_IA64_DOM0_VP
+#undef FAST_ITC //XXX CONFIG_XEN_IA64_DOM0_VP
+ // TODO fast_itc doesn't support dom0 vp yet.
+#else
//#define FAST_ITC // working but default off for now
+#endif
#define FAST_BREAK
-#define FAST_ACCESS_REFLECT
+#ifndef CONFIG_XEN_IA64_DOM0_VP
+# define FAST_ACCESS_REFLECT
+#else
+# undef FAST_ACCESS_REFLECT //XXX CONFIG_XEN_IA64_DOM0_VP
+ // TODO fast_access_reflect
+ // doesn't support dom0 vp yet.
+#endif
#define FAST_RFI
#define FAST_SSM_I
#define FAST_PTC_GA
diff -r 259ba45ed77d -r 14a34d811e81 xen/arch/ia64/xen/process.c
--- a/xen/arch/ia64/xen/process.c Tue Apr 25 10:54:45 2006 -0700
+++ b/xen/arch/ia64/xen/process.c Tue Apr 25 13:06:57 2006 -0600
@@ -81,18 +81,25 @@ void tdpfoo(void) { }
// address, convert the pte for a physical address for (possibly different)
// Xen PAGE_SIZE and return modified pte. (NOTE: TLB insert should use
// PAGE_SIZE!)
-unsigned long translate_domain_pte(unsigned long pteval,
- unsigned long address, unsigned long itir)
+u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps)
{
struct domain *d = current->domain;
- unsigned long mask, pteval2, mpaddr;
+ ia64_itir_t itir = {.itir = itir__};
+ u64 mask, mpaddr, pteval2;
pteval &= ((1UL << 53) - 1);// ignore [63:53] bits
// FIXME address had better be pre-validated on insert
- mask = ~itir_mask(itir);
+ mask = ~itir_mask(itir.itir);
mpaddr = (((pteval & ~_PAGE_ED) & _PAGE_PPN_MASK) & ~mask) |
(address & mask);
+#ifdef CONFIG_XEN_IA64_DOM0_VP
+ if (itir.ps > PAGE_SHIFT) {
+ itir.ps = PAGE_SHIFT;
+ }
+#endif
+ *logps = itir.ps;
+#ifndef CONFIG_XEN_IA64_DOM0_VP
if (d == dom0) {
if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
/*
@@ -112,9 +119,10 @@ unsigned long translate_domain_pte(unsig
printf("translate_domain_pte: bad mpa=0x%lx (> 0x%lx),"
"vadr=0x%lx,pteval=0x%lx,itir=0x%lx\n",
mpaddr, (unsigned long)d->max_pages<<PAGE_SHIFT,
- address, pteval, itir);
+ address, pteval, itir.itir);
tdpfoo();
}
+#endif
pteval2 = lookup_domain_mpa(d,mpaddr);
pteval2 &= _PAGE_PPN_MASK; // ignore non-addr bits
pteval2 |= (pteval & _PAGE_ED);
@@ -128,6 +136,7 @@ unsigned long translate_domain_mpaddr(un
{
unsigned long pteval;
+#ifndef CONFIG_XEN_IA64_DOM0_VP
if (current->domain == dom0) {
if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
printk("translate_domain_mpaddr: out-of-bounds dom0
mpaddr 0x%lx! continuing...\n",
@@ -135,6 +144,7 @@ unsigned long translate_domain_mpaddr(un
tdpfoo();
}
}
+#endif
pteval = lookup_domain_mpa(current->domain,mpaddr);
return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
}
@@ -294,8 +304,9 @@ void ia64_do_page_fault (unsigned long a
again:
fault = vcpu_translate(current,address,is_data,0,&pteval,&itir,&iha);
if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
- pteval = translate_domain_pte(pteval,address,itir);
- vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,(itir>>2)&0x3f);
+ u64 logps;
+ pteval = translate_domain_pte(pteval, address, itir, &logps);
+ vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,logps);
if (fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) {
/* dtlb has been purged in-between. This dtlb was
matching. Undo the work. */
diff -r 259ba45ed77d -r 14a34d811e81 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c Tue Apr 25 10:54:45 2006 -0700
+++ b/xen/arch/ia64/xen/vcpu.c Tue Apr 25 13:06:57 2006 -0600
@@ -25,7 +25,6 @@ extern void getfpreg (unsigned long regn
extern void getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs);
extern void panic_domain(struct pt_regs *, const char *, ...);
-extern unsigned long translate_domain_pte(UINT64,UINT64,UINT64);
extern unsigned long translate_domain_mpaddr(unsigned long);
extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
@@ -1276,6 +1275,7 @@ static inline int vcpu_match_tr_entry(TR
return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
}
+// in_tpa is not used when CONFIG_XEN_IA64_DOM0_VP is defined.
IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, BOOLEAN in_tpa, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
{
unsigned long region = address >> 61;
@@ -1353,8 +1353,12 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
pte = trp->pte;
if (/* is_data && */ pte.p
&& vcpu_match_tr_entry_no_p(trp,address,rid)) {
- if (vcpu->domain==dom0 && !in_tpa) *pteval = pte.val;
- else *pteval = vcpu->arch.dtlb_pte;
+#ifndef CONFIG_XEN_IA64_DOM0_VP
+ if (vcpu->domain==dom0 && !in_tpa)
+ *pteval = pte.val;
+ else
+#endif
+ *pteval = vcpu->arch.dtlb_pte;
*itir = trp->itir;
dtlb_translate_count++;
return IA64_USE_TLB;
@@ -1689,7 +1693,7 @@ IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT6
VCPU translation register access routines
**************************************************************************/
-static inline void vcpu_purge_tr_entry(TR_ENTRY *trp)
+void vcpu_purge_tr_entry(TR_ENTRY *trp)
{
trp->pte.val = 0;
}
@@ -1758,6 +1762,9 @@ void vcpu_itc_no_srlz(VCPU *vcpu, UINT64
//FIXME: kill domain here
while(1);
}
+#ifdef CONFIG_XEN_IA64_DOM0_VP
+ BUG_ON(logps > PAGE_SHIFT);
+#endif
psr = ia64_clear_ic();
ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
ia64_set_psr(psr);
@@ -1798,7 +1805,7 @@ IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64
while(1);
}
//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
- pteval = translate_domain_pte(pte,ifa,itir);
+ pteval = translate_domain_pte(pte, ifa, itir, &logps);
if (!pteval) return IA64_ILLOP_FAULT;
if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
vcpu_itc_no_srlz(vcpu,2,ifa,pteval,pte,logps);
@@ -1818,7 +1825,7 @@ IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64
while(1);
}
//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
- pteval = translate_domain_pte(pte,ifa,itir);
+ pteval = translate_domain_pte(pte, ifa, itir, &logps);
// FIXME: what to do if bad physical address? (machine check?)
if (!pteval) return IA64_ILLOP_FAULT;
if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
diff -r 259ba45ed77d -r 14a34d811e81 xen/arch/ia64/xen/xenmisc.c
--- a/xen/arch/ia64/xen/xenmisc.c Tue Apr 25 10:54:45 2006 -0700
+++ b/xen/arch/ia64/xen/xenmisc.c Tue Apr 25 13:06:57 2006 -0600
@@ -87,9 +87,12 @@ unsigned long
unsigned long
gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
{
+#ifndef CONFIG_XEN_IA64_DOM0_VP
if (d == dom0)
return(gpfn);
- else {
+ else
+#endif
+ {
unsigned long pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT);
if (!pte) {
printk("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
diff -r 259ba45ed77d -r 14a34d811e81 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h Tue Apr 25 10:54:45 2006 -0700
+++ b/xen/include/asm-ia64/domain.h Tue Apr 25 13:06:57 2006 -0600
@@ -162,6 +162,15 @@ struct mm_struct {
extern struct mm_struct init_mm;
+struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr);
+void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);
+void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr);
+void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags);
+#ifdef CONFIG_XEN_IA64_DOM0_VP
+unsigned long assign_domain_mmio_page(struct domain *d, unsigned long mpaddr, unsigned long size);
+unsigned long assign_domain_mach_page(struct domain *d, unsigned long mpaddr, unsigned long size);
+#endif
+#endif
+
#include <asm/uaccess.h> /* for KERNEL_DS */
#include <asm/pgtable.h>
diff -r 259ba45ed77d -r 14a34d811e81 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Tue Apr 25 10:54:45 2006 -0700
+++ b/xen/include/asm-ia64/mm.h Tue Apr 25 13:06:57 2006 -0600
@@ -415,8 +415,12 @@ extern int nr_swap_pages;
extern unsigned long *mpt_table;
extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
+extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps);
extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
-#undef machine_to_phys_mapping
+#ifdef CONFIG_XEN_IA64_DOM0_VP
+extern unsigned long __lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
+extern unsigned long ____lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
+#endif
#define machine_to_phys_mapping mpt_table
#define INVALID_M2P_ENTRY (~0UL)
diff -r 259ba45ed77d -r 14a34d811e81 xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h Tue Apr 25 10:54:45 2006 -0700
+++ b/xen/include/asm-ia64/vcpu.h Tue Apr 25 13:06:57 2006 -0600
@@ -133,6 +133,7 @@ extern IA64FAULT vcpu_set_pkr(VCPU *vcpu
extern IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
extern IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key);
/* TLB */
+extern void vcpu_purge_tr_entry(TR_ENTRY *trp);
extern IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 padr,
UINT64 itir, UINT64 ifa);
extern IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 padr,
diff -r 259ba45ed77d -r 14a34d811e81 xen/include/asm-ia64/xenprocessor.h
--- a/xen/include/asm-ia64/xenprocessor.h Tue Apr 25 10:54:45 2006 -0700
+++ b/xen/include/asm-ia64/xenprocessor.h Tue Apr 25 13:06:57 2006 -0600
@@ -221,4 +221,20 @@ typedef union {
DECLARE_PER_CPU(cpu_kr_ia64_t, cpu_kr);
+typedef union {
+ struct {
+ u64 rv3 : 2; // 0-1
+ u64 ps : 6; // 2-7
+ u64 key : 24; // 8-31
+ u64 rv4 : 32; // 32-63
+ };
+ struct {
+ u64 __rv3 : 32; // 0-31
+ // next extension to rv4
+ u64 rid : 24; // 32-55
+ u64 __rv4 : 8; // 56-63
+ };
+ u64 itir;
+} ia64_itir_t;
+
#endif // _ASM_IA64_XENPROCESSOR_H
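
For illustration, a minimal sketch (not part of the patch) of how
translate_domain_pte() above uses this union to clamp a guest's page size to
Xen's; itir_raw is a hypothetical input value:

    ia64_itir_t itir = { .itir = itir_raw };  /* decode the raw itir bits */
    if (itir.ps > PAGE_SHIFT)
        itir.ps = PAGE_SHIFT;     /* never insert mappings larger than PAGE_SIZE */
    *logps = itir.ps;             /* return the effective log2 page size to the caller */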