Hi,
attached is my patch to use a configurable privilege level for PV-domain
privilege level 0 emulation.
The privilege level to use can be set in xen/include/asm-ia64/config.h (in
this patch I'm still using '2', as before).
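For illustration only (not part of the patch): a small stand-alone C sketch
of what the new vcpu_pl_adjust() helper does, using CONFIG_CPL0_EMUL=2 from
config.h and IA64_PSR_CPL0_BIT=32 from the ia64 headers. It clamps the 2-bit
cpl field at the given shift to at least CONFIG_CPL0_EMUL, so a guest kernel
running at cpl 0 (or 1) is emulated at cpl 2 while user cpl 3 passes through
unchanged:

#include <stdint.h>
#include <stdio.h>

#define CONFIG_CPL0_EMUL  2   /* as in xen/include/asm-ia64/config.h */
#define IA64_PSR_CPL0_BIT 32  /* psr.cpl occupies bits 32-33 */

/* same logic as the helper added to xen/include/asm-ia64/vcpu.h */
static uint64_t vcpu_pl_adjust(uint64_t reg, uint64_t shift)
{
    uint64_t pl = reg & ((uint64_t)3 << shift);

    if (pl < ((uint64_t)CONFIG_CPL0_EMUL << shift))
        pl = (uint64_t)CONFIG_CPL0_EMUL << shift;
    return (reg & ~((uint64_t)3 << shift)) | pl;
}

int main(void)
{
    uint64_t cpl;

    /* prints: 0->2 1->2 2->2 3->3 */
    for (cpl = 0; cpl < 4; cpl++)
        printf("%llu->%llu\n", (unsigned long long)cpl,
               (unsigned long long)
               ((vcpu_pl_adjust(cpl << IA64_PSR_CPL0_BIT,
                                IA64_PSR_CPL0_BIT)
                 >> IA64_PSR_CPL0_BIT) & 3));
    return 0;
}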
Tested by booting dom0.
Juergen
--
Juergen Gross Principal Developer
IP SW OS6 Telephone: +49 (0) 89 636 47950
Fujitsu Siemens Computers e-mail: juergen.gross@xxxxxxxxxxxxxxxxxxx
Otto-Hahn-Ring 6 Internet: www.fujitsu-siemens.com
D-81739 Muenchen Company details: www.fujitsu-siemens.com/imprint.html
# HG changeset patch
# User juergen.gross@xxxxxxxxxxxxxxxxxxx
# Node ID 985ececb1dcd1c7ef37170c641b41141a87afaa2
# Parent 8a6a6d4afcb31c24ee87a5d30bebec41e8d38126
don't use hard-wired privilege level 2 for domain kernel
Signed-off-by: Juergen Gross <juergen.gross@xxxxxxxxxxxxxxxxxxx>
diff -r 8a6a6d4afcb3 -r 985ececb1dcd xen/arch/ia64/xen/faults.c
--- a/xen/arch/ia64/xen/faults.c Fri Jun 22 11:48:49 2007 -0600
+++ b/xen/arch/ia64/xen/faults.c Tue Jun 26 10:52:55 2007 +0200
@@ -38,10 +38,9 @@ extern int ia64_hyperprivop(unsigned lon
extern int ia64_hyperprivop(unsigned long, REGS *);
extern IA64FAULT ia64_hypercall(struct pt_regs *regs);
-#define IA64_PSR_CPL1 (__IA64_UL(1) << IA64_PSR_CPL1_BIT)
// note IA64_PSR_PK removed from following, why is this necessary?
#define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
- IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
+ IA64_PSR_DT | IA64_PSR_RT | \
IA64_PSR_IT | IA64_PSR_BN)
#define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
@@ -92,6 +91,7 @@ static void reflect_interruption(unsigne
regs->cr_iip = ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL;
regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
+ regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
if (PSCB(v, dcr) & IA64_DCR_BE)
regs->cr_ipsr |= IA64_PSR_BE;
@@ -137,6 +137,7 @@ void reflect_event(void)
regs->cr_iip = v->arch.event_callback_ip;
regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
+ regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
if (PSCB(v, dcr) & IA64_DCR_BE)
regs->cr_ipsr |= IA64_PSR_BE;
@@ -236,6 +237,7 @@ void ia64_do_page_fault(unsigned long ad
((unsigned long)PSCBX(current, iva) + fault) & ~0xffUL;
regs->cr_ipsr =
(regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
+ regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
if (PSCB(current, hpsr_dfh))
regs->cr_ipsr |= IA64_PSR_DFH;
diff -r 8a6a6d4afcb3 -r 985ececb1dcd xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S Fri Jun 22 11:48:49 2007 -0600
+++ b/xen/arch/ia64/xen/hyperprivop.S Tue Jun 26 10:52:55 2007 +0200
@@ -18,9 +18,8 @@
#define _PAGE_PPN_MASK 0x0003fffffffff000 //asm/pgtable.h doesn't do assembly
-#define PAGE_PHYS 0x0010000000000761 //__pgprot(__DIRTY_BITS|
- // _PAGE_PL_2|_PAGE_AR_RWX)
-#define _PAGE_PL_2 (2<<7)
+#define PAGE_PHYS (0x0010000000000661 | _PAGE_PL_PRIV)
+ //__pgprot(__DIRTY_BITS|_PAGE_PL_PRIV|_PAGE_AR_RWX)
#if 1 // change to 0 to turn off all fast paths
# define FAST_HYPERPRIVOPS
@@ -62,7 +61,7 @@
#define IA64_PSR_CPL0 (__IA64_UL(1) << IA64_PSR_CPL0_BIT)
// note IA64_PSR_PK removed from following, why is this necessary?
#define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
- IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
+ IA64_PSR_DT | IA64_PSR_RT | \
IA64_PSR_IT | IA64_PSR_BN)
#define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
@@ -249,8 +248,8 @@ ENTRY(hyper_ssm_i)
mov r29=r30 ;;
movl r28=DELIVER_PSR_SET;;
movl r27=~DELIVER_PSR_CLR;;
+ and r29=r29,r27;;
or r29=r29,r28;;
- and r29=r29,r27;;
// set hpsr_dfh to ipsr
adds r28=XSI_HPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
ld1 r28=[r28];;
@@ -258,8 +257,7 @@ ENTRY(hyper_ssm_i)
mov cr.ipsr=r29;;
// set shared_mem ipsr (from ipsr in r30 with ipsr.ri already set)
extr.u r29=r30,IA64_PSR_CPL0_BIT,2;;
- cmp.eq p6,p7=3,r29;;
-(p6) dep r30=-1,r30,IA64_PSR_CPL0_BIT,2
+ cmp.eq p7,p0=CONFIG_CPL0_EMUL,r29;;
(p7) dep r30=0,r30,IA64_PSR_CPL0_BIT,2
;;
// FOR SSM_I ONLY, also turn on psr.i and psr.ic
@@ -441,20 +439,18 @@ GLOBAL_ENTRY(fast_tick_reflect)
st8 [r21]=r16 ;;
// set cr.ipsr (make sure cpl==2!)
mov r29=r17 ;;
- movl r28=DELIVER_PSR_SET;;
- movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);;
+ movl r28=DELIVER_PSR_SET | (CONFIG_CPL0_EMUL << IA64_PSR_CPL0_BIT);;
+ movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0|IA64_PSR_CPL1);;
+ and r29=r29,r27;;
or r29=r29,r28;;
- and r29=r29,r27;;
mov cr.ipsr=r29;;
// set shared_mem ipsr (from ipsr in r17 with ipsr.ri already set)
extr.u r29=r17,IA64_PSR_CPL0_BIT,2;;
- cmp.eq p6,p7=3,r29;;
-(p6) dep r17=-1,r17,IA64_PSR_CPL0_BIT,2
+ cmp.eq p7,p0=CONFIG_CPL0_EMUL,r29;;
(p7) dep r17=0,r17,IA64_PSR_CPL0_BIT,2
;;
movl r28=(IA64_PSR_DT|IA64_PSR_IT|IA64_PSR_RT);;
movl r27=~(IA64_PSR_BE|IA64_PSR_PP|IA64_PSR_BN|IA64_PSR_I|IA64_PSR_IC);;
- dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
or r17=r17,r28;;
and r17=r17,r27;;
ld4 r16=[r18];;
@@ -620,10 +616,10 @@ ENTRY(fast_reflect)
movl r21=THIS_CPU(current_psr_i_addr)
mov r29=r30 ;;
ld8 r21=[r21]
- movl r28=DELIVER_PSR_SET;;
- movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);;
+ movl r28=DELIVER_PSR_SET | (CONFIG_CPL0_EMUL << IA64_PSR_CPL0_BIT);;
+ movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0|IA64_PSR_CPL1);;
+ and r29=r29,r27;;
or r29=r29,r28;;
- and r29=r29,r27;;
// set hpsr_dfh to ipsr
adds r28=XSI_HPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
ld1 r28=[r28];;
@@ -631,8 +627,7 @@ ENTRY(fast_reflect)
mov cr.ipsr=r29;;
// set shared_mem ipsr (from ipsr in r30 with ipsr.ri already set)
extr.u r29=r30,IA64_PSR_CPL0_BIT,2;;
- cmp.eq p6,p7=3,r29;;
-(p6) dep r30=-1,r30,IA64_PSR_CPL0_BIT,2
+ cmp.eq p7,p0=CONFIG_CPL0_EMUL,r29;;
(p7) dep r30=0,r30,IA64_PSR_CPL0_BIT,2
;;
movl r28=(IA64_PSR_DT|IA64_PSR_IT|IA64_PSR_RT);;
@@ -1112,14 +1107,17 @@ 1: // OK now, let's do an rfi.
just_do_rfi:
// r18=&vpsr.i|vpsr.ic, r21==vpsr, r22=vcr.iip
- mov cr.iip=r22;;
+ mov cr.iip=r22
+ extr.u r19=r21,IA64_PSR_CPL0_BIT,2
adds r20=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
+ cmp.gtu p7,p0=CONFIG_CPL0_EMUL,r19
ld8 r20=[r20];;
+(p7) mov r19=CONFIG_CPL0_EMUL
dep r20=0,r20,38,25;; // ensure ifs has no reserved bits set
mov cr.ifs=r20 ;;
- // ipsr.cpl == (vcr.ipsr.cpl == 0) 2 : 3;
+ // ipsr.cpl = max(vcr.ipsr.cpl, CONFIG_CPL0_EMUL);
movl r20=THIS_CPU(current_psr_i_addr)
- dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
+ dep r21=r19,r21,IA64_PSR_CPL0_BIT,2;;
// vpsr.i = vcr.ipsr.i; vpsr.ic = vcr.ipsr.ic
ld8 r20=[r20]
mov r19=1
@@ -1287,12 +1285,12 @@ ENTRY(rfi_with_interrupt)
movl r22=THIS_CPU(current_psr_i_addr)
// set cr.ipsr (make sure cpl==2!)
mov r29=r17
- movl r28=DELIVER_PSR_SET;;
+ movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0|IA64_PSR_CPL1)
+ movl r28=DELIVER_PSR_SET | (CONFIG_CPL0_EMUL << IA64_PSR_CPL0_BIT);;
mov r20=1;;
ld8 r22=[r22]
- movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0)
+ and r29=r29,r27;;
or r29=r29,r28;;
- and r29=r29,r27;;
mov cr.ipsr=r29;;
// v.ipsr and v.iip are already set (and v.iip validated) as rfi target
// set shared_mem interrupt_delivery_enabled to 0
@@ -1935,7 +1933,7 @@ ENTRY(fast_insert)
or r20=r20,r21 ;; // r20==return value from lookup_domain_mpa
// r16=pteval,r20=pteval2
movl r19=_PAGE_PPN_MASK
- movl r21=_PAGE_PL_2;;
+ movl r21=_PAGE_PL_PRIV;;
andcm r25=r16,r19;; // r25==pteval & ~_PAGE_PPN_MASK
and r22=r20,r19;;
or r22=r22,r21;;
diff -r 8a6a6d4afcb3 -r 985ececb1dcd xen/arch/ia64/xen/ivt.S
--- a/xen/arch/ia64/xen/ivt.S Fri Jun 22 11:48:49 2007 -0600
+++ b/xen/arch/ia64/xen/ivt.S Tue Jun 26 10:52:55 2007 +0200
@@ -510,7 +510,8 @@ ENTRY(break_fault)
(p7) br.spnt.many dispatch_privop_fault
;;
#endif
- // if (ipsr.cpl == 2 && (iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX)
+ // if (ipsr.cpl == CONFIG_CPL0_EMUL &&
+ // (iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX)
// this is a hyperprivop. A hyperprivop is hand-coded assembly with
// psr.ic off which means it can make no calls, cannot use r1-r15,
// and it can have no memory accesses unless they are to pinned
@@ -524,7 +525,7 @@ ENTRY(break_fault)
;;
cmp.gtu p7,p0=r21,r20
;;
- cmp.eq.and p7,p0=2,r19 // ipsr.cpl==2
+ cmp.eq.and p7,p0=CONFIG_CPL0_EMUL,r19 // ipsr.cpl==CONFIG_CPL0_EMUL
(p7) br.sptk.many fast_hyperprivop
;;
movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
@@ -535,7 +536,7 @@ ENTRY(break_fault)
;;
ld4 r23=[r23];;
cmp4.eq p6,p0=r23,r17;; // Xen-reserved breakimm?
- cmp.eq.and p6,p0=2,r19
+ cmp.eq.and p6,p0=CONFIG_CPL0_EMUL,r19
(p6) br.spnt.many fast_hypercall
;;
br.sptk.many fast_break_reflect
diff -r 8a6a6d4afcb3 -r 985ececb1dcd xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c Fri Jun 22 11:48:49 2007 -0600
+++ b/xen/arch/ia64/xen/mm.c Tue Jun 26 10:52:55 2007 +0200
@@ -546,7 +546,7 @@ u64 translate_domain_pte(u64 pteval, u64
/* Ignore non-addr bits of pteval2 and force PL0->2
(PL3 is unaffected) */
return (pteval & ~_PAGE_PPN_MASK) |
- (pteval2 & _PAGE_PPN_MASK) | _PAGE_PL_2;
+ (pteval2 & _PAGE_PPN_MASK) | _PAGE_PL_PRIV;
}
// given a current domain metaphysical address, return the physical address
@@ -711,7 +711,8 @@ unsigned long lookup_domain_mpa(struct d
p2m_entry_set(entry, NULL, __pte(0));
//XXX This is a work around until the emulation memory access to a region
// where memory or device are attached is implemented.
- return pte_val(pfn_pte(0, __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
+ return pte_val(pfn_pte(0, __pgprot(__DIRTY_BITS | _PAGE_PL_PRIV |
+ _PAGE_AR_RWX)));
}
// FIXME: ONLY USE FOR DOMAIN PAGE_SIZE == PAGE_SIZE
@@ -785,7 +786,7 @@ __assign_new_domain_page(struct domain *
set_pte_rel(pte,
pfn_pte(maddr >> PAGE_SHIFT,
__pgprot(_PAGE_PGC_ALLOCATED | __DIRTY_BITS |
- _PAGE_PL_2 | _PAGE_AR_RWX)));
+ _PAGE_PL_PRIV | _PAGE_AR_RWX)));
smp_mb();
return p;
@@ -820,7 +821,7 @@ static unsigned long
static unsigned long
flags_to_prot (unsigned long flags)
{
- unsigned long res = _PAGE_PL_2 | __DIRTY_BITS;
+ unsigned long res = _PAGE_PL_PRIV | __DIRTY_BITS;
res |= flags & ASSIGN_readonly ? _PAGE_AR_R: _PAGE_AR_RWX;
res |= flags & ASSIGN_nocache ? _PAGE_MA_UC: _PAGE_MA_WB;
diff -r 8a6a6d4afcb3 -r 985ececb1dcd xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c Fri Jun 22 11:48:49 2007 -0600
+++ b/xen/arch/ia64/xen/vcpu.c Tue Jun 26 10:52:55 2007 +0200
@@ -158,7 +158,7 @@ void vcpu_init_regs(struct vcpu *v)
regs->cr_ipsr &= ~(IA64_PSR_BITS_TO_CLEAR
| IA64_PSR_RI | IA64_PSR_IS);
// domain runs at PL2
- regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
+ regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
// lazy fp
PSCB(v, hpsr_dfh) = 1;
PSCB(v, hpsr_mfh) = 0;
@@ -174,7 +174,7 @@ void vcpu_init_regs(struct vcpu *v)
VCPU(v, dcr) = 0;
} else {
init_all_rr(v);
- regs->ar_rsc |= (2 << 2); /* force PL2/3 */
+ regs->ar_rsc = vcpu_pl_adjust(regs->ar_rsc, 2);
VCPU(v, banknum) = 1;
VCPU(v, metaphysical_mode) = 1;
VCPU(v, interrupt_mask_addr) =
@@ -496,7 +496,7 @@ IA64FAULT vcpu_set_psr(VCPU * vcpu, u64
PSCB(vcpu, interrupt_collection_enabled) = vpsr.ic;
vcpu_set_metaphysical_mode(vcpu, !(vpsr.dt && vpsr.rt && vpsr.it));
- newpsr.cpl |= vpsr.cpl | 2;
+ newpsr.cpl |= max(vpsr.cpl, CONFIG_CPL0_EMUL);
if (PSCB(vcpu, banknum) != vpsr.bn) {
if (vpsr.bn)
@@ -535,10 +535,10 @@ u64 vcpu_get_psr(VCPU * vcpu)
newpsr.ia64_psr.pp = PSCB(vcpu, vpsr_pp);
/* Fool cpl. */
- if (ipsr.ia64_psr.cpl < 3)
+ if (ipsr.ia64_psr.cpl <= CONFIG_CPL0_EMUL)
newpsr.ia64_psr.cpl = 0;
else
- newpsr.ia64_psr.cpl = 3;
+ newpsr.ia64_psr.cpl = ipsr.ia64_psr.cpl;
newpsr.ia64_psr.bn = PSCB(vcpu, banknum);
@@ -1646,7 +1646,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
} else {
*pteval = (address & _PAGE_PPN_MASK) |
- __DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX;
+ __DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RWX;
*itir = PAGE_SHIFT << 2;
perfc_incr(phys_translate);
return IA64_NO_FAULT;
@@ -1711,7 +1711,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
REGS *regs = vcpu_regs(vcpu);
// NOTE: This is specific code for linux kernel
// We assume region 7 is identity mapped
- if (region == 7 && ia64_psr(regs)->cpl == 2) {
+ if (region == 7 && ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL) {
pte.val = address & _PAGE_PPN_MASK;
pte.val = pte.val | pgprot_val(PAGE_KERNEL);
goto out;
@@ -2062,8 +2062,8 @@ vcpu_set_tr_entry_rid(TR_ENTRY * trp, u6
trp->rid = rid;
ps = trp->ps;
new_pte.val = pte;
- if (new_pte.pl < 2)
- new_pte.pl = 2;
+ if (new_pte.pl < CONFIG_CPL0_EMUL)
+ new_pte.pl = CONFIG_CPL0_EMUL;
trp->vadr = ifa & ~0xfff;
if (ps > 12) { // "ignore" relevant low-order bits
new_pte.ppn &= ~((1UL << (ps - 12)) - 1);
diff -r 8a6a6d4afcb3 -r 985ececb1dcd xen/arch/ia64/xen/xenasm.S
--- a/xen/arch/ia64/xen/xenasm.S Fri Jun 22 11:48:49 2007 -0600
+++ b/xen/arch/ia64/xen/xenasm.S Tue Jun 26 10:52:55 2007 +0200
@@ -11,6 +11,7 @@
#include <asm/pgtable.h>
#include <asm/vhpt.h>
#include <asm/asm-xsi-offsets.h>
+#include <asm/vmmu.h>
#include <public/xen.h>
// Change rr7 to the passed value while ensuring
@@ -148,7 +149,7 @@ 1:
// Shared info
mov r24=XSI_SHIFT<<2
- movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
+ movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RW)
;;
ptr.d in3,r24
or r23=in1,r25 // construct PA | page properties
diff -r 8a6a6d4afcb3 -r 985ececb1dcd xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h Fri Jun 22 11:48:49 2007 -0600
+++ b/xen/include/asm-ia64/config.h Tue Jun 26 10:52:55 2007 +0200
@@ -54,6 +54,9 @@
#define NR_hypercalls 64
+
+/* PV domains use this value for priv. level 0 emulation */
+#define CONFIG_CPL0_EMUL 2
#ifndef __ASSEMBLY__
diff -r 8a6a6d4afcb3 -r 985ececb1dcd xen/include/asm-ia64/linux-xen/asm/ptrace.h
--- a/xen/include/asm-ia64/linux-xen/asm/ptrace.h Fri Jun 22 11:48:49 2007 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/ptrace.h Tue Jun 26 10:52:55 2007 +0200
@@ -267,7 +267,7 @@ struct switch_stack {
# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
#ifdef XEN
# define guest_mode(regs) (ia64_psr(regs)->cpl != 0)
-# define guest_kernel_mode(regs) (ia64_psr(regs)->cpl == 2)
+# define guest_kernel_mode(regs) (ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL)
#else
# define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
#endif
diff -r 8a6a6d4afcb3 -r 985ececb1dcd xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h Fri Jun 22 11:48:49 2007 -0600
+++ b/xen/include/asm-ia64/vcpu.h Tue Jun 26 10:52:55 2007 +0200
@@ -203,6 +203,16 @@ static inline s64 vcpu_get_next_timer_ns
return vcpu_get_next_timer_ns;
}
+static inline u64 vcpu_pl_adjust(u64 reg, u64 shift)
+{
+ u64 pl;
+
+ pl = reg & (3UL << shift);
+ if (pl < ((u64)CONFIG_CPL0_EMUL << shift))
+ pl = (u64)CONFIG_CPL0_EMUL << shift;
+ return (reg & ~(3UL << shift)) | pl;
+}
+
#define verbose(a...) do {if (vcpu_verbose) printk(a);} while(0)
//#define vcpu_quick_region_check(_tr_regions,_ifa) 1
@@ -210,5 +220,5 @@ static inline s64 vcpu_get_next_timer_ns
(_tr_regions & (1 << ((unsigned long)_ifa >> 61)))
#define vcpu_quick_region_set(_tr_regions,_ifa) \
do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
-
+
#endif
diff -r 8a6a6d4afcb3 -r 985ececb1dcd xen/include/asm-ia64/vmmu.h
--- a/xen/include/asm-ia64/vmmu.h Fri Jun 22 11:48:49 2007 -0600
+++ b/xen/include/asm-ia64/vmmu.h Tue Jun 26 10:52:55 2007 +0200
@@ -32,6 +32,7 @@
#define VCPU_VHPT_ORDER (VCPU_VHPT_SHIFT - PAGE_SHIFT)
#define VTLB(v,_x) (v->arch.vtlb._x)
#define VHPT(v,_x) (v->arch.vhpt._x)
+#define _PAGE_PL_PRIV (CONFIG_CPL0_EMUL << 7)
#ifndef __ASSEMBLY__
#include <xen/config.h>