# HG changeset patch
# User dietmar.hahn@xxxxxxxxxxxxxxxxxxx
# Node ID 44ccb8aa58ccf0943c8a7a6c563de2ca1f8a2cb0
# Parent  87b0b6a08dbdf5882c6223b0b6b7b189a15b0482
First simple implementation of protection key support for PV guests.
One PKR with key 0 is reserved for Xen.

Signed-off-by: Dietmar Hahn

diff -r 87b0b6a08dbd -r 44ccb8aa58cc xen/arch/ia64/Rules.mk
--- a/xen/arch/ia64/Rules.mk	Mon Jul 9 09:22:58 2007 -0600
+++ b/xen/arch/ia64/Rules.mk	Wed Jul 11 15:37:09 2007 +0200
@@ -10,6 +10,7 @@ xen_ia64_tlb_track ?= y
 xen_ia64_tlb_track ?= y
 xen_ia64_tlb_track_cnt ?= n
 xen_ia64_tlbflush_clock ?= y
+xen_ia64_use_pkr ?= n

 ifneq ($(COMPILE_ARCH),$(TARGET_ARCH))
 CROSS_COMPILE ?= /usr/local/sp_env/v2.2.5/i686/bin/ia64-unknown-linux-
@@ -54,6 +55,9 @@ ifeq ($(no_warns),y)
 ifeq ($(no_warns),y)
 CFLAGS += -Wa,--fatal-warnings -Werror -Wno-uninitialized
 endif
+ifeq ($(xen_ia64_use_pkr),y)
+CFLAGS += -DCONFIG_XEN_IA64_USE_PKR
+endif

 LDFLAGS := -g

diff -r 87b0b6a08dbd -r 44ccb8aa58cc xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c	Mon Jul 9 09:22:58 2007 -0600
+++ b/xen/arch/ia64/xen/domain.c	Wed Jul 11 15:37:09 2007 +0200
@@ -262,6 +262,9 @@ void context_switch(struct vcpu *prev, s
 		load_region_regs(current);
 		ia64_set_pta(vcpu_pta(current));
 		vcpu_load_kernel_regs(current);
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+		vcpu_load_pkr_regs(current);
+#endif
 		vcpu_set_next_timer(current);
 		if (vcpu_timer_expired(current))
 			vcpu_pend_timer(current);

diff -r 87b0b6a08dbd -r 44ccb8aa58cc xen/arch/ia64/xen/faults.c
--- a/xen/arch/ia64/xen/faults.c	Mon Jul 9 09:22:58 2007 -0600
+++ b/xen/arch/ia64/xen/faults.c	Wed Jul 11 15:37:09 2007 +0200
@@ -189,10 +189,18 @@ void ia64_do_page_fault(unsigned long ad
 	if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
 		struct p2m_entry entry;
 		unsigned long m_pteval;
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+		u64 key = 0x444; /* dummy; overwritten by translate_domain_pte() */
+		m_pteval = translate_domain_pte(pteval, address, itir,
+		                                &logps, &key, &entry);
+		vcpu_itc_no_srlz(current, is_data ? 2 : 1, address,
+		                 m_pteval, pteval, logps, key, &entry);
+#else
 		m_pteval = translate_domain_pte(pteval, address, itir,
 		                                &logps, &entry);
 		vcpu_itc_no_srlz(current, is_data ? 2 : 1, address,
 		                 m_pteval, pteval, logps, &entry);
+#endif
 		if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
 		    p2m_entry_retry(&entry)) {
 			/* dtlb has been purged in-between.  This dtlb was
@@ -572,6 +580,14 @@ ia64_handle_reflection(unsigned long ifa
 	BUG_ON(!(psr & IA64_PSR_CPL));

 	switch (vector) {
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+	case 6:
+		vector = IA64_INST_KEY_MISS_VECTOR;
+		break;
+	case 7:
+		vector = IA64_DATA_KEY_MISS_VECTOR;
+		break;
+#endif
 	case 8:
 		vector = IA64_DIRTY_BIT_VECTOR;
 		break;

diff -r 87b0b6a08dbd -r 44ccb8aa58cc xen/arch/ia64/xen/fw_emul.c
--- a/xen/arch/ia64/xen/fw_emul.c	Mon Jul 9 09:22:58 2007 -0600
+++ b/xen/arch/ia64/xen/fw_emul.c	Wed Jul 11 15:37:09 2007 +0200
@@ -669,7 +669,11 @@ xen_pal_emulator(unsigned long index, u6
 			{
 				.vw = 1,
 				.phys_add_size = 44,
 				.key_size = 16,
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+				.max_pkr = NPKRS,
+#else
 				.max_pkr = 15,
+#endif
 				.hash_tag_id = 0x30,
 				.max_dtr_entry = NDTRS - 1,
 				.max_itr_entry = NITRS - 1,

diff -r 87b0b6a08dbd -r 44ccb8aa58cc xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c	Mon Jul 9 09:22:58 2007 -0600
+++ b/xen/arch/ia64/xen/mm.c	Wed Jul 11 15:37:09 2007 +0200
@@ -448,8 +448,13 @@ gmfn_to_mfn_foreign(struct domain *d, un
 // address, convert the pte for a physical address for (possibly different)
 // Xen PAGE_SIZE and return modified pte.  (NOTE: TLB insert should use
 // PAGE_SIZE!)
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps,
+                         u64* key, struct p2m_entry* entry)
+#else
 u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps,
                          struct p2m_entry* entry)
+#endif
 {
 	struct domain *d = current->domain;
 	ia64_itir_t itir = {.itir = itir__};
@@ -468,6 +473,9 @@ u64 translate_domain_pte(u64 pteval, u64
 	itir.ps = PAGE_SHIFT;

 	*logps = itir.ps;
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+	*key = itir.key;
+#endif

 	pteval2 = lookup_domain_mpa(d, mpaddr, entry);

diff -r 87b0b6a08dbd -r 44ccb8aa58cc xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c	Mon Jul 9 09:22:58 2007 -0600
+++ b/xen/arch/ia64/xen/vcpu.c	Wed Jul 11 15:37:09 2007 +0200
@@ -241,6 +241,54 @@ IA64FAULT vcpu_get_ar(VCPU * vcpu, u64 r
 	return IA64_NO_FAULT;
 }

+#if defined(CONFIG_XEN_IA64_USE_PKR)
+
+/* Load the pkrs from struct arch_vcpu into the processor's protection
+ * key registers.  Called in context_switch().
+ */
+void
+vcpu_load_pkr_regs(VCPU * vcpu)
+{
+	/* TODO */
+	if (PSCBX(vcpu, pkr_flags) & PKR_IN_USE) {
+		int i;
+		for (i = 0; i <= NPKRS; i++)
+			ia64_set_pkr(i, PSCBX(vcpu, pkrs[i]));
+	}
+}
+
+/* Taken from xen/include/asm-ia64/linux-xen/asm/processor.h and modified:
+ * Insert a translation into the instruction and/or data translation
+ * cache.
+ */
+static inline void
+ia64_itc_PKR (__u64 target_mask, __u64 vmaddr, __u64 pte,
+	      __u64 log_page_size, __u64 key)
+{
+	ia64_setreg(_IA64_REG_CR_ITIR, IA64_ITIR_PS_KEY(log_page_size, key));
+	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+	ia64_stop();
+	/* as per EAS2.6, itc must be the last instruction in an instruction
+	 * group
+	 */
+	if (target_mask & 0x1)
+		ia64_itci(pte);
+	if (target_mask & 0x2)
+		ia64_itcd(pte);
+}
+
+/* On first use of psr.pk, install Xen's own pkr (key 0, valid) in the
+ * slot reserved for Xen.
+ */
+static void
+vcpu_set_psr_pk_handling(VCPU * vcpu)
+{
+	if (PSCBX(vcpu, pkr_flags) & PKR_IN_USE) {
+		return;
+	}
+	PSCBX(vcpu, pkrs[NPKRS]) = XEN_PKR_VAL;
+	ia64_set_pkr(NPKRS, XEN_PKR_VAL);
+}
+
+#endif /* defined(CONFIG_XEN_IA64_USE_PKR) */
+
 /**************************************************************************
  VCPU processor status register access routines
 **************************************************************************/
@@ -284,8 +332,10 @@ IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu,
 	// just handle psr.up and psr.pp for now
 	if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP |
 		      IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT |
-		      IA64_PSR_DFL | IA64_PSR_DFH))
+		      IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_PK))
+	{
 		return IA64_ILLOP_FAULT;
+	}
 	if (imm.dfh) {
 		ipsr->dfh = PSCB(vcpu, hpsr_dfh);
 		PSCB(vcpu, vpsr_dfh) = 0;
@@ -309,6 +359,17 @@ IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu,
 		ipsr->be = 0;
 	if (imm.dt)
 		vcpu_set_metaphysical_mode(vcpu, TRUE);
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+	if (imm.pk) {
+		ipsr->pk = 0;
+		PSCBX(vcpu, pkr_flags) &= ~PKR_IN_USE;
+	}
+#else
+	if (imm.pk) {
+		ipsr->pk = 0;
+		printk("%s: protection keys not supported\n", __func__);
+	}
+#endif
 	__asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
 	return IA64_NO_FAULT;
 }
@@ -340,9 +401,12 @@ IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u
 	// just handle psr.sp,pp and psr.i,ic (and user mask) for now
 	mask =
 	    IA64_PSR_PP | IA64_PSR_SP | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_UM |
-	    IA64_PSR_DT | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_BE;
+	    IA64_PSR_DT | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_BE |
+	    IA64_PSR_PK;
 	if (imm24 & ~mask)
+	{
 		return IA64_ILLOP_FAULT;
+	}
 	if (imm.dfh) {
 		PSCB(vcpu, vpsr_dfh) = 1;
 		ipsr->dfh = 1;
@@ -388,6 +452,19 @@ IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u
 		ipsr->be = 1;
 	if (imm.dt)
 		vcpu_set_metaphysical_mode(vcpu, FALSE);
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+	if (imm.pk) {
+		vcpu_set_psr_pk_handling(vcpu);
+		PSCBX(vcpu, pkr_flags) |= PKR_IN_USE;
+		ipsr->pk = 1;
+	} else
+		PSCBX(vcpu, pkr_flags) &= ~PKR_IN_USE;
+#else
+	if (imm.pk) {
+		printk("%s: protection keys not supported\n", __func__);
+		return IA64_ILLOP_FAULT;
+	}
+#endif
 	__asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
 	if (enabling_interrupts &&
 	    vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
@@ -448,6 +525,19 @@ IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u6
 		vcpu_set_metaphysical_mode(vcpu, TRUE);
 	if (newpsr.be)
 		ipsr->be = 1;
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+	if (newpsr.pk) {
+		vcpu_set_psr_pk_handling(vcpu);
+		PSCBX(vcpu, pkr_flags) |= PKR_IN_USE;
+		ipsr->pk = 1;
+	} else
+		PSCBX(vcpu, pkr_flags) &= ~PKR_IN_USE;
+#else
+	if (newpsr.pk) {
+		printk("%s: protection keys not supported\n", __func__);
+		return IA64_ILLOP_FAULT;
+	}
+#endif
 	if (enabling_interrupts &&
 	    vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
 		PSCB(vcpu, pending_interruption) = 1;
@@ -504,6 +594,19 @@ IA64FAULT vcpu_set_psr(VCPU * vcpu, u64 
 		else
 			vcpu_bsw0(vcpu);
 	}
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+	if (vpsr.pk) {
+		vcpu_set_psr_pk_handling(vcpu);
+		newpsr.pk = 1;
+		PSCBX(vcpu, pkr_flags) |= PKR_IN_USE;
+	} else
+		PSCBX(vcpu, pkr_flags) &= ~PKR_IN_USE;
+#else
+	if (vpsr.pk) {
+		printk("%s: protection keys not supported\n", __func__);
+		return IA64_ILLOP_FAULT;
+	}
+#endif

 	regs->cr_ipsr = newpsr.val;

@@ -1385,9 +1488,11 @@ IA64FAULT vcpu_rfi(VCPU * vcpu)
 IA64FAULT vcpu_rfi(VCPU * vcpu)
 {
 	u64 ifs;
+	IA64FAULT fault;
 	REGS *regs = vcpu_regs(vcpu);

-	vcpu_set_psr(vcpu, PSCB(vcpu, ipsr));
+	if ((fault = vcpu_set_psr(vcpu, PSCB(vcpu, ipsr))) != IA64_NO_FAULT)
+		return fault;

 	ifs = PSCB(vcpu, ifs);
 	if (ifs & 0x8000000000000000UL)
@@ -1626,6 +1731,9 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
 	unsigned long pta, rid, rr;
 	union pte_flags pte;
 	TR_ENTRY *trp;
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+	unsigned long key = 0;
+#endif

 	if (PSCB(vcpu, metaphysical_mode) && !(!is_data && region)) {
 		// dom0 may generate an uncacheable physical address (msb=1)
@@ -1716,6 +1824,9 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
 		    region == 7 && ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL) {
 			pte.val = address & _PAGE_PPN_MASK;
 			pte.val = pte.val | optf->im_reg7.pgprot;
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+			key = optf->im_reg7.key;
+#endif
 			goto out;
 		}
 		return is_data ? IA64_ALT_DATA_TLB_VECTOR :
@@ -1741,7 +1852,11 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6

 	/* found mapping in guest VHPT! */
 out:
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+	*itir = (rr & RR_PS_MASK) | (key << IA64_ITIR_KEY);
+#else
 	*itir = rr & RR_PS_MASK;
+#endif
 	*pteval = pte.val;
 	perfc_incr(vhpt_translate);
 	return IA64_NO_FAULT;
@@ -2057,6 +2172,13 @@ IA64FAULT vcpu_get_rr(VCPU * vcpu, u64 r

 IA64FAULT vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval)
 {
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+	if (reg > NPKRS)	/* index too large */
+		return IA64_RSVDREG_FAULT;
+	*pval = (u64) ia64_get_pkr(reg);
+	return IA64_NO_FAULT;
+#endif
+
 #ifndef PKR_USE_FIXED
 	printk("vcpu_get_pkr: called, not implemented yet\n");
 	return IA64_ILLOP_FAULT;
@@ -2069,6 +2191,30 @@ IA64FAULT vcpu_get_pkr(VCPU * vcpu, u64 

 IA64FAULT vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val)
 {
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+	if (!VMX_DOMAIN(vcpu)) {
+		int i;
+		ia64_pkr_t pkr_new, pkr_cur;
+		if (reg >= NPKRS)	/* index too large */
+			return IA64_RSVDREG_FAULT;
+		pkr_new.val = val;
+		if (pkr_new.reserved1)	/* reserved fields */
+			return IA64_RSVDREG_FAULT;
+		if (pkr_new.reserved2)	/* reserved fields */
+			return IA64_RSVDREG_FAULT;
+		/* Invalidate pkrs with the same key. */
+		for (i = 0; i < NPKRS; i++) {
+			pkr_cur.val = PSCBX(vcpu, pkrs[i]);
+			if (pkr_cur.key == pkr_new.key) {
+				pkr_cur.v = 0;
+				PSCBX(vcpu, pkrs[i]) = pkr_cur.val;
+			}
+		}
+		PSCBX(vcpu, pkrs[reg]) = pkr_new.val;
+		ia64_set_pkr(reg, pkr_new.val);
+		return IA64_NO_FAULT;
+	}
+#endif /* defined(CONFIG_XEN_IA64_USE_PKR) */
+
 #ifndef PKR_USE_FIXED
 	printk("vcpu_set_pkr: called, not implemented yet\n");
 	return IA64_ILLOP_FAULT;
@@ -2211,9 +2357,15 @@ IA64FAULT vcpu_set_dtr(VCPU * vcpu, u64 
  VCPU translation cache access routines
 **************************************************************************/

+#if defined(CONFIG_XEN_IA64_USE_PKR)
+void
+vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, u64 vaddr, u64 pte,
+		 u64 mp_pte, u64 logps, u64 key, struct p2m_entry *entry)
+#else
 void
 vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, u64 vaddr, u64 pte,
		 u64 mp_pte, u64 logps, struct p2m_entry *entry)
+#endif
 {
 	unsigned long psr;
 	unsigned long ps = (vcpu->domain == dom0) ? logps : PAGE_SHIFT;
@@ -2229,7 +2381,11 @@ vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, 
 		vcpu_tlb_track_insert_or_dirty(vcpu, vaddr, entry);
 	psr = ia64_clear_ic();
 	pte &= ~(_PAGE_RV2 | _PAGE_RV1);	// Mask out the reserved bits.
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+	ia64_itc_PKR(IorD, vaddr, pte, ps, key);
+#else
 	ia64_itc(IorD, vaddr, pte, ps);	// FIXME: look for bigger mappings
+#endif
 	ia64_set_psr(psr);
 	// ia64_srlz_i(); // no srls req'd, will rfi later
 	if (vcpu->domain == dom0 && ((vaddr >> 61) == 7)) {
@@ -2238,19 +2394,34 @@ vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, 
 		// FIXME: this is dangerous... vhpt_flush_address ensures these
 		// addresses never get flushed.  More work needed if this
 		// ever happens.
//printk("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L< PAGE_SHIFT) +#if defined(CONFIG_XEN_IA64_USE_PKR) + vhpt_multiple_insert(vaddr, pte, logps, key); +#else vhpt_multiple_insert(vaddr, pte, logps); +#endif else +#if defined(CONFIG_XEN_IA64_USE_PKR) + vhpt_insert(vaddr, pte, IA64_ITIR_PS_KEY(logps, key)); +#else vhpt_insert(vaddr, pte, logps << 2); +#endif } // even if domain pagesize is larger than PAGE_SIZE, just put // PAGE_SIZE mapping in the vhpt for now, else purging is complicated else +#if defined(CONFIG_XEN_IA64_USE_PKR) + vhpt_insert(vaddr, pte, IA64_ITIR_PS_KEY(logps, key)); +#else vhpt_insert(vaddr, pte, PAGE_SHIFT << 2); +#endif } IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa) { unsigned long pteval, logps = itir_ps(itir); +#if defined(CONFIG_XEN_IA64_USE_PKR) + u64 key = itir_key(itir); +#endif BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode)); struct p2m_entry entry; @@ -2260,12 +2431,20 @@ IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pt again: //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize +#if defined(CONFIG_XEN_IA64_USE_PKR) + pteval = translate_domain_pte(pte, ifa, itir, &logps, &key, &entry); +#else pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry); +#endif if (!pteval) return IA64_ILLOP_FAULT; if (swap_rr0) set_one_rr(0x0, PSCB(vcpu, rrs[0])); +#if defined(CONFIG_XEN_IA64_USE_PKR) + vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, logps, key, &entry); +#else vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, logps, &entry); +#endif if (swap_rr0) set_metaphysical_rr0(); if (p2m_entry_retry(&entry)) { @@ -2279,6 +2458,9 @@ IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pt IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa) { unsigned long pteval, logps = itir_ps(itir); +#if defined(CONFIG_XEN_IA64_USE_PKR) + u64 key = itir_key(itir); +#endif BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode)); struct p2m_entry entry; @@ -2287,12 +2469,20 @@ IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pt "smaller page size!\n"); again: //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize +#if defined(CONFIG_XEN_IA64_USE_PKR) + pteval = translate_domain_pte(pte, ifa, itir, &logps, &key, &entry); +#else pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry); +#endif if (!pteval) return IA64_ILLOP_FAULT; if (swap_rr0) set_one_rr(0x0, PSCB(vcpu, rrs[0])); +#if defined(CONFIG_XEN_IA64_USE_PKR) + vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, logps, key, &entry); +#else vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, logps, &entry); +#endif if (swap_rr0) set_metaphysical_rr0(); if (p2m_entry_retry(&entry)) { diff -r 87b0b6a08dbd -r 44ccb8aa58cc xen/arch/ia64/xen/vhpt.c --- a/xen/arch/ia64/xen/vhpt.c Mon Jul 9 09:22:58 2007 -0600 +++ b/xen/arch/ia64/xen/vhpt.c Wed Jul 11 15:37:09 2007 +0200 @@ -71,7 +71,11 @@ vhpt_erase(unsigned long vhpt_maddr) // initialize cache too??? } +#if defined(CONFIG_XEN_IA64_USE_PKR) +void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long itir) +#else void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long logps) +#endif { struct vhpt_lf_entry *vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr); unsigned long tag = ia64_ttag (vadr); @@ -80,12 +84,21 @@ void vhpt_insert (unsigned long vadr, un * because the processor may support speculative VHPT walk. 
 	 */
 	vlfe->ti_tag = INVALID_TI_TAG;
 	wmb();
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+	vlfe->itir = itir;
+#else
 	vlfe->itir = logps;
+#endif
 	vlfe->page_flags = pte | _PAGE_P;
 	*(volatile unsigned long*)&vlfe->ti_tag = tag;
 }

+#if defined(CONFIG_XEN_IA64_USE_PKR)
+void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
+			  unsigned long logps, unsigned long key)
+#else
 void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
 			  unsigned long logps)
+#endif
 {
 	unsigned long mask = (1L << logps) - 1;
 	int i;
@@ -110,7 +123,11 @@ void vhpt_multiple_insert(unsigned long 
 	vaddr &= ~mask;
 	pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);
 	for (i = 1L << (logps-PAGE_SHIFT); i > 0; i--) {
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+		vhpt_insert(vaddr, pte, IA64_ITIR_PS_KEY(logps, key));
+#else
 		vhpt_insert(vaddr, pte, logps << 2);
+#endif
 		vaddr += PAGE_SIZE;
 	}
 }

diff -r 87b0b6a08dbd -r 44ccb8aa58cc xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h	Mon Jul 9 09:22:58 2007 -0600
+++ b/xen/include/asm-ia64/domain.h	Wed Jul 11 15:37:09 2007 +0200
@@ -183,6 +183,12 @@ struct arch_domain {
 #define HAS_PERVCPU_VHPT(d)	(0)
 #endif

+#if defined(CONFIG_XEN_IA64_USE_PKR)
+#define NPKRS	15	/* Number of protection key registers for PV */
+/* A pkr value for xen: key = 0, valid = 1. */
+#define XEN_PKR_VAL	((0x0 << IA64_PKR_KEY) | IA64_PKR_VALID)
+#endif /* defined(CONFIG_XEN_IA64_USE_PKR) */
+
 struct arch_vcpu {
     /* Save the state of vcpu.
@@ -239,6 +245,13 @@ struct arch_vcpu {
     struct timer hlt_timer;
     struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */

+#if defined(CONFIG_XEN_IA64_USE_PKR)
+    /* Simple emulation of protection keys for PV domains. */
+    unsigned long pkrs[NPKRS+1];	/* protection key regs, 1 for xen */
+#define PKR_IN_USE	0x1	/* The domain uses protection keys. */
+    unsigned char pkr_flags;
+#endif
+
 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
     PTA pta;
     unsigned long vhpt_maddr;

diff -r 87b0b6a08dbd -r 44ccb8aa58cc xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h	Mon Jul 9 09:22:58 2007 -0600
+++ b/xen/include/asm-ia64/mm.h	Wed Jul 11 15:37:09 2007 +0200
@@ -447,7 +447,12 @@ extern unsigned long dom0vp_expose_p2m(s
 extern volatile unsigned long *mpt_table;
 extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps,
+                         u64* key, struct p2m_entry* entry);
+#else
 extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__,
                                 u64* logps, struct p2m_entry* entry);
+#endif
 #define machine_to_phys_mapping	mpt_table

 #define INVALID_M2P_ENTRY	(~0UL)

diff -r 87b0b6a08dbd -r 44ccb8aa58cc xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h	Mon Jul 9 09:22:58 2007 -0600
+++ b/xen/include/asm-ia64/vcpu.h	Wed Jul 11 15:37:09 2007 +0200
@@ -125,6 +125,9 @@ extern IA64FAULT vcpu_get_rr(VCPU * vcpu
 extern IA64FAULT vcpu_get_rr(VCPU * vcpu, u64 reg, u64 * pval);
 extern IA64FAULT vcpu_get_rr_ve(VCPU * vcpu, u64 vadr);
 /* protection key registers */
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+extern void vcpu_load_pkr_regs(VCPU * vcpu);
+#endif
 extern IA64FAULT vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval);
 extern IA64FAULT vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val);
 extern IA64FAULT vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key);
@@ -166,8 +169,13 @@ extern BOOLEAN vcpu_timer_expired(VCPU *
 extern BOOLEAN vcpu_timer_expired(VCPU * vcpu);
 extern u64 vcpu_deliverable_interrupts(VCPU * vcpu);
 struct p2m_entry;
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+extern void vcpu_itc_no_srlz(VCPU * vcpu, u64, u64, u64, u64, u64, u64,
+                             struct p2m_entry *);
+#else
 extern void vcpu_itc_no_srlz(VCPU * vcpu, u64, u64, u64, u64, u64,
                              struct p2m_entry *);
+#endif
 extern u64 vcpu_get_tmp(VCPU *, u64);
 extern void vcpu_set_tmp(VCPU *, u64, u64);

@@ -182,6 +190,11 @@ static inline u64 itir_ps(u64 itir)
 static inline u64 itir_ps(u64 itir)
 {
 	return ((itir >> 2) & 0x3f);
+}
+
+static inline u64 itir_key(u64 itir)
+{
+	return ((itir & IA64_ITIR_KEY_MASK) >> IA64_ITIR_KEY);
 }

 static inline u64 itir_mask(u64 itir)

diff -r 87b0b6a08dbd -r 44ccb8aa58cc xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h	Mon Jul 9 09:22:58 2007 -0600
+++ b/xen/include/asm-ia64/vhpt.h	Wed Jul 11 15:37:09 2007 +0200
@@ -37,10 +37,16 @@ struct vhpt_lf_entry {

 extern void vhpt_init (void);
 extern void gather_vhpt_stats(void);
+#if defined(CONFIG_XEN_IA64_USE_PKR)
+extern void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
+				 unsigned long logps, unsigned long key);
+void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long itir);
+#else
 extern void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
 				 unsigned long logps);
 extern void vhpt_insert (unsigned long vadr, unsigned long pte,
			 unsigned long logps);
+#endif
 void local_vhpt_flush(void);
 extern void vcpu_vhpt_flush(struct vcpu* v);

diff -r 87b0b6a08dbd -r 44ccb8aa58cc xen/include/asm-ia64/xenkregs.h
--- a/xen/include/asm-ia64/xenkregs.h	Mon Jul 9 09:22:58 2007 -0600
+++ b/xen/include/asm-ia64/xenkregs.h	Wed Jul 11 15:37:09 2007 +0200
@@ -47,4 +47,22 @@
 #define	IA64_ITIR_PS_KEY(_ps, _key)	(((_ps) << IA64_ITIR_PS) | \
 					 (((_key) << IA64_ITIR_KEY)))

+#if defined(CONFIG_XEN_IA64_USE_PKR)
+
+/* Define Protection Key Register (PKR) */
+#define	IA64_PKR_V		0
+#define	IA64_PKR_WD		1
+#define	IA64_PKR_RD		2
+#define	IA64_PKR_XD		3
+#define	IA64_PKR_MBZ0		4
+#define	IA64_PKR_KEY		8
+#define	IA64_PKR_KEY_LEN	24
+#define	IA64_PKR_MBZ1		32
+
+#define	IA64_PKR_VALID		(1 << IA64_PKR_V)
+#define	IA64_PKR_KEY_MASK	(((__IA64_UL(1) << IA64_PKR_KEY_LEN) - 1) \
+				 << IA64_PKR_KEY)
+
+#endif /* defined(CONFIG_XEN_IA64_USE_PKR) */
+
 #endif	/* _ASM_IA64_XENKREGS_H */
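
A note for readers: vcpu_set_pkr() above manipulates an ia64_pkr_t union
(fields val, v, key, reserved1, reserved2) whose definition is not part of
this diff.  A layout consistent with the IA64_PKR_* bit positions added to
xenkregs.h would look roughly like the sketch below, mirroring the style of
ia64_itir_t; this is an illustration only, and the actual definition in the
tree may differ:

typedef union {
	u64 val;			/* raw value, as written to pkr[] */
	struct {
		u64 v         :  1;	/* bit 0: valid */
		u64 wd        :  1;	/* bit 1: write disable */
		u64 rd        :  1;	/* bit 2: read disable */
		u64 xd        :  1;	/* bit 3: execute disable */
		u64 reserved1 :  4;	/* bits 4-7: must be zero */
		u64 key       : 24;	/* bits 8-31: protection key */
		u64 reserved2 : 32;	/* bits 32-63: must be zero */
	};
} ia64_pkr_t;

With such a layout the reserved1/reserved2 tests in vcpu_set_pkr() reject
exactly the must-be-zero fields, and key/v line up with IA64_PKR_KEY and
IA64_PKR_V.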
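To make the itir plumbing concrete, here is a small self-contained
demonstration of the IA64_ITIR_PS_KEY() macro and of the decoding done by
itir_ps()/itir_key() in vcpu.h.  It assumes the architectural ITIR layout
(ps in bits 7:2, key in bits 31:8, i.e. IA64_ITIR_PS == 2 and
IA64_ITIR_KEY == 8); the constants are redefined locally so the snippet
compiles on its own, outside the Xen tree:

#include <stdio.h>

/* Local stand-ins for the xenkregs.h constants (assumed values):
 * ps occupies ITIR bits 7:2, key occupies bits 31:8. */
#define IA64_ITIR_PS	2
#define IA64_ITIR_KEY	8
#define IA64_ITIR_PS_KEY(_ps, _key) \
	(((unsigned long)(_ps) << IA64_ITIR_PS) | \
	 ((unsigned long)(_key) << IA64_ITIR_KEY))

int main(void)
{
	/* A 16KB mapping (logps = 14) tagged with protection key 0x444. */
	unsigned long itir = IA64_ITIR_PS_KEY(14, 0x444);

	/* Recover the fields the way itir_ps()/itir_key() do. */
	printf("itir     = %#lx\n", itir);			/* 0x44438 */
	printf("itir_ps  = %lu\n", (itir >> 2) & 0x3f);		/* 14 */
	printf("itir_key = %#lx\n", (itir >> 8) & 0xffffff);	/* 0x444 */
	return 0;
}

This is the value that vcpu_itc_no_srlz() programs into cr.itir via
ia64_itc_PKR() and that vhpt_insert() now stores verbatim in the per-entry
itir field, so the VHPT walker picks up the guest's key instead of the
implicit key 0 used before this patch.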