--- xen/arch/ia64/Rules.mk	Tue Oct 10 21:05:50 2006 +0100
+++ xen/arch/ia64/Rules.mk	Wed Oct 11 16:10:40 2006 -0400
@@ -5,6 +5,8 @@ HAS_VGA := y
 HAS_VGA := y
 VALIDATE_VT ?= n
 no_warns ?= n
+xen_ia64_expose_p2m ?= y
+xen_ia64_pervcpu_vhpt ?= y
 ifneq ($(COMPILE_ARCH),$(TARGET_ARCH))
 CROSS_COMPILE ?= /usr/local/sp_env/v2.2.5/i686/bin/ia64-unknown-linux-
@@ -36,6 +38,12 @@ ifeq ($(VALIDATE_VT),y)
 ifeq ($(VALIDATE_VT),y)
 CFLAGS += -DVALIDATE_VT
 endif
+ifeq ($(xen_ia64_expose_p2m),y)
+CFLAGS += -DCONFIG_XEN_IA64_EXPOSE_P2M
+endif
+ifeq ($(xen_ia64_pervcpu_vhpt),y)
+CFLAGS += -DCONFIG_XEN_IA64_PERVCPU_VHPT
+endif
 ifeq ($(no_warns),y)
 CFLAGS += -Wa,--fatal-warnings -Werror -Wno-uninitialized
 endif
--- xen/arch/ia64/asm-offsets.c	Tue Oct 10 21:05:50 2006 +0100
+++ xen/arch/ia64/asm-offsets.c	Wed Oct 11 16:10:40 2006 -0400
@@ -37,6 +37,8 @@ void foo(void)
     DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET, offsetof (struct ia64_mca_cpu, init_stack));
     BLANK();
+    DEFINE(VCPU_VTM_OFFSET_OFS, offsetof(struct vcpu, arch.arch_vmx.vtm.vtm_offset));
+    DEFINE(VCPU_VRR0_OFS, offsetof(struct vcpu, arch.arch_vmx.vrr[0]));
 #ifdef VTI_DEBUG
     DEFINE(IVT_CUR_OFS, offsetof(struct vcpu, arch.arch_vmx.ivt_current));
     DEFINE(IVT_DBG_OFS, offsetof(struct vcpu, arch.arch_vmx.ivt_debug));
--- xen/arch/ia64/linux-xen/sal.c	Tue Oct 10 21:05:50 2006 +0100
+++ xen/arch/ia64/linux-xen/sal.c	Wed Oct 11 16:10:40 2006 -0400
@@ -16,8 +16,10 @@
 #ifdef XEN
 #include
+#include
 #include
 #endif
+#include
 #include
 #include
 #include
@@ -218,6 +220,77 @@ static void __init sal_desc_ap_wakeup(vo
 static void __init sal_desc_ap_wakeup(void *p) { }
 #endif
+/*
+ * HP rx5670 firmware polls for interrupts during SAL_CACHE_FLUSH by reading
+ * cr.ivr, but it never writes cr.eoi. This leaves any interrupt marked as
+ * "in-service" and masks other interrupts of equal or lower priority.
+ *
+ * HP internal defect reports: F1859, F2775, F3031.
+ */
+static int sal_cache_flush_drops_interrupts;
+
+static void __init
+check_sal_cache_flush (void)
+{
+    unsigned long flags, itv;
+    int cpu;
+    u64 vector;
+
+    cpu = get_cpu();
+    local_irq_save(flags);
+
+    /*
+     * Schedule a timer interrupt, wait until it's reported, and see if
+     * SAL_CACHE_FLUSH drops it.
+     */
+    itv = ia64_get_itv();
+    BUG_ON((itv & (1 << 16)) == 0);
+
+    ia64_set_itv(IA64_TIMER_VECTOR);
+    ia64_set_itm(ia64_get_itc() + 1000);
+
+    while (!ia64_get_irr(IA64_TIMER_VECTOR))
+        cpu_relax();
+
+    ia64_sal_cache_flush(3);
+
+    if (ia64_get_irr(IA64_TIMER_VECTOR)) {
+        vector = ia64_get_ivr();
+        ia64_eoi();
+    } else {
+        sal_cache_flush_drops_interrupts = 1;
+        printk(KERN_ERR "SAL: SAL_CACHE_FLUSH drops interrupts; "
+               "PAL_CACHE_FLUSH will be used instead\n");
+        ia64_eoi();
+    }
+
+    ia64_set_itv(itv);
+    local_irq_restore(flags);
+    put_cpu();
+}
+
+s64
+ia64_sal_cache_flush (u64 cache_type)
+{
+    struct ia64_sal_retval isrv;
+
+    if (sal_cache_flush_drops_interrupts) {
+        unsigned long flags;
+        u64 progress;
+        s64 rc;
+
+        progress = 0;
+        local_irq_save(flags);
+        rc = ia64_pal_cache_flush(cache_type,
+            PAL_CACHE_FLUSH_INVALIDATE, &progress, NULL);
+        local_irq_restore(flags);
+        return rc;
+    }
+
+    SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
+    return isrv.status;
+}
+
 void __init
 ia64_sal_init (struct ia64_sal_systab *systab)
 {
@@ -271,6 +344,8 @@ ia64_sal_init (struct ia64_sal_systab *s
 		}
 		p += SAL_DESC_SIZE(*p);
 	}
+
+	check_sal_cache_flush();
 }
 
 int
--- xen/arch/ia64/linux-xen/unaligned.c	Tue Oct 10 21:05:50 2006 +0100
+++ xen/arch/ia64/linux-xen/unaligned.c	Wed Oct 11 16:10:40 2006 -0400
@@ -304,7 +304,7 @@ set_rse_reg (struct pt_regs *regs, unsig
 	unsigned long *bsp, *bspstore, *addr, *rnat_addr;
 	unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
 	unsigned long nat_mask;
-	unsigned long old_rsc,new_rsc;
+	unsigned long old_rsc, new_rsc, psr;
 	unsigned long rnat;
 	long sof = (regs->cr_ifs) & 0x7f;
 	long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
@@ -321,16 +321,17 @@ set_rse_reg (struct pt_regs *regs, unsig
 		ridx = rotate_reg(sor, rrb_gr, ridx);
 
 	old_rsc=ia64_get_rsc();
-	new_rsc=old_rsc&(~0x3);
+	/* put RSC to lazy mode, and set loadrs 0 */
+	new_rsc = old_rsc & (~0x3fff0003);
 	ia64_set_rsc(new_rsc);
-
+	bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */
+
+	addr = ia64_rse_skip_regs(bsp, -sof + ridx);
+	nat_mask = 1UL << ia64_rse_slot_num(addr);
+	rnat_addr = ia64_rse_rnat_addr(addr);
+
+	local_irq_save(psr);
 	bspstore = (unsigned long*)ia64_get_bspstore();
-	bsp =kbs + (regs->loadrs >> 19);//16+3
-
-	addr = ia64_rse_skip_regs(bsp, -sof + ridx);
-	nat_mask = 1UL << ia64_rse_slot_num(addr);
-	rnat_addr = ia64_rse_rnat_addr(addr);
-
 	if(addr >= bspstore){
 
 		ia64_flushrs ();
@@ -358,6 +359,7 @@ set_rse_reg (struct pt_regs *regs, unsig
 		ia64_set_bspstore (bspstore);
 		ia64_set_rnat(rnat);
 	}
+	local_irq_restore(psr);
 	ia64_set_rsc(old_rsc);
 }
--- xen/arch/ia64/vmx/Makefile	Tue Oct 10 21:05:50 2006 +0100
+++ xen/arch/ia64/vmx/Makefile	Wed Oct 11 16:10:40 2006 -0400
@@ -17,3 +17,4 @@ obj-y += vmx_virt.o
 obj-y += vmx_virt.o
 obj-y += vmx_vsa.o
 obj-y += vtlb.o
+obj-y += optvfault.o
--- xen/arch/ia64/vmx/mmio.c	Tue Oct 10 21:05:50 2006 +0100
+++ xen/arch/ia64/vmx/mmio.c	Wed Oct 11 16:10:40 2006 -0400
@@ -428,7 +428,7 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
     IA64_BUNDLE bundle;
     int slot, dir=0, inst_type;
     size_t size;
-    u64 data, value,post_update, slot1a, slot1b, temp;
+    u64 data, post_update, slot1a, slot1b, temp;
     INST64 inst;
     regs=vcpu_regs(vcpu);
     if (IA64_RETRY == __vmx_get_domain_bundle(regs->cr_iip, &bundle)) {
@@ -454,7 +454,6 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
            vcpu_get_gr_nat(vcpu,inst.M4.r2,&data);
        }else if((inst.M1.x6>>2)<0xb){   //  read
            dir=IOREQ_READ;
-           vcpu_get_gr_nat(vcpu,inst.M1.r1,&value);
        }
    }
    // Integer Load + Reg update
@@ -462,7 +461,6 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
        inst_type = SL_INTEGER;
        dir = IOREQ_READ;     //write
        size = (inst.M2.x6&0x3);
-       vcpu_get_gr_nat(vcpu,inst.M2.r1,&value);
        vcpu_get_gr_nat(vcpu,inst.M2.r3,&temp);
        vcpu_get_gr_nat(vcpu,inst.M2.r2,&post_update);
        temp += post_update;
@@ -485,7 +483,6 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
        }else if((inst.M3.x6>>2)<0xb){   //  read
            dir=IOREQ_READ;
-           vcpu_get_gr_nat(vcpu,inst.M3.r1,&value);
            vcpu_get_gr_nat(vcpu,inst.M3.r3,&temp);
            post_update = (inst.M3.i<<7)+inst.M3.imm7;
            if(inst.M3.s)
@@ -597,13 +594,6 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
        mmio_access(vcpu, padr, &data, size, ma, dir);
    }else{
        mmio_access(vcpu, padr, &data, size, ma, dir);
-       if(size==1)
-           data = (value & 0xffffffffffffff00U) | (data & 0xffU);
-       else if(size==2)
-           data = (value & 0xffffffffffff0000U) | (data & 0xffffU);
-       else if(size==4)
-           data = (value & 0xffffffff00000000U) | (data & 0xffffffffU);
-
        if(inst_type==SL_INTEGER){       //gp
            vcpu_set_gr(vcpu,inst.M1.r1,data,0);
        }else{
--- xen/arch/ia64/vmx/vlsapic.c	Tue Oct 10 21:05:50 2006 +0100
+++ xen/arch/ia64/vmx/vlsapic.c	Wed Oct 11 16:10:40 2006 -0400
@@ -298,7 +298,7 @@ static void update_vhpi(VCPU *vcpu, int
     // TODO: Add support for XENO
     if ( VCPU(vcpu,vac).a_int ) {
         ia64_call_vsa ( PAL_VPS_SET_PENDING_INTERRUPT,
-                        (uint64_t) &(vcpu->arch.privregs), 0, 0,0,0,0,0);
+                        (uint64_t)vcpu->arch.privregs, 0, 0, 0, 0, 0, 0);
     }
 }
@@ -683,9 +683,5 @@ void vhpi_detection(VCPU *vcpu)
 
 void vmx_vexirq(VCPU *vcpu)
 {
-    static uint64_t vexirq_count=0;
-
-    vexirq_count ++;
-    printk("Virtual ex-irq %ld\n", vexirq_count);
     generate_exirq (vcpu);
 }
--- xen/arch/ia64/vmx/vmmu.c	Tue Oct 10 21:05:50 2006 +0100
+++ xen/arch/ia64/vmx/vmmu.c	Wed Oct 11 16:10:40 2006 -0400
@@ -456,7 +456,15 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
     }
 #endif
     pte &= ~PAGE_FLAGS_RV_MASK;
-    thash_purge_entries(vcpu, va, ps);
+
+    /* This is a bad workaround
+       In Linux, region 7 use 16M pagesize and is identity mapped.
+       VHPT page size is 16K in XEN. If purge VHPT while guest insert 16M,
+       it will iteratively purge VHPT 1024 times, which makes XEN/IPF very
+       slow.
XEN doesn't purge VHPT + */ + if (ps != _PAGE_SIZE_16M) + thash_purge_entries(vcpu, va, ps); gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT; if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn)) pte |= VTLB_PTE_IO; @@ -637,37 +645,30 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6 visr.ei=pt_isr.ei; visr.ir=pt_isr.ir; vpsr.val = VCPU(vcpu, vpsr); - if(vpsr.ic==0){ - visr.ni=1; - } visr.na=1; data = vtlb_lookup(vcpu, vadr, DSIDE_TLB); if(data){ if(data->p==0){ - visr.na=1; vcpu_set_isr(vcpu,visr.val); - page_not_present(vcpu, vadr); + data_page_not_present(vcpu, vadr); return IA64_FAULT; }else if(data->ma == VA_MATTR_NATPAGE){ - visr.na = 1; vcpu_set_isr(vcpu, visr.val); dnat_page_consumption(vcpu, vadr); return IA64_FAULT; }else{ *padr = ((data->ppn >> (data->ps - 12)) << data->ps) | - (vadr & (PSIZE(data->ps) - 1)); + (vadr & (PSIZE(data->ps) - 1)); return IA64_NO_FAULT; } } data = vhpt_lookup(vadr); if(data){ if(data->p==0){ - visr.na=1; vcpu_set_isr(vcpu,visr.val); - page_not_present(vcpu, vadr); + data_page_not_present(vcpu, vadr); return IA64_FAULT; }else if(data->ma == VA_MATTR_NATPAGE){ - visr.na = 1; vcpu_set_isr(vcpu, visr.val); dnat_page_consumption(vcpu, vadr); return IA64_FAULT; --- xen/arch/ia64/vmx/vmx_entry.S Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/vmx/vmx_entry.S Wed Oct 11 16:10:40 2006 -0400 @@ -669,7 +669,7 @@ 1: // re-pin mappings for guest_vhpt - mov r24=IA64_TR_PERVP_VHPT + mov r24=IA64_TR_VHPT movl r25=PAGE_KERNEL ;; or loc5 = r25,loc5 // construct PA | page properties --- xen/arch/ia64/vmx/vmx_init.c Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/vmx/vmx_init.c Wed Oct 11 16:10:40 2006 -0400 @@ -378,7 +378,8 @@ static void vmx_build_physmap_table(stru for (j = io_ranges[i].start; j < io_ranges[i].start + io_ranges[i].size; j += PAGE_SIZE) - __assign_domain_page(d, j, io_ranges[i].type, ASSIGN_writable); + (void)__assign_domain_page(d, j, io_ranges[i].type, + ASSIGN_writable); } /* Map normal memory below 3G */ --- xen/arch/ia64/vmx/vmx_interrupt.c Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/vmx/vmx_interrupt.c Wed Oct 11 16:10:40 2006 -0400 @@ -383,14 +383,29 @@ dnat_page_consumption (VCPU *vcpu, uint6 /* Deal with * Page not present vector */ -void -page_not_present(VCPU *vcpu, u64 vadr) +static void +__page_not_present(VCPU *vcpu, u64 vadr) { /* If vPSR.ic, IFA, ITIR */ set_ifa_itir_iha (vcpu, vadr, 1, 1, 0); inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR); } + +void +data_page_not_present(VCPU *vcpu, u64 vadr) +{ + __page_not_present(vcpu, vadr); +} + + +void +inst_page_not_present(VCPU *vcpu, u64 vadr) +{ + __page_not_present(vcpu, vadr); +} + + /* Deal with * Data access rights vector */ --- xen/arch/ia64/vmx/vmx_ivt.S Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/vmx/vmx_ivt.S Wed Oct 11 16:10:40 2006 -0400 @@ -772,12 +772,20 @@ ENTRY(vmx_single_step_trap) VMX_REFLECT(36) END(vmx_single_step_trap) + .global vmx_virtualization_fault_back .org vmx_ia64_ivt+0x6100 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault ENTRY(vmx_virtualization_fault) // VMX_DBG_FAULT(37) mov r31=pr + ;; + cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24 + cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24 + (p6) br.dptk.many asm_mov_from_ar + (p7) br.dptk.many asm_mov_from_rr + ;; +vmx_virtualization_fault_back: mov r19=37 adds r16 = IA64_VCPU_CAUSE_OFFSET,r21 adds r17 = IA64_VCPU_OPCODE_OFFSET,r21 --- xen/arch/ia64/vmx/vmx_phy_mode.c Tue Oct 10 21:05:50 2006 +0100 +++ 
xen/arch/ia64/vmx/vmx_phy_mode.c Wed Oct 11 16:10:40 2006 -0400 @@ -126,10 +126,16 @@ vmx_init_all_rr(VCPU *vcpu) vmx_init_all_rr(VCPU *vcpu) { VMX(vcpu, vrr[VRN0]) = 0x38; + // enable vhpt in guest physical mode + vcpu->arch.metaphysical_rr0 |= 1; + vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38); VMX(vcpu, vrr[VRN1]) = 0x38; VMX(vcpu, vrr[VRN2]) = 0x38; VMX(vcpu, vrr[VRN3]) = 0x38; VMX(vcpu, vrr[VRN4]) = 0x38; + // enable vhpt in guest physical mode + vcpu->arch.metaphysical_rr4 |= 1; + vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38); VMX(vcpu, vrr[VRN5]) = 0x38; VMX(vcpu, vrr[VRN6]) = 0x38; VMX(vcpu, vrr[VRN7]) = 0x738; @@ -141,10 +147,8 @@ vmx_load_all_rr(VCPU *vcpu) vmx_load_all_rr(VCPU *vcpu) { unsigned long psr; - ia64_rr phy_rr; local_irq_save(psr); - /* WARNING: not allow co-exist of both virtual mode and physical * mode in same region @@ -154,24 +158,16 @@ vmx_load_all_rr(VCPU *vcpu) panic_domain(vcpu_regs(vcpu), "Unexpected domain switch in phy emul\n"); } - phy_rr.rrval = vcpu->arch.metaphysical_rr0; - //phy_rr.ps = PAGE_SHIFT; - phy_rr.ve = 1; - - ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval); - ia64_dv_serialize_data(); - phy_rr.rrval = vcpu->arch.metaphysical_rr4; - //phy_rr.ps = PAGE_SHIFT; - phy_rr.ve = 1; - - ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval); + ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0); + ia64_dv_serialize_data(); + ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4); ia64_dv_serialize_data(); } else { ia64_set_rr((VRN0 << VRN_SHIFT), - vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0]))); + vcpu->arch.metaphysical_saved_rr0); ia64_dv_serialize_data(); ia64_set_rr((VRN4 << VRN_SHIFT), - vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4]))); + vcpu->arch.metaphysical_saved_rr4); ia64_dv_serialize_data(); } @@ -209,21 +205,11 @@ switch_to_physical_rid(VCPU *vcpu) switch_to_physical_rid(VCPU *vcpu) { UINT64 psr; - ia64_rr phy_rr, mrr; - /* Save original virtual mode rr[0] and rr[4] */ psr=ia64_clear_ic(); - phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0; - mrr.rrval = ia64_get_rr(VRN0 << VRN_SHIFT); - phy_rr.ps = mrr.ps; - phy_rr.ve = 1; - ia64_set_rr(VRN0<domain->arch.metaphysical_rr4; - mrr.rrval = ia64_get_rr(VRN4 << VRN_SHIFT); - phy_rr.ps = mrr.ps; - phy_rr.ve = 1; - ia64_set_rr(VRN4<arch.metaphysical_rr0); + ia64_srlz_d(); + ia64_set_rr(VRN4<arch.metaphysical_rr4); ia64_srlz_d(); ia64_set_psr(psr); @@ -236,15 +222,10 @@ switch_to_virtual_rid(VCPU *vcpu) switch_to_virtual_rid(VCPU *vcpu) { UINT64 psr; - ia64_rr mrr; - psr=ia64_clear_ic(); - - vcpu_get_rr(vcpu,VRN0<arch.metaphysical_saved_rr0); + ia64_srlz_d(); + ia64_set_rr(VRN4<arch.metaphysical_saved_rr4); ia64_srlz_d(); ia64_set_psr(psr); ia64_srlz_i(); --- xen/arch/ia64/vmx/vmx_process.c Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/vmx/vmx_process.c Wed Oct 11 16:10:40 2006 -0400 @@ -81,6 +81,7 @@ void vmx_reflect_interruption(UINT64 ifa void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim, UINT64 vector,REGS *regs) { + UINT64 status; VCPU *vcpu = current; UINT64 vpsr = VCPU(vcpu, vpsr); vector=vec2off[vector]; @@ -89,13 +90,23 @@ void vmx_reflect_interruption(UINT64 ifa } else{ // handle fpswa emulation // fp fault - if(vector == IA64_FP_FAULT_VECTOR && !handle_fpu_swa(1, regs, isr)){ - vmx_vcpu_increment_iip(vcpu); - return; + if (vector == IA64_FP_FAULT_VECTOR) { + status = handle_fpu_swa(1, regs, isr); + if (!status) { + vmx_vcpu_increment_iip(vcpu); + return; + } else if (IA64_RETRY == status) + return; } //fp trap - else if(vector == IA64_FP_TRAP_VECTOR && 
!handle_fpu_swa(0, regs, isr)){ - return; + else if (vector == IA64_FP_TRAP_VECTOR) { + status = handle_fpu_swa(0, regs, isr); + if (!status) + return; + else if (IA64_RETRY == status) { + vmx_vcpu_decrement_iip(vcpu); + return; + } } } VCPU(vcpu,isr)=isr; @@ -187,7 +198,7 @@ void leave_hypervisor_tail(struct pt_reg { struct domain *d = current->domain; struct vcpu *v = current; - int callback_irq; + // FIXME: Will this work properly if doing an RFI??? if (!is_idle_domain(d) ) { // always comes from guest // struct pt_regs *user_regs = vcpu_regs(current); @@ -215,11 +226,14 @@ void leave_hypervisor_tail(struct pt_reg // v->arch.irq_new_pending = 1; // } - callback_irq = d->arch.hvm_domain.params[HVM_PARAM_CALLBACK_IRQ]; - if (callback_irq != 0 && local_events_need_delivery()) { - /*inject para-device call back irq*/ - v->vcpu_info->evtchn_upcall_mask = 1; - vmx_vcpu_pend_interrupt(v, callback_irq); + if (v->vcpu_id == 0) { + int callback_irq = + d->arch.hvm_domain.params[HVM_PARAM_CALLBACK_IRQ]; + if (callback_irq != 0 && local_events_need_delivery()) { + /*inject para-device call back irq*/ + v->vcpu_info->evtchn_upcall_mask = 1; + vmx_vcpu_pend_interrupt(v, callback_irq); + } } if ( v->arch.irq_new_pending ) { @@ -252,18 +266,20 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs) { IA64_PSR vpsr; - int type=ISIDE_TLB; + int type; u64 vhpt_adr, gppa, pteval, rr, itir; ISR misr; -// REGS *regs; thash_data_t *data; VCPU *v = current; -#ifdef VTLB_DEBUG - check_vtlb_sanity(vtlb); - dump_vtlb(vtlb); -#endif vpsr.val = VCPU(v, vpsr); misr.val=VMX(v,cr_isr); + + if (vec == 1) + type = ISIDE_TLB; + else if (vec == 2) + type = DSIDE_TLB; + else + panic_domain(regs, "wrong vec:%lx\n", vec); if(is_physical_mode(v)&&(!(vadr<<1>>62))){ if(vec==2){ @@ -275,11 +291,6 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r physical_tlb_miss(v, vadr); return IA64_FAULT; } - if(vec == 1) type = ISIDE_TLB; - else if(vec == 2) type = DSIDE_TLB; - else panic_domain(regs,"wrong vec:%lx\n",vec); - -// prepare_if_physical_mode(v); if((data=vtlb_lookup(v, vadr,type))!=0){ if (v->domain != dom0 && type == DSIDE_TLB) { @@ -298,46 +309,44 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r thash_vhpt_insert(v,data->page_flags, data->itir ,vadr); }else if(type == DSIDE_TLB){ + if (misr.sp) return vmx_handle_lds(regs); + if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){ if(vpsr.ic){ vcpu_set_isr(v, misr.val); alt_dtlb(v, vadr); return IA64_FAULT; } else{ - if(misr.sp){ - //TODO lds emulation - //panic("Don't support speculation load"); - return vmx_handle_lds(regs); - }else{ - nested_dtlb(v); - return IA64_FAULT; - } + nested_dtlb(v); + return IA64_FAULT; } } else{ vmx_vcpu_thash(v, vadr, &vhpt_adr); if(!guest_vhpt_lookup(vhpt_adr, &pteval)){ - if ((pteval & _PAGE_P) && - ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST)) { + if (!(pteval & _PAGE_P)) { + if (vpsr.ic) { + vcpu_set_isr(v, misr.val); + data_page_not_present(v, vadr); + return IA64_FAULT; + } else { + nested_dtlb(v); + return IA64_FAULT; + } + } + else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) { vcpu_get_rr(v, vadr, &rr); itir = rr&(RR_RID_MASK | RR_PS_MASK); thash_purge_and_insert(v, pteval, itir, vadr, DSIDE_TLB); return IA64_NO_FAULT; - } - if(vpsr.ic){ + } else if (vpsr.ic) { vcpu_set_isr(v, misr.val); dtlb_fault(v, vadr); return IA64_FAULT; }else{ - if(misr.sp){ - //TODO lds emulation - //panic("Don't support speculation load"); - return vmx_handle_lds(regs); - }else{ - nested_dtlb(v); - return IA64_FAULT; - } + nested_dtlb(v); + 
return IA64_FAULT; } }else{ if(vpsr.ic){ @@ -345,22 +354,16 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r dvhpt_fault(v, vadr); return IA64_FAULT; }else{ - if(misr.sp){ - //TODO lds emulation - //panic("Don't support speculation load"); - return vmx_handle_lds(regs); - }else{ - nested_dtlb(v); - return IA64_FAULT; - } + nested_dtlb(v); + return IA64_FAULT; } } } }else if(type == ISIDE_TLB){ + + if (!vpsr.ic) + misr.ni = 1; if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){ - if(!vpsr.ic){ - misr.ni=1; - } vcpu_set_isr(v, misr.val); alt_itlb(v, vadr); return IA64_FAULT; @@ -372,17 +375,12 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r itir = rr&(RR_RID_MASK | RR_PS_MASK); thash_purge_and_insert(v, pteval, itir, vadr, ISIDE_TLB); return IA64_NO_FAULT; - } - if(!vpsr.ic){ - misr.ni=1; - } - vcpu_set_isr(v, misr.val); - itlb_fault(v, vadr); - return IA64_FAULT; + } else { + vcpu_set_isr(v, misr.val); + inst_page_not_present(v, vadr); + return IA64_FAULT; + } }else{ - if(!vpsr.ic){ - misr.ni=1; - } vcpu_set_isr(v, misr.val); ivhpt_fault(v, vadr); return IA64_FAULT; --- xen/arch/ia64/vmx/vmx_vcpu.c Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/vmx/vmx_vcpu.c Wed Oct 11 16:10:40 2006 -0400 @@ -172,6 +172,21 @@ IA64FAULT vmx_vcpu_increment_iip(VCPU *v } +IA64FAULT vmx_vcpu_decrement_iip(VCPU *vcpu) +{ + REGS *regs = vcpu_regs(vcpu); + IA64_PSR *ipsr = (IA64_PSR *)®s->cr_ipsr; + + if (ipsr->ri == 0) { + ipsr->ri = 2; + regs->cr_iip -= 16; + } else { + ipsr->ri--; + } + return (IA64_NO_FAULT); +} + + IA64FAULT vmx_vcpu_cover(VCPU *vcpu) { REGS *regs = vcpu_regs(vcpu); @@ -197,19 +212,32 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UI { ia64_rr oldrr,newrr; extern void * pal_vaddr; + u64 rrval; vcpu_get_rr(vcpu, reg, &oldrr.rrval); newrr.rrval=val; if (newrr.rid >= (1 << vcpu->domain->arch.rid_bits)) panic_domain (NULL, "use of invalid rid %x\n", newrr.rid); - VMX(vcpu,vrr[reg>>61]) = val; - switch((u64)(reg>>61)) { + VMX(vcpu,vrr[reg>>VRN_SHIFT]) = val; + switch((u64)(reg>>VRN_SHIFT)) { case VRN7: vmx_switch_rr7(vrrtomrr(vcpu,val),vcpu->domain->shared_info, (void *)vcpu->arch.privregs, (void *)vcpu->arch.vhpt.hash, pal_vaddr ); break; + case VRN4: + rrval = vrrtomrr(vcpu,val); + vcpu->arch.metaphysical_saved_rr4 = rrval; + if (!is_physical_mode(vcpu)) + ia64_set_rr(reg,rrval); + break; + case VRN0: + rrval = vrrtomrr(vcpu,val); + vcpu->arch.metaphysical_saved_rr0 = rrval; + if (!is_physical_mode(vcpu)) + ia64_set_rr(reg,rrval); + break; default: ia64_set_rr(reg,vrrtomrr(vcpu,val)); break; --- xen/arch/ia64/xen/Makefile Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/xen/Makefile Wed Oct 11 16:10:40 2006 -0400 @@ -25,5 +25,7 @@ obj-y += xentime.o obj-y += xentime.o obj-y += flushd.o obj-y += privop_stat.o +obj-y += xenpatch.o +obj-y += xencomm.o obj-$(crash_debug) += gdbstub.o --- xen/arch/ia64/xen/dom0_ops.c Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/xen/dom0_ops.c Wed Oct 11 16:10:40 2006 -0400 @@ -256,6 +256,7 @@ do_dom0vp_op(unsigned long cmd, } else { ret = (ret & _PFN_MASK) >> PAGE_SHIFT;//XXX pte_pfn() } + perfc_incrc(dom0vp_phystomach); break; case IA64_DOM0VP_machtophys: if (!mfn_valid(arg0)) { @@ -263,6 +264,7 @@ do_dom0vp_op(unsigned long cmd, break; } ret = get_gpfn_from_mfn(arg0); + perfc_incrc(dom0vp_machtophys); break; case IA64_DOM0VP_zap_physmap: ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1); @@ -270,6 +272,9 @@ do_dom0vp_op(unsigned long cmd, case IA64_DOM0VP_add_physmap: ret = dom0vp_add_physmap(d, arg0, arg1, (unsigned int)arg2, (domid_t)arg3); + break; + case 
IA64_DOM0VP_expose_p2m: + ret = dom0vp_expose_p2m(d, arg0, arg1, arg2, arg3); break; default: ret = -1; --- xen/arch/ia64/xen/domain.c Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/xen/domain.c Wed Oct 11 16:10:40 2006 -0400 @@ -46,6 +46,7 @@ #include #include #include +#include unsigned long dom0_size = 512*1024*1024; unsigned long dom0_align = 64*1024*1024; @@ -58,13 +59,8 @@ extern unsigned long running_on_sim; extern char dom0_command_line[]; -/* FIXME: where these declarations should be there ? */ -extern void serial_input_init(void); +/* forward declaration */ static void init_switch_stack(struct vcpu *v); -extern void vmx_do_launch(struct vcpu *); - -/* this belongs in include/asm, but there doesn't seem to be a suitable place */ -extern struct vcpu *ia64_switch_to (struct vcpu *next_task); /* Address of vpsr.i (in fact evtchn_upcall_mask) of current vcpu. This is a Xen virtual address. */ @@ -72,6 +68,16 @@ DEFINE_PER_CPU(int *, current_psr_ic_add DEFINE_PER_CPU(int *, current_psr_ic_addr); #include + +static void +ia64_disable_vhpt_walker(void) +{ + // disable VHPT. ia64_new_rr7() might cause VHPT + // fault without this because it flushes dtr[IA64_TR_VHPT] + // (VHPT_SIZE_LOG2 << 2) is just for avoid + // Reserved Register/Field fault. + ia64_set_pta(VHPT_SIZE_LOG2 << 2); +} static void flush_vtlb_for_context_switch(struct vcpu* vcpu) { @@ -96,10 +102,13 @@ static void flush_vtlb_for_context_switc if (VMX_DOMAIN(vcpu)) { // currently vTLB for vt-i domian is per vcpu. // so any flushing isn't needed. + } else if (HAS_PERVCPU_VHPT(vcpu->domain)) { + // nothing to do } else { - vhpt_flush(); + local_vhpt_flush(); } local_flush_tlb_all(); + perfc_incrc(flush_vtlb_for_context_switch); } } @@ -114,9 +123,9 @@ void schedule_tail(struct vcpu *prev) current->processor); } else { ia64_set_iva(&ia64_ivt); - ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) | - VHPT_ENABLED); + ia64_disable_vhpt_walker(); load_region_regs(current); + ia64_set_pta(vcpu_pta(current)); vcpu_load_kernel_regs(current); __ia64_per_cpu_var(current_psr_i_addr) = ¤t->domain-> shared_info->vcpu_info[current->vcpu_id].evtchn_upcall_mask; @@ -130,7 +139,6 @@ void context_switch(struct vcpu *prev, s void context_switch(struct vcpu *prev, struct vcpu *next) { uint64_t spsr; - uint64_t pta; local_irq_save(spsr); @@ -167,9 +175,9 @@ void context_switch(struct vcpu *prev, s nd = current->domain; if (!is_idle_domain(nd)) { - ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) | - VHPT_ENABLED); + ia64_disable_vhpt_walker(); load_region_regs(current); + ia64_set_pta(vcpu_pta(current)); vcpu_load_kernel_regs(current); vcpu_set_next_timer(current); if (vcpu_timer_expired(current)) @@ -183,14 +191,13 @@ void context_switch(struct vcpu *prev, s * walker. Then all accesses happen within idle context will * be handled by TR mapping and identity mapping. */ - pta = ia64_get_pta(); - ia64_set_pta(pta & ~VHPT_ENABLED); + ia64_disable_vhpt_walker(); __ia64_per_cpu_var(current_psr_i_addr) = NULL; __ia64_per_cpu_var(current_psr_ic_addr) = NULL; } } + local_irq_restore(spsr); flush_vtlb_for_context_switch(current); - local_irq_restore(spsr); context_saved(prev); } @@ -273,6 +280,13 @@ struct vcpu *alloc_vcpu_struct(struct do if (!d->arch.is_vti) { int order; int i; + // vti domain has its own vhpt policy. + if (HAS_PERVCPU_VHPT(d)) { + if (pervcpu_vhpt_alloc(v) < 0) { + free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER); + return NULL; + } + } /* Create privregs page only if not VTi. 
*/ order = get_order_from_shift(XMAPPEDREGS_SHIFT); @@ -315,6 +329,8 @@ struct vcpu *alloc_vcpu_struct(struct do void relinquish_vcpu_resources(struct vcpu *v) { + if (HAS_PERVCPU_VHPT(v->domain)) + pervcpu_vhpt_free(v); if (v->arch.privregs != NULL) { free_xenheap_pages(v->arch.privregs, get_order_from_shift(XMAPPEDREGS_SHIFT)); @@ -350,6 +366,11 @@ static void init_switch_stack(struct vcp memset(v->arch._thread.fph,0,sizeof(struct ia64_fpreg)*96); } +#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT +static int opt_pervcpu_vhpt = 1; +integer_param("pervcpu_vhpt", opt_pervcpu_vhpt); +#endif + int arch_domain_create(struct domain *d) { int i; @@ -364,6 +385,11 @@ int arch_domain_create(struct domain *d) if (is_idle_domain(d)) return 0; +#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT + d->arch.has_pervcpu_vhpt = opt_pervcpu_vhpt; + DPRINTK("%s:%d domain %d pervcpu_vhpt %d\n", + __func__, __LINE__, d->domain_id, d->arch.has_pervcpu_vhpt); +#endif d->shared_info = alloc_xenheap_pages(get_order_from_shift(XSI_SHIFT)); if (d->shared_info == NULL) goto fail_nomem; @@ -1101,9 +1127,6 @@ int construct_dom0(struct domain *d, physdev_init_dom0(d); - // FIXME: Hack for keyboard input - //serial_input_init(); - return 0; } --- xen/arch/ia64/xen/faults.c Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/xen/faults.c Wed Oct 11 16:10:40 2006 -0400 @@ -228,10 +228,10 @@ void ia64_do_page_fault (unsigned long a // indicate a bad xen pointer printk("*** xen_handle_domain_access: exception table" " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n", - iip, address); + iip, address); panic_domain(regs,"*** xen_handle_domain_access: exception table" - " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n", - iip, address); + " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n", + iip, address); } return; } --- xen/arch/ia64/xen/fw_emul.c Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/xen/fw_emul.c Wed Oct 11 16:10:40 2006 -0400 @@ -16,7 +16,6 @@ * */ #include -#include #include #include @@ -29,6 +28,7 @@ #include #include #include +#include extern unsigned long running_on_sim; --- xen/arch/ia64/xen/hypercall.c Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/xen/hypercall.c Wed Oct 11 16:10:40 2006 -0400 @@ -32,7 +32,6 @@ #include #include -static long do_physdev_op_compat(XEN_GUEST_HANDLE(physdev_op_t) uop); static long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg); static long do_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg); @@ -54,10 +53,10 @@ const hypercall_t ia64_hypercall_table[N (hypercall_t)do_multicall, (hypercall_t)do_ni_hypercall, /* do_update_va_mapping */ (hypercall_t)do_ni_hypercall, /* do_set_timer_op */ /* 15 */ - (hypercall_t)do_event_channel_op_compat, + (hypercall_t)do_ni_hypercall, (hypercall_t)do_xen_version, (hypercall_t)do_console_io, - (hypercall_t)do_physdev_op_compat, + (hypercall_t)do_ni_hypercall, (hypercall_t)do_grant_table_op, /* 20 */ (hypercall_t)do_ni_hypercall, /* do_vm_assist */ (hypercall_t)do_ni_hypercall, /* do_update_va_mapping_othe */ @@ -108,19 +107,6 @@ xen_hypercall (struct pt_regs *regs) xen_hypercall (struct pt_regs *regs) { uint32_t cmd = (uint32_t)regs->r2; - struct vcpu *v = current; - - if (cmd == __HYPERVISOR_grant_table_op) { - XEN_GUEST_HANDLE(void) uop; - - v->arch.hypercall_param.va = regs->r15; - v->arch.hypercall_param.pa1 = regs->r17; - v->arch.hypercall_param.pa2 = regs->r18; - set_xen_guest_handle(uop, (void *)regs->r15); - regs->r8 = do_grant_table_op(regs->r14, uop, regs->r16); - v->arch.hypercall_param.va = 0; - return IA64_NO_FAULT; - } if (cmd < 
NR_hypercalls) { perfc_incra(hypercalls, cmd); @@ -133,7 +119,21 @@ xen_hypercall (struct pt_regs *regs) regs->r19); } else regs->r8 = -ENOSYS; - + + return IA64_NO_FAULT; +} + +static IA64FAULT +xen_fast_hypercall (struct pt_regs *regs) +{ + uint32_t cmd = (uint32_t)regs->r2; + switch (cmd) { + case __HYPERVISOR_ia64_fast_eoi: + regs->r8 = pirq_guest_eoi(current->domain, regs->r14); + break; + default: + regs->r8 = -ENOSYS; + } return IA64_NO_FAULT; } @@ -201,8 +201,8 @@ fw_hypercall_fpswa (struct vcpu *v) return PSCBX(v, fpswa_ret); } -static IA64FAULT -fw_hypercall (struct pt_regs *regs) +IA64FAULT +ia64_hypercall(struct pt_regs *regs) { struct vcpu *v = current; struct sal_ret_values x; @@ -213,7 +213,13 @@ fw_hypercall (struct pt_regs *regs) perfc_incra(fw_hypercall, index >> 8); switch (index) { - case FW_HYPERCALL_PAL_CALL: + case FW_HYPERCALL_XEN: + return xen_hypercall(regs); + + case FW_HYPERCALL_XEN_FAST: + return xen_fast_hypercall(regs); + + case FW_HYPERCALL_PAL_CALL: //printf("*** PAL hypercall: index=%d\n",regs->r28); //FIXME: This should call a C routine #if 0 @@ -264,7 +270,7 @@ fw_hypercall (struct pt_regs *regs) regs->r10 = y.v1; regs->r11 = y.v2; } break; - case FW_HYPERCALL_SAL_CALL: + case FW_HYPERCALL_SAL_CALL: x = sal_emulator(vcpu_get_gr(v,32),vcpu_get_gr(v,33), vcpu_get_gr(v,34),vcpu_get_gr(v,35), vcpu_get_gr(v,36),vcpu_get_gr(v,37), @@ -272,44 +278,33 @@ fw_hypercall (struct pt_regs *regs) regs->r8 = x.r8; regs->r9 = x.r9; regs->r10 = x.r10; regs->r11 = x.r11; break; - case FW_HYPERCALL_SAL_RETURN: + case FW_HYPERCALL_SAL_RETURN: if ( !test_and_set_bit(_VCPUF_down, &v->vcpu_flags) ) vcpu_sleep_nosync(v); break; - case FW_HYPERCALL_EFI_CALL: + case FW_HYPERCALL_EFI_CALL: efi_ret_value = efi_emulator (regs, &fault); if (fault != IA64_NO_FAULT) return fault; regs->r8 = efi_ret_value; break; - case FW_HYPERCALL_IPI: + case FW_HYPERCALL_IPI: fw_hypercall_ipi (regs); break; - case FW_HYPERCALL_SET_SHARED_INFO_VA: + case FW_HYPERCALL_SET_SHARED_INFO_VA: regs->r8 = domain_set_shared_info_va (regs->r28); break; - case FW_HYPERCALL_FPSWA: + case FW_HYPERCALL_FPSWA: fpswa_ret = fw_hypercall_fpswa (v); regs->r8 = fpswa_ret.status; regs->r9 = fpswa_ret.err0; regs->r10 = fpswa_ret.err1; regs->r11 = fpswa_ret.err2; break; - default: + default: printf("unknown ia64 fw hypercall %lx\n", regs->r2); regs->r8 = do_ni_hypercall(); } return IA64_NO_FAULT; -} - -IA64FAULT -ia64_hypercall (struct pt_regs *regs) -{ - unsigned long index = regs->r2; - - if (index >= FW_HYPERCALL_FIRST_ARCH) - return fw_hypercall (regs); - else - return xen_hypercall (regs); } unsigned long hypercall_create_continuation( @@ -465,28 +460,6 @@ static long do_physdev_op(int cmd, XEN_G return ret; } -/* Legacy hypercall (as of 0x00030202). */ -static long do_physdev_op_compat(XEN_GUEST_HANDLE(physdev_op_t) uop) -{ - struct physdev_op op; - - if ( unlikely(copy_from_guest(&op, uop, 1) != 0) ) - return -EFAULT; - - return do_physdev_op(op.cmd, guest_handle_from_ptr(&uop.p->u, void)); -} - -/* Legacy hypercall (as of 0x00030202). 
*/ -long do_event_channel_op_compat(XEN_GUEST_HANDLE(evtchn_op_t) uop) -{ - struct evtchn_op op; - - if ( unlikely(copy_from_guest(&op, uop, 1) != 0) ) - return -EFAULT; - - return do_event_channel_op(op.cmd, guest_handle_from_ptr(&uop.p->u, void)); -} - static long register_guest_callback(struct callback_register *reg) { long ret = 0; --- xen/arch/ia64/xen/mm.c Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/xen/mm.c Wed Oct 11 16:10:40 2006 -0400 @@ -396,6 +396,12 @@ gmfn_to_mfn_foreign(struct domain *d, un { unsigned long pte; + // This function may be called from __gnttab_copy() + // during destruction of VT-i domain with PV-on-HVM driver. + if (unlikely(d->arch.mm.pgd == NULL)) { + if (VMX_DOMAIN(d->vcpu[0])) + return INVALID_MFN; + } pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT, NULL); if (!pte) { panic("gmfn_to_mfn_foreign: bad gpfn. spinning...\n"); @@ -702,6 +708,22 @@ void *domain_mpa_to_imva(struct domain * } #endif +unsigned long +xencomm_paddr_to_maddr(unsigned long paddr) +{ + struct vcpu *v = current; + struct domain *d = v->domain; + u64 pa; + + pa = ____lookup_domain_mpa(d, paddr); + if (pa == INVALID_MFN) { + printf("%s: called with bad memory address: 0x%lx - iip=%lx\n", + __func__, paddr, vcpu_regs(v)->cr_iip); + return 0; + } + return __va_ul((pa & _PFN_MASK) | (paddr & ~PAGE_MASK)); +} + /* Allocate a new page for domain and map it to the specified metaphysical address. */ static struct page_info * @@ -784,7 +806,7 @@ flags_to_prot (unsigned long flags) // flags: currently only ASSIGN_readonly, ASSIGN_nocache // This is called by assign_domain_mmio_page(). // So accessing to pte is racy. -void +int __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr, unsigned long flags) @@ -800,8 +822,25 @@ __assign_domain_page(struct domain *d, old_pte = __pte(0); new_pte = pfn_pte(physaddr >> PAGE_SHIFT, __pgprot(prot)); ret_pte = ptep_cmpxchg_rel(&d->arch.mm, mpaddr, pte, old_pte, new_pte); - if (pte_val(ret_pte) == pte_val(old_pte)) + if (pte_val(ret_pte) == pte_val(old_pte)) { smp_mb(); + return 0; + } + + // dom0 tries to map real machine's I/O region, but failed. + // It is very likely that dom0 doesn't boot correctly because + // it can't access I/O. So complain here. + if ((flags & ASSIGN_nocache) && + (pte_pfn(ret_pte) != (physaddr >> PAGE_SHIFT) || + !(pte_val(ret_pte) & _PAGE_MA_UC))) + printk("%s:%d WARNING can't assign page domain 0x%p id %d\n" + "\talready assigned pte_val 0x%016lx\n" + "\tmpaddr 0x%016lx physaddr 0x%016lx flags 0x%lx\n", + __func__, __LINE__, + d, d->domain_id, pte_val(ret_pte), + mpaddr, physaddr, flags); + + return -EAGAIN; } /* get_page() and map a physical address to the specified metaphysical addr */ @@ -818,7 +857,7 @@ assign_domain_page(struct domain *d, set_gpfn_from_mfn(physaddr >> PAGE_SHIFT, mpaddr >> PAGE_SHIFT); // because __assign_domain_page() uses set_pte_rel() which has // release semantics, smp_mb() isn't needed. 
- __assign_domain_page(d, mpaddr, physaddr, ASSIGN_writable); + (void)__assign_domain_page(d, mpaddr, physaddr, ASSIGN_writable); } int @@ -841,8 +880,8 @@ ioports_permit_access(struct domain *d, lp_offset = PAGE_ALIGN(IO_SPACE_SPARSE_ENCODING(lp)); for (off = fp_offset; off <= lp_offset; off += PAGE_SIZE) - __assign_domain_page(d, IO_PORTS_PADDR + off, - __pa(ia64_iobase) + off, ASSIGN_nocache); + (void)__assign_domain_page(d, IO_PORTS_PADDR + off, + __pa(ia64_iobase) + off, ASSIGN_nocache); return 0; } @@ -911,7 +950,7 @@ assign_domain_same_page(struct domain *d //XXX optimization unsigned long end = PAGE_ALIGN(mpaddr + size); for (mpaddr &= PAGE_MASK; mpaddr < end; mpaddr += PAGE_SIZE) { - __assign_domain_page(d, mpaddr, mpaddr, flags); + (void)__assign_domain_page(d, mpaddr, mpaddr, flags); } } @@ -1035,6 +1074,7 @@ assign_domain_page_replace(struct domain put_page(old_page); } } + perfc_incrc(assign_domain_page_replace); } // caller must get_page(new_page) before @@ -1095,6 +1135,7 @@ assign_domain_page_cmpxchg_rel(struct do domain_page_flush(d, mpaddr, old_mfn, new_mfn); put_page(old_page); + perfc_incrc(assign_domain_pge_cmpxchg_rel); return 0; } @@ -1167,6 +1208,7 @@ zap_domain_page_one(struct domain *d, un try_to_clear_PGC_allocate(d, page); } put_page(page); + perfc_incrc(zap_dcomain_page_one); } unsigned long @@ -1179,6 +1221,7 @@ dom0vp_zap_physmap(struct domain *d, uns } zap_domain_page_one(d, gpfn << PAGE_SHIFT, INVALID_MFN); + perfc_incrc(dom0vp_zap_physmap); return 0; } @@ -1224,10 +1267,131 @@ dom0vp_add_physmap(struct domain* d, uns get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY); assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, flags); //don't update p2m table because this page belongs to rd, not d. + perfc_incrc(dom0vp_add_physmap); out1: put_domain(rd); return error; } + +#ifdef CONFIG_XEN_IA64_EXPOSE_P2M +static struct page_info* p2m_pte_zero_page = NULL; + +void +expose_p2m_init(void) +{ + pte_t* pte; + + pte = pte_alloc_one_kernel(NULL, 0); + BUG_ON(pte == NULL); + smp_mb();// make contents of the page visible. + p2m_pte_zero_page = virt_to_page(pte); +} + +static int +expose_p2m_page(struct domain* d, unsigned long mpaddr, struct page_info* page) +{ + // we can't get_page(page) here. + // pte page is allocated form xen heap.(see pte_alloc_one_kernel().) + // so that the page has NULL page owner and it's reference count + // is useless. + // see also relinquish_pte()'s page_get_owner() == NULL check. + BUG_ON(page_get_owner(page) != NULL); + + return __assign_domain_page(d, mpaddr, page_to_maddr(page), + ASSIGN_readonly); +} + +// It is possible to optimize loop, But this isn't performance critical. 
+unsigned long +dom0vp_expose_p2m(struct domain* d, + unsigned long conv_start_gpfn, + unsigned long assign_start_gpfn, + unsigned long expose_size, unsigned long granule_pfn) +{ + unsigned long expose_num_pfn = expose_size >> PAGE_SHIFT; + unsigned long i; + volatile pte_t* conv_pte; + volatile pte_t* assign_pte; + + if ((expose_size % PAGE_SIZE) != 0 || + (granule_pfn % PTRS_PER_PTE) != 0 || + (expose_num_pfn % PTRS_PER_PTE) != 0 || + (conv_start_gpfn % granule_pfn) != 0 || + (assign_start_gpfn % granule_pfn) != 0 || + (expose_num_pfn % granule_pfn) != 0) { + DPRINTK("%s conv_start_gpfn 0x%016lx assign_start_gpfn 0x%016lx " + "expose_size 0x%016lx granulte_pfn 0x%016lx\n", __func__, + conv_start_gpfn, assign_start_gpfn, expose_size, granule_pfn); + return -EINVAL; + } + + if (granule_pfn != PTRS_PER_PTE) { + DPRINTK("%s granule_pfn 0x%016lx PTRS_PER_PTE 0x%016lx\n", + __func__, granule_pfn, PTRS_PER_PTE); + return -ENOSYS; + } + + // allocate pgd, pmd. + i = conv_start_gpfn; + while (i < expose_num_pfn) { + conv_pte = lookup_noalloc_domain_pte(d, (conv_start_gpfn + i) << + PAGE_SHIFT); + if (conv_pte == NULL) { + i++; + continue; + } + + assign_pte = lookup_alloc_domain_pte(d, (assign_start_gpfn << + PAGE_SHIFT) + i * sizeof(pte_t)); + if (assign_pte == NULL) { + DPRINTK("%s failed to allocate pte page\n", __func__); + return -ENOMEM; + } + + // skip to next pte page + i += PTRS_PER_PTE; + i &= ~(PTRS_PER_PTE - 1); + } + + // expose pte page + i = 0; + while (i < expose_num_pfn) { + conv_pte = lookup_noalloc_domain_pte(d, (conv_start_gpfn + i) << + PAGE_SHIFT); + if (conv_pte == NULL) { + i++; + continue; + } + + if (expose_p2m_page(d, (assign_start_gpfn << PAGE_SHIFT) + + i * sizeof(pte_t), virt_to_page(conv_pte)) < 0) { + DPRINTK("%s failed to assign page\n", __func__); + return -EAGAIN; + } + + // skip to next pte page + i += PTRS_PER_PTE; + i &= ~(PTRS_PER_PTE - 1); + } + + // expose p2m_pte_zero_page + for (i = 0; i < expose_num_pfn / PTRS_PER_PTE + 1; i++) { + assign_pte = lookup_noalloc_domain_pte(d, (assign_start_gpfn + i) << + PAGE_SHIFT); + BUG_ON(assign_pte == NULL); + if (pte_present(*assign_pte)) { + continue; + } + if (expose_p2m_page(d, (assign_start_gpfn + i) << PAGE_SHIFT, + p2m_pte_zero_page) < 0) { + DPRINTK("%s failed to assign zero-pte page\n", __func__); + return -EAGAIN; + } + } + + return 0; +} +#endif // grant table host mapping // mpaddr: host_addr: pseudo physical address @@ -1255,6 +1419,7 @@ create_grant_host_mapping(unsigned long get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY); assign_domain_page_replace(d, gpaddr, mfn, (flags & GNTMAP_readonly)? ASSIGN_readonly: ASSIGN_writable); + perfc_incrc(create_grant_host_mapping); return GNTST_okay; } @@ -1314,6 +1479,7 @@ destroy_grant_host_mapping(unsigned long BUG_ON(page_get_owner(page) == d);//try_to_clear_PGC_allocate(d, page) is not needed. 
put_page(page); + perfc_incrc(destroy_grant_host_mapping); return GNTST_okay; } @@ -1374,6 +1540,7 @@ steal_page(struct domain *d, struct page free_domheap_page(new); return -1; } + perfc_incrc(steal_page_refcount); } spin_lock(&d->page_alloc_lock); @@ -1443,6 +1610,7 @@ steal_page(struct domain *d, struct page list_del(&page->list); spin_unlock(&d->page_alloc_lock); + perfc_incrc(steal_page); return 0; } @@ -1460,6 +1628,8 @@ guest_physmap_add_page(struct domain *d, assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, ASSIGN_writable); //BUG_ON(mfn != ((lookup_domain_mpa(d, gpfn << PAGE_SHIFT) & _PFN_MASK) >> PAGE_SHIFT)); + + perfc_incrc(guest_physmap_add_page); } void @@ -1468,6 +1638,7 @@ guest_physmap_remove_page(struct domain { BUG_ON(mfn == 0);//XXX zap_domain_page_one(d, gpfn << PAGE_SHIFT, mfn); + perfc_incrc(guest_physmap_remove_page); } //XXX sledgehammer. @@ -1480,6 +1651,7 @@ domain_page_flush(struct domain* d, unsi shadow_mark_page_dirty(d, mpaddr >> PAGE_SHIFT); domain_flush_vtlb_all(); + perfc_incrc(domain_page_flush); } int --- xen/arch/ia64/xen/regionreg.c Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/xen/regionreg.c Wed Oct 11 16:10:40 2006 -0400 @@ -260,7 +260,7 @@ int set_one_rr(unsigned long rr, unsigne } else if (rreg == 7) { ia64_new_rr7(vmMangleRID(newrrv.rrval),v->domain->shared_info, v->arch.privregs, v->domain->arch.shared_info_va, - __get_cpu_var(vhpt_paddr)); + vcpu_vhpt_maddr(v)); } else { set_rr(rr,newrrv.rrval); } --- xen/arch/ia64/xen/vcpu.c Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/xen/vcpu.c Wed Oct 11 16:10:40 2006 -0400 @@ -1314,12 +1314,21 @@ static inline void static inline void check_xen_space_overlap (const char *func, u64 base, u64 page_size) { + /* Overlaps can occur only in region 7. + (This is an optimization to bypass all the checks). */ + if (REGION_NUMBER(base) != 7) + return; + /* Mask LSBs of base. */ base &= ~(page_size - 1); /* FIXME: ideally an MCA should be generated... */ if (range_overlap (HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END, - base, base + page_size)) + base, base + page_size) + || range_overlap(current->domain->arch.shared_info_va, + current->domain->arch.shared_info_va + + XSI_SIZE + XMAPPEDREGS_SIZE, + base, base + page_size)) panic_domain (NULL, "%s on Xen virtual space (%lx)\n", func, base); } @@ -2217,28 +2226,3 @@ IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 v return IA64_NO_FAULT; } - -int ia64_map_hypercall_param(void) -{ - struct vcpu *v = current; - struct domain *d = current->domain; - u64 vaddr = v->arch.hypercall_param.va & PAGE_MASK; - volatile pte_t* pte; - - if (v->arch.hypercall_param.va == 0) - return FALSE; - pte = lookup_noalloc_domain_pte(d, v->arch.hypercall_param.pa1); - if (!pte || !pte_present(*pte)) - return FALSE; - vcpu_itc_no_srlz(v, 2, vaddr, pte_val(*pte), -1UL, PAGE_SHIFT); - if (v->arch.hypercall_param.pa2) { - vaddr += PAGE_SIZE; - pte = lookup_noalloc_domain_pte(d, v->arch.hypercall_param.pa2); - if (pte && pte_present(*pte)) { - vcpu_itc_no_srlz(v, 2, vaddr, pte_val(*pte), - -1UL, PAGE_SHIFT); - } - } - ia64_srlz_d(); - return TRUE; -} --- xen/arch/ia64/xen/vhpt.c Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/xen/vhpt.c Wed Oct 11 16:10:40 2006 -0400 @@ -3,6 +3,10 @@ * * Copyright (C) 2004 Hewlett-Packard Co * Dan Magenheimer + * + * Copyright (c) 2006 Isaku Yamahata + * VA Linux Systems Japan K.K. 
+ * per vcpu vhpt support */ #include #include @@ -24,18 +28,32 @@ DEFINE_PER_CPU (unsigned long, vhpt_padd DEFINE_PER_CPU (unsigned long, vhpt_paddr); DEFINE_PER_CPU (unsigned long, vhpt_pend); -void vhpt_flush(void) -{ - struct vhpt_lf_entry *v = __va(__ia64_per_cpu_var(vhpt_paddr)); +static void + __vhpt_flush(unsigned long vhpt_maddr) +{ + struct vhpt_lf_entry *v = (struct vhpt_lf_entry*)__va(vhpt_maddr); int i; for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++) v->ti_tag = INVALID_TI_TAG; } -static void vhpt_erase(void) -{ - struct vhpt_lf_entry *v = (struct vhpt_lf_entry *)VHPT_ADDR; +void +local_vhpt_flush(void) +{ + __vhpt_flush(__ia64_per_cpu_var(vhpt_paddr)); +} + +static void +vcpu_vhpt_flush(struct vcpu* v) +{ + __vhpt_flush(vcpu_vhpt_maddr(v)); +} + +static void +vhpt_erase(unsigned long vhpt_maddr) +{ + struct vhpt_lf_entry *v = (struct vhpt_lf_entry*)__va(vhpt_maddr); int i; for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++) { @@ -45,17 +63,6 @@ static void vhpt_erase(void) v->ti_tag = INVALID_TI_TAG; } // initialize cache too??? -} - - -static void vhpt_map(unsigned long pte) -{ - unsigned long psr; - - psr = ia64_clear_ic(); - ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, pte, VHPT_SIZE_LOG2); - ia64_set_psr(psr); - ia64_srlz_i(); } void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long logps) @@ -102,7 +109,7 @@ void vhpt_multiple_insert(unsigned long void vhpt_init(void) { - unsigned long paddr, pte; + unsigned long paddr; struct page_info *page; #if !VHPT_ENABLED return; @@ -122,14 +129,51 @@ void vhpt_init(void) __get_cpu_var(vhpt_pend) = paddr + (1 << VHPT_SIZE_LOG2) - 1; printf("vhpt_init: vhpt paddr=0x%lx, end=0x%lx\n", paddr, __get_cpu_var(vhpt_pend)); - pte = pte_val(pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL)); - vhpt_map(pte); - ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) | - VHPT_ENABLED); - vhpt_erase(); -} - - + vhpt_erase(paddr); + // we don't enable VHPT here. + // context_switch() or schedule_tail() does it. +} + +#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT +int +pervcpu_vhpt_alloc(struct vcpu *v) +{ + unsigned long vhpt_size_log2 = VHPT_SIZE_LOG2; + + v->arch.vhpt_entries = + (1UL << vhpt_size_log2) / sizeof(struct vhpt_lf_entry); + v->arch.vhpt_page = + alloc_domheap_pages(NULL, vhpt_size_log2 - PAGE_SHIFT, 0); + if (!v->arch.vhpt_page) + return -ENOMEM; + + v->arch.vhpt_maddr = page_to_maddr(v->arch.vhpt_page); + if (v->arch.vhpt_maddr & ((1 << VHPT_SIZE_LOG2) - 1)) + panic("pervcpu_vhpt_init: bad VHPT alignment!\n"); + + v->arch.pta.val = 0; // to zero reserved bits + v->arch.pta.ve = 1; // enable vhpt + v->arch.pta.size = VHPT_SIZE_LOG2; + v->arch.pta.vf = 1; // long format + //v->arch.pta.base = __va(v->arch.vhpt_maddr) >> 15; + v->arch.pta.base = VHPT_ADDR >> 15; + + vhpt_erase(v->arch.vhpt_maddr); + smp_mb(); // per vcpu vhpt may be used by another physical cpu. + return 0; +} + +void +pervcpu_vhpt_free(struct vcpu *v) +{ + free_domheap_pages(v->arch.vhpt_page, VHPT_SIZE_LOG2 - PAGE_SHIFT); +} +#endif + +// SMP: we can't assume v == current, vcpu might move to another physical cpu. +// So memory barrier is necessary. +// if we can guranttee that vcpu can run on only this physical cpu +// (e.g. vcpu == current), smp_mb() is unnecessary. void vcpu_flush_vtlb_all(struct vcpu *v) { if (VMX_DOMAIN(v)) { @@ -144,9 +188,14 @@ void vcpu_flush_vtlb_all(struct vcpu *v) /* First VCPU tlb. */ vcpu_purge_tr_entry(&PSCBX(v,dtlb)); vcpu_purge_tr_entry(&PSCBX(v,itlb)); + smp_mb(); /* Then VHPT. 
*/ - vhpt_flush(); + if (HAS_PERVCPU_VHPT(v->domain)) + vcpu_vhpt_flush(v); + else + local_vhpt_flush(); + smp_mb(); /* Then mTLB. */ local_flush_tlb_all(); @@ -155,6 +204,8 @@ void vcpu_flush_vtlb_all(struct vcpu *v) /* We could clear bit in d->domain_dirty_cpumask only if domain d in not running on this processor. There is currently no easy way to check this. */ + + perfc_incrc(vcpu_flush_vtlb_all); } static void __vcpu_flush_vtlb_all(void *vcpu) @@ -174,32 +225,59 @@ void domain_flush_vtlb_all (void) if (v->processor == cpu) vcpu_flush_vtlb_all(v); else + // SMP: it is racy to reference v->processor. + // vcpu scheduler may move this vcpu to another + // physicall processor, and change the value + // using plain store. + // We may be seeing the old value of it. + // In such case, flush_vtlb_for_context_switch() + // takes care of mTLB flush. smp_call_function_single(v->processor, __vcpu_flush_vtlb_all, v, 1, 1); } -} - -static void cpu_flush_vhpt_range (int cpu, u64 vadr, u64 addr_range) -{ - void *vhpt_base = __va(per_cpu(vhpt_paddr, cpu)); + perfc_incrc(domain_flush_vtlb_all); +} + +// Callers may need to call smp_mb() before/after calling this. +// Be carefull. +static void +__flush_vhpt_range(unsigned long vhpt_maddr, u64 vadr, u64 addr_range) +{ + void *vhpt_base = __va(vhpt_maddr); while ((long)addr_range > 0) { /* Get the VHPT entry. */ unsigned int off = ia64_thash(vadr) - VHPT_ADDR; - volatile struct vhpt_lf_entry *v; - v = vhpt_base + off; + struct vhpt_lf_entry *v = vhpt_base + off; v->ti_tag = INVALID_TI_TAG; addr_range -= PAGE_SIZE; vadr += PAGE_SIZE; } } +static void +cpu_flush_vhpt_range(int cpu, u64 vadr, u64 addr_range) +{ + __flush_vhpt_range(per_cpu(vhpt_paddr, cpu), vadr, addr_range); +} + +static void +vcpu_flush_vhpt_range(struct vcpu* v, u64 vadr, u64 addr_range) +{ + __flush_vhpt_range(vcpu_vhpt_maddr(v), vadr, addr_range); +} + void vcpu_flush_tlb_vhpt_range (u64 vadr, u64 log_range) { - cpu_flush_vhpt_range (current->processor, vadr, 1UL << log_range); + if (HAS_PERVCPU_VHPT(current->domain)) + vcpu_flush_vhpt_range(current, vadr, 1UL << log_range); + else + cpu_flush_vhpt_range(current->processor, + vadr, 1UL << log_range); ia64_ptcl(vadr, log_range << 2); ia64_srlz_i(); + perfc_incrc(vcpu_flush_tlb_vhpt_range); } void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range) @@ -229,19 +307,30 @@ void domain_flush_vtlb_range (struct dom if (!test_bit(_VCPUF_initialised, &v->vcpu_flags)) continue; - /* Invalidate VHPT entries. */ - cpu_flush_vhpt_range (v->processor, vadr, addr_range); + if (HAS_PERVCPU_VHPT(d)) { + vcpu_flush_vhpt_range(v, vadr, addr_range); + } else { + // SMP: it is racy to reference v->processor. + // vcpu scheduler may move this vcpu to another + // physicall processor, and change the value + // using plain store. + // We may be seeing the old value of it. + // In such case, flush_vtlb_for_context_switch() + /* Invalidate VHPT entries. */ + cpu_flush_vhpt_range(v->processor, vadr, addr_range); + } } // ptc.ga has release semantics. /* ptc.ga */ ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT); + perfc_incrc(domain_flush_vtlb_range); } static void flush_tlb_vhpt_all (struct domain *d) { /* First VHPT. */ - vhpt_flush (); + local_vhpt_flush (); /* Then mTLB. */ local_flush_tlb_all (); @@ -250,7 +339,10 @@ void domain_flush_tlb_vhpt(struct domain void domain_flush_tlb_vhpt(struct domain *d) { /* Very heavy... 
*/ - on_each_cpu ((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1); + if (HAS_PERVCPU_VHPT(d) /* || VMX_DOMAIN(v) */) + on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1); + else + on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1); cpus_clear (d->domain_dirty_cpumask); } --- xen/arch/ia64/xen/xen.lds.S Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/xen/xen.lds.S Wed Oct 11 16:10:40 2006 -0400 @@ -172,6 +172,9 @@ SECTIONS . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose * kernel data */ + + .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) + { *(.data.read_mostly) } .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) { *(.data.cacheline_aligned) } --- xen/arch/ia64/xen/xenmem.c Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/xen/xenmem.c Wed Oct 11 16:10:40 2006 -0400 @@ -17,10 +17,19 @@ #include #include -extern pgd_t frametable_pg_dir[]; - -#define frametable_pgd_offset(addr) \ - (frametable_pg_dir + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))) +extern unsigned long frametable_pg_dir[]; + +#define FRAMETABLE_PGD_OFFSET(ADDR) \ + (frametable_pg_dir + (((ADDR) >> PGDIR_SHIFT) & \ + ((1UL << (PAGE_SHIFT - 3)) - 1))) + +#define FRAMETABLE_PMD_OFFSET(PGD, ADDR) \ + __va((unsigned long *)(PGD) + (((ADDR) >> PMD_SHIFT) & \ + ((1UL << (PAGE_SHIFT - 3)) - 1))) + +#define FRAMETABLE_PTE_OFFSET(PMD, ADDR) \ + (pte_t *)__va((unsigned long *)(PMD) + (((ADDR) >> PAGE_SHIFT) & \ + ((1UL << (PAGE_SHIFT - 3)) - 1))) static unsigned long table_size; static int opt_contig_mem = 0; @@ -29,13 +38,13 @@ boolean_param("contig_mem", opt_contig_m #define opt_contig_mem 1 #endif -struct page_info *frame_table; +struct page_info *frame_table __read_mostly; unsigned long max_page; /* * Set up the page tables. 
*/ -volatile unsigned long *mpt_table; +volatile unsigned long *mpt_table __read_mostly; void paging_init (void) @@ -72,7 +81,7 @@ paging_init (void) #ifdef CONFIG_VIRTUAL_FRAME_TABLE -static inline void * +static unsigned long alloc_dir_page(void) { unsigned long mfn = alloc_boot_pages(1, 1); @@ -82,7 +91,7 @@ alloc_dir_page(void) ++table_size; dir = mfn << PAGE_SHIFT; memset(__va(dir), 0, PAGE_SIZE); - return (void *)dir; + return dir; } static inline unsigned long @@ -100,15 +109,33 @@ alloc_table_page(unsigned long fill) return mfn; } +static void +create_page_table(unsigned long start_page, unsigned long end_page, + unsigned long fill) +{ + unsigned long address; + unsigned long *dir; + pte_t *pteptr; + + for (address = start_page; address < end_page; address += PAGE_SIZE) { + dir = FRAMETABLE_PGD_OFFSET(address); + if (!*dir) + *dir = alloc_dir_page(); + dir = FRAMETABLE_PMD_OFFSET(*dir, address); + if (!*dir) + *dir = alloc_dir_page(); + pteptr = FRAMETABLE_PTE_OFFSET(*dir, address); + if (pte_none(*pteptr)) + set_pte(pteptr, pfn_pte(alloc_table_page(fill), + PAGE_KERNEL)); + } +} + static int create_frametable_page_table (u64 start, u64 end, void *arg) { - unsigned long address, start_page, end_page; struct page_info *map_start, *map_end; - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; + unsigned long start_page, end_page; map_start = frame_table + (__pa(start) >> PAGE_SHIFT); map_end = frame_table + (__pa(end) >> PAGE_SHIFT); @@ -116,23 +143,7 @@ create_frametable_page_table (u64 start, start_page = (unsigned long) map_start & PAGE_MASK; end_page = PAGE_ALIGN((unsigned long) map_end); - for (address = start_page; address < end_page; address += PAGE_SIZE) { - pgd = frametable_pgd_offset(address); - if (pgd_none(*pgd)) - pgd_populate(NULL, pgd, alloc_dir_page()); - pud = pud_offset(pgd, address); - - if (pud_none(*pud)) - pud_populate(NULL, pud, alloc_dir_page()); - pmd = pmd_offset(pud, address); - - if (pmd_none(*pmd)) - pmd_populate_kernel(NULL, pmd, alloc_dir_page()); - pte = pte_offset_kernel(pmd, address); - - if (pte_none(*pte)) - set_pte(pte, pfn_pte(alloc_table_page(0), PAGE_KERNEL)); - } + create_page_table(start_page, end_page, 0L); return 0; } @@ -140,11 +151,7 @@ create_mpttable_page_table (u64 start, u create_mpttable_page_table (u64 start, u64 end, void *arg) { unsigned long map_start, map_end; - unsigned long address, start_page, end_page; - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; + unsigned long start_page, end_page; map_start = (unsigned long)(mpt_table + (__pa(start) >> PAGE_SHIFT)); map_end = (unsigned long)(mpt_table + (__pa(end) >> PAGE_SHIFT)); @@ -152,23 +159,7 @@ create_mpttable_page_table (u64 start, u start_page = map_start & PAGE_MASK; end_page = PAGE_ALIGN(map_end); - for (address = start_page; address < end_page; address += PAGE_SIZE) { - pgd = frametable_pgd_offset(address); - if (pgd_none(*pgd)) - pgd_populate(NULL, pgd, alloc_dir_page()); - pud = pud_offset(pgd, address); - - if (pud_none(*pud)) - pud_populate(NULL, pud, alloc_dir_page()); - pmd = pmd_offset(pud, address); - - if (pmd_none(*pmd)) - pmd_populate_kernel(NULL, pmd, alloc_dir_page()); - pte = pte_offset_kernel(pmd, address); - - if (pte_none(*pte)) - set_pte(pte, pfn_pte(alloc_table_page(INVALID_M2P_ENTRY), PAGE_KERNEL)); - } + create_page_table(start_page, end_page, INVALID_M2P_ENTRY); return 0; } --- xen/arch/ia64/xen/xensetup.c Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/xen/xensetup.c Wed Oct 11 16:10:40 2006 -0400 @@ -48,6 +48,7 @@ extern void 
mem_init(void); extern void mem_init(void); extern void init_IRQ(void); extern void trap_init(void); +extern void xen_patch_kernel(void); /* opt_nosmp: If true, secondary processors are ignored. */ static int opt_nosmp = 0; @@ -81,6 +82,7 @@ unsigned long xenheap_size = XENHEAP_DEF unsigned long xenheap_size = XENHEAP_DEFAULT_SIZE; extern long running_on_sim; unsigned long xen_pstart; +void *xen_heap_start __read_mostly; static int xen_count_pages(u64 start, u64 end, void *arg) @@ -184,8 +186,8 @@ efi_print(void) for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) { md = p; - printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n", - i, md->type, md->attribute, md->phys_addr, + printk("mem%02u: type=%2u, attr=0x%016lx, range=[0x%016lx-0x%016lx) " + "(%luMB)\n", i, md->type, md->attribute, md->phys_addr, md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT), md->num_pages >> (20 - EFI_PAGE_SHIFT)); } @@ -242,7 +244,6 @@ void start_kernel(void) void start_kernel(void) { char *cmdline; - void *heap_start; unsigned long nr_pages; unsigned long dom0_memory_start, dom0_memory_size; unsigned long dom0_initrd_start, dom0_initrd_size; @@ -292,6 +293,8 @@ void start_kernel(void) xenheap_phys_end = xen_pstart + xenheap_size; printk("xen image pstart: 0x%lx, xenheap pend: 0x%lx\n", xen_pstart, xenheap_phys_end); + + xen_patch_kernel(); kern_md = md = efi_get_md(xen_pstart); md_end = __pa(ia64_imva(&_end)); @@ -389,10 +392,10 @@ void start_kernel(void) printf("find_memory: efi_memmap_walk returns max_page=%lx\n",max_page); efi_print(); - heap_start = memguard_init(ia64_imva(&_end)); - printf("Before heap_start: %p\n", heap_start); - heap_start = __va(init_boot_allocator(__pa(heap_start))); - printf("After heap_start: %p\n", heap_start); + xen_heap_start = memguard_init(ia64_imva(&_end)); + printf("Before xen_heap_start: %p\n", xen_heap_start); + xen_heap_start = __va(init_boot_allocator(__pa(xen_heap_start))); + printf("After xen_heap_start: %p\n", xen_heap_start); efi_memmap_walk(filter_rsvd_memory, init_boot_pages); efi_memmap_walk(xen_count_pages, &nr_pages); @@ -410,10 +413,10 @@ void start_kernel(void) end_boot_allocator(); - init_xenheap_pages(__pa(heap_start), xenheap_phys_end); + init_xenheap_pages(__pa(xen_heap_start), xenheap_phys_end); printk("Xen heap: %luMB (%lukB)\n", - (xenheap_phys_end-__pa(heap_start)) >> 20, - (xenheap_phys_end-__pa(heap_start)) >> 10); + (xenheap_phys_end-__pa(xen_heap_start)) >> 20, + (xenheap_phys_end-__pa(xen_heap_start)) >> 10); late_setup_arch(&cmdline); @@ -495,6 +498,8 @@ printk("num_online_cpus=%d, max_cpus=%d\ /* Hide the HCDP table from dom0 */ efi.hcdp = NULL; } + + expose_p2m_init(); /* Create initial domain 0. */ dom0 = domain_create(0); --- xen/arch/ia64/xen/xentime.c Tue Oct 10 21:05:50 2006 +0100 +++ xen/arch/ia64/xen/xentime.c Wed Oct 11 16:10:40 2006 -0400 @@ -39,7 +39,7 @@ seqlock_t xtime_lock __cacheline_aligned #define TIME_KEEPER_ID 0 unsigned long domain0_ready = 0; static s_time_t stime_irq = 0x0; /* System time at last 'time update' */ -unsigned long itc_scale, ns_scale; +unsigned long itc_scale __read_mostly, ns_scale __read_mostly; unsigned long itc_at_irq; /* We don't expect an absolute cycle value here, since then no way --- xen/include/asm-ia64/dom_fw.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/asm-ia64/dom_fw.h Wed Oct 11 16:10:40 2006 -0400 @@ -38,6 +38,13 @@ The high part is the class (xen/pal/sal/efi). 
*/ #define FW_HYPERCALL_NUM_MASK_HIGH ~0xffUL #define FW_HYPERCALL_NUM_MASK_LOW 0xffUL + +/* Xen hypercalls are 0-63. */ +#define FW_HYPERCALL_XEN 0x0000UL + +/* Define some faster and lighter hypercalls. + See definitions in arch-ia64.h */ +#define FW_HYPERCALL_XEN_FAST 0x0200UL /* * PAL can be called in physical or virtual mode simply by --- xen/include/asm-ia64/domain.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/asm-ia64/domain.h Wed Oct 11 16:10:40 2006 -0400 @@ -87,6 +87,9 @@ struct arch_domain { unsigned long flags; struct { unsigned int is_vti : 1; +#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT + unsigned int has_pervcpu_vhpt : 1; +#endif }; }; @@ -142,11 +145,12 @@ struct arch_domain { (sizeof(vcpu_info_t) * (v)->vcpu_id + \ offsetof(vcpu_info_t, evtchn_upcall_mask)) -struct hypercall_param { - unsigned long va; - unsigned long pa1; - unsigned long pa2; -}; +#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT +#define HAS_PERVCPU_VHPT(d) ((d)->arch.has_pervcpu_vhpt) +#else +#define HAS_PERVCPU_VHPT(d) (0) +#endif + struct arch_vcpu { /* Save the state of vcpu. @@ -192,14 +196,19 @@ struct arch_vcpu { char irq_new_condition; // vpsr.i/vtpr change, check for pending VHPI char hypercall_continuation; - struct hypercall_param hypercall_param; // used to remap a hypercall param - //for phycial emulation unsigned long old_rsc; int mode_flags; fpswa_ret_t fpswa_ret; /* save return values of FPSWA emulation */ struct timer hlt_timer; struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */ + +#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT + PTA pta; + unsigned long vhpt_maddr; + struct page_info* vhpt_page; + unsigned long vhpt_entries; +#endif #define INVALID_PROCESSOR INT_MAX int last_processor; --- xen/include/asm-ia64/guest_access.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/asm-ia64/guest_access.h Wed Oct 11 16:10:40 2006 -0400 @@ -1,91 +1,107 @@ -/****************************************************************************** - * guest_access.h - * - * Copyright (c) 2006, K A Fraser +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright (C) IBM Corp. 2006 + * + * Authors: Hollis Blanchard + * Tristan Gingold */ -#ifndef __ASM_IA64_GUEST_ACCESS_H__ -#define __ASM_IA64_GUEST_ACCESS_H__ +#ifndef __ASM_GUEST_ACCESS_H__ +#define __ASM_GUEST_ACCESS_H__ -#include +extern unsigned long xencomm_copy_to_guest(void *to, const void *from, + unsigned int len, unsigned int skip); +extern unsigned long xencomm_copy_from_guest(void *to, const void *from, + unsigned int len, unsigned int skip); +extern void *xencomm_add_offset(void *handle, unsigned int bytes); +extern int xencomm_handle_is_null(void *ptr); + /* Is the guest handle a NULL reference? 
*/ -#define guest_handle_is_null(hnd) ((hnd).p == NULL) +#define guest_handle_is_null(hnd) \ + ((hnd).p == NULL || xencomm_handle_is_null((hnd).p)) /* Offset the given guest handle into the array it refers to. */ -#define guest_handle_add_offset(hnd, nr) ((hnd).p += (nr)) +#define guest_handle_add_offset(hnd, nr) ({ \ + const typeof((hnd).p) _ptr = (hnd).p; \ + (hnd).p = xencomm_add_offset(_ptr, nr * sizeof(*_ptr)); \ +}) /* Cast a guest handle to the specified type of handle. */ -#define guest_handle_cast(hnd, type) ({ \ - type *_x = (hnd).p; \ - (XEN_GUEST_HANDLE(type)) { _x }; \ +#define guest_handle_cast(hnd, type) ({ \ + type *_x = (hnd).p; \ + XEN_GUEST_HANDLE(type) _y; \ + set_xen_guest_handle(_y, _x); \ + _y; \ }) -#define guest_handle_from_ptr(ptr, type) ((XEN_GUEST_HANDLE(type)) { (type *)ptr }) + +/* Since we run in real mode, we can safely access all addresses. That also + * means our __routines are identical to our "normal" routines. */ +#define guest_handle_okay(hnd, nr) 1 /* - * Copy an array of objects to guest context via a guest handle, - * specifying an offset into the guest array. + * Copy an array of objects to guest context via a guest handle. + * Optionally specify an offset into the guest array. */ -#define copy_to_guest_offset(hnd, off, ptr, nr) ({ \ - const typeof(ptr) _x = (hnd).p; \ - const typeof(ptr) _y = (ptr); \ - copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \ +#define copy_to_guest_offset(hnd, idx, ptr, nr) \ + __copy_to_guest_offset(hnd, idx, ptr, nr) + +/* Copy sub-field of a structure to guest context via a guest handle. */ +#define copy_field_to_guest(hnd, ptr, field) \ + __copy_field_to_guest(hnd, ptr, field) + +/* + * Copy an array of objects from guest context via a guest handle. + * Optionally specify an offset into the guest array. + */ +#define copy_from_guest_offset(ptr, hnd, idx, nr) \ + __copy_from_guest_offset(ptr, hnd, idx, nr) + +/* Copy sub-field of a structure from guest context via a guest handle. */ +#define copy_field_from_guest(ptr, hnd, field) \ + __copy_field_from_guest(ptr, hnd, field) + +#define __copy_to_guest_offset(hnd, idx, ptr, nr) ({ \ + const typeof(ptr) _d = (hnd).p; \ + const typeof(ptr) _s = (ptr); \ + xencomm_copy_to_guest(_d, _s, sizeof(*_s)*(nr), sizeof(*_s)*(idx)); \ }) -/* - * Copy an array of objects from guest context via a guest handle, - * specifying an offset into the guest array. - */ -#define copy_from_guest_offset(ptr, hnd, off, nr) ({ \ - const typeof(ptr) _x = (hnd).p; \ - const typeof(ptr) _y = (ptr); \ - copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \ +#define __copy_field_to_guest(hnd, ptr, field) ({ \ + const int _off = offsetof(typeof(*ptr), field); \ + const typeof(ptr) _d = (hnd).p; \ + const typeof(&(ptr)->field) _s = &(ptr)->field; \ + xencomm_copy_to_guest(_d, _s, sizeof(*_s), _off); \ }) -/* Copy sub-field of a structure to guest context via a guest handle. */ -#define copy_field_to_guest(hnd, ptr, field) ({ \ - const typeof(&(ptr)->field) _x = &(hnd).p->field; \ - const typeof(&(ptr)->field) _y = &(ptr)->field; \ - copy_to_user(_x, _y, sizeof(*_x)); \ +#define __copy_from_guest_offset(ptr, hnd, idx, nr) ({ \ + const typeof(ptr) _s = (hnd).p; \ + const typeof(ptr) _d = (ptr); \ + xencomm_copy_from_guest(_d, _s, sizeof(*_s)*(nr), sizeof(*_s)*(idx)); \ }) -/* Copy sub-field of a structure from guest context via a guest handle. 
*/ -#define copy_field_from_guest(ptr, hnd, field) ({ \ - const typeof(&(ptr)->field) _x = &(hnd).p->field; \ - const typeof(&(ptr)->field) _y = &(ptr)->field; \ - copy_from_user(_y, _x, sizeof(*_x)); \ +#define __copy_field_from_guest(ptr, hnd, field) ({ \ + const int _off = offsetof(typeof(*ptr), field); \ + const typeof(ptr) _s = (hnd).p; \ + const typeof(&(ptr)->field) _d = &(ptr)->field; \ + xencomm_copy_from_guest(_d, _s, sizeof(*_d), _off); \ }) -/* - * Pre-validate a guest handle. - * Allows use of faster __copy_* functions. - */ -#define guest_handle_okay(hnd, nr) \ - array_access_ok((hnd).p, (nr), sizeof(*(hnd).p)) +/* Internal use only: returns 0 in case of bad address. */ +extern unsigned long xencomm_paddr_to_maddr(unsigned long paddr); -#define __copy_to_guest_offset(hnd, off, ptr, nr) ({ \ - const typeof(ptr) _x = (hnd).p; \ - const typeof(ptr) _y = (ptr); \ - __copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \ -}) - -#define __copy_from_guest_offset(ptr, hnd, off, nr) ({ \ - const typeof(ptr) _x = (hnd).p; \ - const typeof(ptr) _y = (ptr); \ - __copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \ -}) - -#define __copy_field_to_guest(hnd, ptr, field) ({ \ - const typeof(&(ptr)->field) _x = &(hnd).p->field; \ - const typeof(&(ptr)->field) _y = &(ptr)->field; \ - __copy_to_user(_x, _y, sizeof(*_x)); \ -}) - -#define __copy_field_from_guest(ptr, hnd, field) ({ \ - const typeof(&(ptr)->field) _x = &(hnd).p->field; \ - const typeof(&(ptr)->field) _y = &(ptr)->field; \ - __copy_from_user(_y, _x, sizeof(*_x)); \ -}) - -#endif /* __ASM_IA64_GUEST_ACCESS_H__ */ +#endif /* __ASM_GUEST_ACCESS_H__ */ --- xen/include/asm-ia64/ia64_int.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/asm-ia64/ia64_int.h Wed Oct 11 16:10:40 2006 -0400 @@ -36,7 +36,9 @@ #define IA64_NO_FAULT 0x0000 #define IA64_FAULT 0x0001 #define IA64_RFI_IN_PROGRESS 0x0002 -#define IA64_RETRY 0x0003 +// To avoid conflicting with return value of handle_fpu_swa() +// set IA64_RETRY to -0x000f +#define IA64_RETRY (-0x000f) #define IA64_FORCED_IFA 0x0004 #define IA64_USE_TLB 0x0005 #define IA64_ILLOP_FAULT (IA64_GENEX_VECTOR | 0x00) --- xen/include/asm-ia64/linux-xen/asm/cache.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/asm-ia64/linux-xen/asm/cache.h Wed Oct 11 16:10:40 2006 -0400 @@ -32,6 +32,6 @@ #endif #endif -#define __read_mostly +#define __read_mostly __attribute__((__section__(".data.read_mostly"))) #endif /* _ASM_IA64_CACHE_H */ --- xen/include/asm-ia64/linux-xen/asm/pgtable.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/asm-ia64/linux-xen/asm/pgtable.h Wed Oct 11 16:10:40 2006 -0400 @@ -68,6 +68,20 @@ #ifdef XEN #define _PAGE_VIRT_D (__IA64_UL(1) << 53) /* Virtual dirty bit */ #define _PAGE_PROTNONE 0 + +/* domVTI */ +#define GPFN_MEM (0UL << 60) /* Guest pfn is normal mem */ +#define GPFN_FRAME_BUFFER (1UL << 60) /* VGA framebuffer */ +#define GPFN_LOW_MMIO (2UL << 60) /* Low MMIO range */ +#define GPFN_PIB (3UL << 60) /* PIB base */ +#define GPFN_IOSAPIC (4UL << 60) /* IOSAPIC base */ +#define GPFN_LEGACY_IO (5UL << 60) /* Legacy I/O base */ +#define GPFN_GFW (6UL << 60) /* Guest Firmware */ +#define GPFN_HIGH_MMIO (7UL << 60) /* High MMIO range */ + +#define GPFN_IO_MASK (7UL << 60) /* Guest pfn is I/O type */ +#define GPFN_INV_MASK (1UL << 63) /* Guest pfn is invalid */ + #else #define _PAGE_PROTNONE (__IA64_UL(1) << 63) #endif --- xen/include/asm-ia64/linux-xen/asm/processor.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/asm-ia64/linux-xen/asm/processor.h Wed Oct 11 16:10:40 2006 -0400 @@ -89,6 +89,7 @@ 
#ifdef XEN #include +#include #else /* like above but expressed as bitfields for more efficient access: */ struct ia64_psr { @@ -571,6 +572,23 @@ ia64_eoi (void) #define cpu_relax() ia64_hint(ia64_hint_pause) +static inline int +ia64_get_irr(unsigned int vector) +{ + unsigned int reg = vector / 64; + unsigned int bit = vector % 64; + u64 irr; + + switch (reg) { + case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break; + case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break; + case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break; + case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break; + } + + return test_bit(bit, &irr); +} + static inline void ia64_set_lrr0 (unsigned long val) { --- xen/include/asm-ia64/linux-xen/asm/system.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/asm-ia64/linux-xen/asm/system.h Wed Oct 11 16:10:40 2006 -0400 @@ -189,6 +189,7 @@ do { \ #ifdef XEN #define local_irq_is_enabled() (!irqs_disabled()) +extern struct vcpu *ia64_switch_to(struct vcpu *next_task); #else #ifdef __KERNEL__ --- xen/include/asm-ia64/linux/asm/sal.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/asm-ia64/linux/asm/sal.h Wed Oct 11 16:10:40 2006 -0400 @@ -657,15 +657,7 @@ ia64_sal_freq_base (unsigned long which, return isrv.status; } -/* Flush all the processor and platform level instruction and/or data caches */ -static inline s64 -ia64_sal_cache_flush (u64 cache_type) -{ - struct ia64_sal_retval isrv; - SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0); - return isrv.status; -} - +extern s64 ia64_sal_cache_flush (u64 cache_type); /* Initialize all the processor and platform level instruction and data caches */ static inline s64 --- xen/include/asm-ia64/mm.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/asm-ia64/mm.h Wed Oct 11 16:10:40 2006 -0400 @@ -117,10 +117,14 @@ struct page_info #define IS_XEN_HEAP_FRAME(_pfn) ((page_to_maddr(_pfn) < xenheap_phys_end) \ && (page_to_maddr(_pfn) >= xen_pstart)) -static inline struct domain *unpickle_domptr(u32 _d) -{ return (_d == 0) ? NULL : __va(_d); } +extern void *xen_heap_start; +#define __pickle(a) ((unsigned long)a - (unsigned long)xen_heap_start) +#define __unpickle(a) (void *)(a + xen_heap_start) + +static inline struct domain *unpickle_domptr(u64 _d) +{ return (_d == 0) ? NULL : __unpickle(_d); } static inline u32 pickle_domptr(struct domain *_d) -{ return (_d == NULL) ? 0 : (u32)__pa(_d); } +{ return (_d == NULL) ? 
0 : (u32)__pickle(_d); } #define page_get_owner(_p) (unpickle_domptr((_p)->u.inuse._domain)) #define page_set_owner(_p, _d) ((_p)->u.inuse._domain = pickle_domptr(_d)) @@ -420,7 +424,7 @@ extern void relinquish_mm(struct domain* extern void relinquish_mm(struct domain* d); extern struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr); extern void assign_new_domain0_page(struct domain *d, unsigned long mpaddr); -extern void __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr, unsigned long flags); +extern int __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr, unsigned long flags); extern void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr); extern void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags); struct p2m_entry; @@ -435,6 +439,13 @@ extern unsigned long do_dom0vp_op(unsign extern unsigned long do_dom0vp_op(unsigned long cmd, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3); extern unsigned long dom0vp_zap_physmap(struct domain *d, unsigned long gpfn, unsigned int extent_order); extern unsigned long dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn, unsigned long flags, domid_t domid); +#ifdef CONFIG_XEN_IA64_EXPOSE_P2M +extern void expose_p2m_init(void); +extern unsigned long dom0vp_expose_p2m(struct domain* d, unsigned long conv_start_gpfn, unsigned long assign_start_gpfn, unsigned long expose_size, unsigned long granule_pfn); +#else +#define expose_p2m_init() do { } while (0) +#define dom0vp_expose_p2m(d, conv_start_gpfn, assign_start_gpfn, expose_size, granule_pfn) (-ENOSYS) +#endif extern volatile unsigned long *mpt_table; extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn); --- xen/include/asm-ia64/perfc_defn.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/asm-ia64/perfc_defn.h Wed Oct 11 16:10:40 2006 -0400 @@ -107,3 +107,30 @@ PERFPRIVOPADDR(get_ifa) PERFPRIVOPADDR(get_ifa) PERFPRIVOPADDR(thash) #endif + +// vhpt.c +PERFCOUNTER_CPU(vcpu_flush_vtlb_all, "vcpu_flush_vtlb_all") +PERFCOUNTER_CPU(domain_flush_vtlb_all, "domain_flush_vtlb_all") +PERFCOUNTER_CPU(vcpu_flush_tlb_vhpt_range, "vcpu_flush_tlb_vhpt_range") +PERFCOUNTER_CPU(domain_flush_vtlb_range, "domain_flush_vtlb_range") + +// domain.c +PERFCOUNTER_CPU(flush_vtlb_for_context_switch, "flush_vtlb_for_context_switch") + +// mm.c +PERFCOUNTER_CPU(assign_domain_page_replace, "assign_domain_page_replace") +PERFCOUNTER_CPU(assign_domain_pge_cmpxchg_rel, "assign_domain_pge_cmpxchg_rel") +PERFCOUNTER_CPU(zap_dcomain_page_one, "zap_dcomain_page_one") +PERFCOUNTER_CPU(dom0vp_zap_physmap, "dom0vp_zap_physmap") +PERFCOUNTER_CPU(dom0vp_add_physmap, "dom0vp_add_physmap") +PERFCOUNTER_CPU(create_grant_host_mapping, "create_grant_host_mapping") +PERFCOUNTER_CPU(destroy_grant_host_mapping, "destroy_grant_host_mapping") +PERFCOUNTER_CPU(steal_page_refcount, "steal_page_refcount") +PERFCOUNTER_CPU(steal_page, "steal_page") +PERFCOUNTER_CPU(guest_physmap_add_page, "guest_physmap_add_page") +PERFCOUNTER_CPU(guest_physmap_remove_page, "guest_physmap_remove_page") +PERFCOUNTER_CPU(domain_page_flush, "domain_page_flush") + +// dom0vp +PERFCOUNTER_CPU(dom0vp_phystomach, "dom0vp_phystomach") +PERFCOUNTER_CPU(dom0vp_machtophys, "dom0vp_machtophys") --- xen/include/asm-ia64/uaccess.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/asm-ia64/uaccess.h Wed Oct 11 16:10:40 2006 -0400 @@ -211,30 +211,16 @@ extern 
unsigned long __must_check __copy extern unsigned long __must_check __copy_user (void __user *to, const void __user *from, unsigned long count); -extern int ia64_map_hypercall_param(void); - static inline unsigned long __copy_to_user (void __user *to, const void *from, unsigned long count) { - unsigned long len; - len = __copy_user(to, (void __user *)from, count); - if (len == 0) - return 0; - if (ia64_map_hypercall_param()) - len = __copy_user(to, (void __user *)from, count); /* retry */ - return len; + return __copy_user(to, (void __user *)from, count); } static inline unsigned long __copy_from_user (void *to, const void __user *from, unsigned long count) { - unsigned long len; - len = __copy_user((void __user *)to, from, count); - if (len == 0) - return 0; - if (ia64_map_hypercall_param()) - len = __copy_user((void __user *) to, from, count); /* retry */ - return len; + return __copy_user((void __user *)to, from, count); } #define __copy_to_user_inatomic __copy_to_user --- xen/include/asm-ia64/vhpt.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/asm-ia64/vhpt.h Wed Oct 11 16:10:40 2006 -0400 @@ -37,11 +37,46 @@ extern void vhpt_multiple_insert(unsigne unsigned long logps); extern void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long logps); -void vhpt_flush(void); +void local_vhpt_flush(void); /* Currently the VHPT is allocated per CPU. */ DECLARE_PER_CPU (unsigned long, vhpt_paddr); DECLARE_PER_CPU (unsigned long, vhpt_pend); +#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT +#if !VHPT_ENABLED +#error "VHPT_ENABLED must be set for CONFIG_XEN_IA64_PERVCPU_VHPT" +#endif +#endif + +#include +int pervcpu_vhpt_alloc(struct vcpu *v); +void pervcpu_vhpt_free(struct vcpu *v); +static inline unsigned long +vcpu_vhpt_maddr(struct vcpu* v) +{ +#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT + if (HAS_PERVCPU_VHPT(v->domain)) + return v->arch.vhpt_maddr; +#endif + +#if 0 + // referencecing v->processor is racy. 
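+	// (v can be migrated to another physical CPU between the read of
+	//  v->processor and the per_cpu() lookup below, so the returned
+	//  address may already be stale; only the currently running vcpu can
+	//  use the per-CPU copy safely, hence the BUG_ON(v != current) in the
+	//  live code path.)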
+ return per_cpu(vhpt_paddr, v->processor); +#endif + BUG_ON(v != current); + return __get_cpu_var(vhpt_paddr); +} + +static inline unsigned long +vcpu_pta(struct vcpu* v) +{ +#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT + if (HAS_PERVCPU_VHPT(v->domain)) + return v->arch.pta.val; +#endif + return VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) | VHPT_ENABLED; +} + #endif /* !__ASSEMBLY */ #endif --- xen/include/asm-ia64/vmx.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/asm-ia64/vmx.h Wed Oct 11 16:10:40 2006 -0400 @@ -35,6 +35,7 @@ extern void vmx_save_state(struct vcpu * extern void vmx_save_state(struct vcpu *v); extern void vmx_load_state(struct vcpu *v); extern void vmx_setup_platform(struct domain *d); +extern void vmx_do_launch(struct vcpu *v); extern void vmx_io_assist(struct vcpu *v); extern int ia64_hypercall (struct pt_regs *regs); extern void vmx_save_state(struct vcpu *v); --- xen/include/asm-ia64/vmx_vcpu.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/asm-ia64/vmx_vcpu.h Wed Oct 11 16:10:40 2006 -0400 @@ -114,6 +114,7 @@ extern void memwrite_p(VCPU *vcpu, u64 * extern void memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s); extern void vcpu_load_kernel_regs(VCPU *vcpu); extern IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu); +extern IA64FAULT vmx_vcpu_decrement_iip(VCPU *vcpu); extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *); extern void dtlb_fault (VCPU *vcpu, u64 vadr); @@ -121,7 +122,8 @@ extern void alt_dtlb (VCPU *vcpu, u64 va extern void alt_dtlb (VCPU *vcpu, u64 vadr); extern void dvhpt_fault (VCPU *vcpu, u64 vadr); extern void dnat_page_consumption (VCPU *vcpu, uint64_t vadr); -extern void page_not_present(VCPU *vcpu, u64 vadr); +extern void data_page_not_present(VCPU *vcpu, u64 vadr); +extern void inst_page_not_present(VCPU *vcpu, u64 vadr); extern void data_access_rights(VCPU *vcpu, u64 vadr); /************************************************************************** --- xen/include/asm-ia64/xenkregs.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/asm-ia64/xenkregs.h Wed Oct 11 16:10:40 2006 -0400 @@ -7,8 +7,7 @@ #define IA64_TR_SHARED_INFO 3 /* dtr3: page shared with domain */ #define IA64_TR_VHPT 4 /* dtr4: vhpt */ #define IA64_TR_MAPPED_REGS 5 /* dtr5: vcpu mapped regs */ -#define IA64_TR_PERVP_VHPT 6 -#define IA64_DTR_GUEST_KERNEL 7 +#define IA64_DTR_GUEST_KERNEL 6 #define IA64_ITR_GUEST_KERNEL 2 /* Processor status register bits: */ #define IA64_PSR_VM_BIT 46 --- xen/include/public/arch-ia64.h Tue Oct 10 21:05:50 2006 +0100 +++ xen/include/public/arch-ia64.h Wed Oct 11 16:10:40 2006 -0400 @@ -47,18 +47,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t); #ifndef __ASSEMBLY__ typedef unsigned long xen_ulong_t; - -#define GPFN_MEM (0UL << 56) /* Guest pfn is normal mem */ -#define GPFN_FRAME_BUFFER (1UL << 56) /* VGA framebuffer */ -#define GPFN_LOW_MMIO (2UL << 56) /* Low MMIO range */ -#define GPFN_PIB (3UL << 56) /* PIB base */ -#define GPFN_IOSAPIC (4UL << 56) /* IOSAPIC base */ -#define GPFN_LEGACY_IO (5UL << 56) /* Legacy I/O base */ -#define GPFN_GFW (6UL << 56) /* Guest Firmware */ -#define GPFN_HIGH_MMIO (7UL << 56) /* High MMIO range */ - -#define GPFN_IO_MASK (7UL << 56) /* Guest pfn is I/O type */ -#define GPFN_INV_MASK (31UL << 59) /* Guest pfn is invalid */ #define INVALID_MFN (~0UL) @@ -336,33 +324,33 @@ typedef struct vcpu_guest_context vcpu_g typedef struct vcpu_guest_context vcpu_guest_context_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); -// dom0 vp op +/* dom0 vp op */ #define __HYPERVISOR_ia64_dom0vp_op __HYPERVISOR_arch_0 
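+/*
+ * All of the IA64_DOM0VP_* sub-operations below are multiplexed through
+ * this single arch hypercall.  As a rough sketch (assuming the usual
+ * guest-side wrapper HYPERVISOR_ia64_dom0vp_op(cmd, arg0, arg1, arg2, arg3),
+ * which is not defined in this header), exposing the p2m table would be:
+ *
+ *   ret = HYPERVISOR_ia64_dom0vp_op(IA64_DOM0VP_expose_p2m,
+ *                                   conv_start_gpfn, assign_start_gpfn,
+ *                                   expose_size, granule_pfn);
+ */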
-#define IA64_DOM0VP_ioremap 0 // map io space in machine - // address to dom0 physical - // address space. - // currently physical - // assignedg address equals to - // machine address -#define IA64_DOM0VP_phystomach 1 // convert a pseudo physical - // page frame number - // to the corresponding - // machine page frame number. - // if no page is assigned, - // INVALID_MFN or GPFN_INV_MASK - // is returned depending on - // domain's non-vti/vti mode. -#define IA64_DOM0VP_machtophys 3 // convert a machine page - // frame number - // to the corresponding - // pseudo physical page frame - // number of the caller domain -#define IA64_DOM0VP_zap_physmap 17 // unmap and free pages - // contained in the specified - // pseudo physical region -#define IA64_DOM0VP_add_physmap 18 // assigne machine page frane - // to dom0's pseudo physical - // address space. +/* Map io space in machine address to dom0 physical address space. + Currently physical assigned address equals to machine address. */ +#define IA64_DOM0VP_ioremap 0 + +/* Convert a pseudo physical page frame number to the corresponding + machine page frame number. If no page is assigned, INVALID_MFN or + GPFN_INV_MASK is returned depending on domain's non-vti/vti mode. */ +#define IA64_DOM0VP_phystomach 1 + +/* Convert a machine page frame number to the corresponding pseudo physical + page frame number of the caller domain. */ +#define IA64_DOM0VP_machtophys 3 + +/* Reserved for future use. */ +#define IA64_DOM0VP_iounmap 4 + +/* Unmap and free pages contained in the specified pseudo physical region. */ +#define IA64_DOM0VP_zap_physmap 5 + +/* Assign machine page frame to dom0's pseudo physical address space. */ +#define IA64_DOM0VP_add_physmap 6 + +/* expose the p2m table into domain */ +#define IA64_DOM0VP_expose_p2m 7 + // flags for page assignement to pseudo physical address space #define _ASSIGN_readonly 0 #define ASSIGN_readonly (1UL << _ASSIGN_readonly) @@ -395,15 +383,12 @@ struct xen_ia64_boot_param { #endif /* !__ASSEMBLY__ */ -/* Address of shared_info in domain virtual space. - This is the default address, for compatibility only. */ -#define XSI_BASE 0xf100000000000000 - /* Size of the shared_info area (this is not related to page size). */ #define XSI_SHIFT 14 #define XSI_SIZE (1 << XSI_SHIFT) /* Log size of mapped_regs area (64 KB - only 4KB is used). */ #define XMAPPEDREGS_SHIFT 12 +#define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT) /* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */ #define XMAPPEDREGS_OFS XSI_SIZE @@ -435,6 +420,17 @@ struct xen_ia64_boot_param { #define HYPERPRIVOP_GET_PSR 0x19 #define HYPERPRIVOP_MAX 0x19 +/* Fast and light hypercalls. */ +#define __HYPERVISOR_ia64_fast_eoi 0x0200 + +/* Xencomm macros. */ +#define XENCOMM_INLINE_MASK 0xf800000000000000UL +#define XENCOMM_INLINE_FLAG 0x8000000000000000UL + +#define XENCOMM_IS_INLINE(addr) \ + (((unsigned long)(addr) & XENCOMM_INLINE_MASK) == XENCOMM_INLINE_FLAG) +#define XENCOMM_INLINE_ADDR(addr) \ + ((unsigned long)(addr) & ~XENCOMM_INLINE_MASK) #endif /* __HYPERVISOR_IF_IA64_H__ */ /* --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ xen/arch/ia64/tools/p2m_expose/Makefile Wed Oct 11 16:10:40 2006 -0400 @@ -0,0 +1,28 @@ +ifneq ($(KERNELRELEASE),) +obj-m += expose_p2m.o +else +PWD := $(shell pwd) +TOPDIR ?= $(abspath $(PWD)/../../../../..) 
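+# KDIR defaults to the linux-2.6-xen tree unpacked under TOPDIR; override
+# KDIR, ARCH and CROSS_COMPILE on the make command line when building
+# against a different or cross-compiled kernel tree.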
+KDIR ?= $(TOPDIR)/linux-$(shell awk '/^LINUX_VER\>/{print $$3}' $(TOPDIR)/buildconfigs/mk.linux-2.6-xen)-xen +#CROSS_COMPILE ?= ia64-unknown-linux- +#ARCH ?= ia64 + +ifneq ($(O),) +OPT_O := O=$(realpath $(O)) +endif + +ifneq ($(V),) +OPT_V := V=$(V) +endif + +ifneq ($(ARCH),) +OPT_ARCH := ARCH=$(ARCH) +endif + +ifneq ($(CROSS_COMPILE),) +OPT_CORSS_COMPILE := CROSS_COMPILE=$(CROSS_COMPILE) +endif + +default: + $(MAKE) -C $(KDIR) $(OPT_O) $(OPT_V) $(OPT_CORSS_COMPILE) $(OPT_ARCH) M=$(PWD) +endif --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ xen/arch/ia64/tools/p2m_expose/README.p2m_expose Wed Oct 11 16:10:40 2006 -0400 @@ -0,0 +1,12 @@ +This directory contains Linux kernel module for p2m exposure test/benchmark. + +1. build kernel module + - At fist build, linux-xen as usual + - then type just 'make' in this directory, then you'll have expose_p2m.ko. + See Makefile for details. + +2. test, benchmark. + - type 'insmod expose_p2m.ko' on the system. + Then the result is printed out to your console. + insmod fails with EINVAL so that you don't have to execute rmmod. + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ xen/arch/ia64/tools/p2m_expose/expose_p2m.c Wed Oct 11 16:10:40 2006 -0400 @@ -0,0 +1,185 @@ +/****************************************************************************** + * arch/ia64/xen/expose_p2m.c + * + * Copyright (c) 2006 Isaku Yamahata + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include + +#define printd(fmt, ...) printk("%s:%d " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) + +// copied from arch/ia64/mm/tlb.c. it isn't exported. 
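+// (carried here because the module flushes the TLB between the cold- and
+//  warm-cache benchmark runs below)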
+void +local_flush_tlb_all (void) +{ + unsigned long i, j, flags, count0, count1, stride0, stride1, addr; + + addr = local_cpu_data->ptce_base; + count0 = local_cpu_data->ptce_count[0]; + count1 = local_cpu_data->ptce_count[1]; + stride0 = local_cpu_data->ptce_stride[0]; + stride1 = local_cpu_data->ptce_stride[1]; + + local_irq_save(flags); + for (i = 0; i < count0; ++i) { + for (j = 0; j < count1; ++j) { + ia64_ptce(addr); + addr += stride1; + } + addr += stride0; + } + local_irq_restore(flags); + ia64_srlz_i(); /* srlz.i implies srlz.d */ +} + +static void +do_p2m(unsigned long (*conv)(unsigned long), + const char* msg, const char* prefix, + unsigned long start_gpfn, unsigned end_gpfn, unsigned long stride) +{ + struct timeval before_tv; + struct timeval after_tv; + unsigned long gpfn; + unsigned long mfn; + unsigned long count; + nsec_t nsec; + + count = 0; + do_gettimeofday(&before_tv); + for (gpfn = start_gpfn; gpfn < end_gpfn; gpfn += stride) { + mfn = (*conv)(gpfn); + count++; + } + do_gettimeofday(&after_tv); + nsec = timeval_to_ns(&after_tv) - timeval_to_ns(&before_tv); + printk("%s stride %4ld %s: %9ld / %6ld = %5ld nsec\n", + msg, stride, prefix, + nsec, count, nsec/count); +} + + +static void +do_with_hypercall(const char* msg, + unsigned long start_gpfn, unsigned long end_gpfn, + unsigned long stride) +{ + do_p2m(&HYPERVISOR_phystomach, msg, "hypercall", + start_gpfn, end_gpfn, stride); +} + +static void +do_with_table(const char* msg, + unsigned long start_gpfn, unsigned long end_gpfn, + unsigned long stride) +{ + do_p2m(&p2m_phystomach, msg, "p2m table", + start_gpfn, end_gpfn, stride); +} + +static int __init +expose_p2m_init(void) +{ + unsigned long gpfn; + unsigned long mfn; + unsigned long p2m_mfn; + + int error_count = 0; + + const int strides[] = { + PTRS_PER_PTE, PTRS_PER_PTE/2, PTRS_PER_PTE/3, PTRS_PER_PTE/4, + L1_CACHE_BYTES/sizeof(pte_t), 1 + }; + int i; + + +#if 0 + printd("about to call p2m_expose_init()\n"); + if (p2m_expose_init() < 0) { + printd("p2m_expose_init() failed\n"); + return -EINVAL; + } + printd("p2m_expose_init() success\n"); +#else + if (!p2m_initialized) { + printd("p2m exposure isn't initialized\n"); + return -EINVAL; + } +#endif + + printd("p2m expose test begins\n"); + for (gpfn = p2m_min_low_pfn; gpfn < p2m_max_low_pfn; gpfn++) { + mfn = HYPERVISOR_phystomach(gpfn); + p2m_mfn = p2m_phystomach(gpfn); + if (mfn != p2m_mfn) { + printd("gpfn 0x%016lx " + "mfn 0x%016lx p2m_mfn 0x%016lx\n", + gpfn, mfn, p2m_mfn); + printd("mpaddr 0x%016lx " + "maddr 0x%016lx p2m_maddr 0x%016lx\n", + gpfn << PAGE_SHIFT, + mfn << PAGE_SHIFT, p2m_mfn << PAGE_SHIFT); + + error_count++; + if (error_count > 16) { + printk("too many errors\n"); + return -EINVAL; + } + } + } + printd("p2m expose test done!\n"); + + printk("type " + "stride " + "type : " + " nsec / count = " + "nsec per conv\n"); + for (i = 0; i < sizeof(strides)/sizeof(strides[0]); i++) { + int stride = strides[i]; + local_flush_tlb_all(); + do_with_hypercall("cold tlb", + p2m_min_low_pfn, p2m_max_low_pfn, stride); + do_with_hypercall("warm tlb", + p2m_min_low_pfn, p2m_max_low_pfn, stride); + + local_flush_tlb_all(); + do_with_table("cold tlb", + p2m_min_low_pfn, p2m_max_low_pfn, stride); + do_with_table("warm tlb", + p2m_min_low_pfn, p2m_max_low_pfn, stride); + } + + return -EINVAL; +} + +static void __exit +expose_p2m_cleanup(void) +{ +} + +module_init(expose_p2m_init); +module_exit(expose_p2m_cleanup); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Isaku Yamahata "); --- /dev/null Thu Jan 01 00:00:00 1970 
+0000 +++ xen/arch/ia64/vmx/optvfault.S Wed Oct 11 16:10:40 2006 -0400 @@ -0,0 +1,518 @@ +/* + * arch/ia64/vmx/optvfault.S + * optimize virtualization fault handler + * + * Copyright (C) 2006 Intel Co + * Xuefei Xu (Anthony Xu) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ACCE_MOV_FROM_AR +#define ACCE_MOV_FROM_RR + +//mov r1=ar3 +GLOBAL_ENTRY(asm_mov_from_ar) +#ifndef ACCE_MOV_FROM_AR + br.many vmx_vitualization_fault_back +#endif + add r18=VCPU_VTM_OFFSET_OFS,r21 + mov r19=ar.itc + extr.u r17=r25,6,7 + ;; + ld8 r18=[r18] + movl r20=asm_mov_to_reg + ;; + adds r30=vmx_resume_to_guest-asm_mov_to_reg,r20 + shladd r17=r17,4,r20 + mov r24=b0 + ;; + add r19=r19,r18 + mov b0=r17 + br.sptk.few b0 + ;; +END(asm_mov_from_ar) + + +// mov r1=rr[r3] +GLOBAL_ENTRY(asm_mov_from_rr) +#ifndef ACCE_MOV_FROM_RR + br.many vmx_vitualization_fault_back +#endif + extr.u r16=r25,20,7 + extr.u r17=r25,6,7 + movl r20=asm_mov_from_reg + ;; + adds r30=asm_mov_from_rr_back_1-asm_mov_from_reg,r20 + shladd r16=r16,4,r20 + mov r24=b0 + ;; + add r27=VCPU_VRR0_OFS,r21 + mov b0=r16 + br.many b0 + ;; +asm_mov_from_rr_back_1: + adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20 + adds r22=asm_mov_to_reg-asm_mov_from_reg,r20 + shr.u r26=r19,61 + ;; + shladd r17=r17,4,r22 + shladd r27=r26,3,r27 + ;; + ld8 r19=[r27] + mov b0=r17 + br.many b0 +END(asm_mov_from_rr) + + +#define MOV_TO_REG0 \ +{; \ + nop.b 0x0; \ + nop.b 0x0; \ + nop.b 0x0; \ + ;; \ +}; + + +#define MOV_TO_REG(n) \ +{; \ + mov r##n##=r19; \ + mov b0=r30; \ + br.sptk.many b0; \ + ;; \ +}; + + +#define MOV_FROM_REG(n) \ +{; \ + mov r19=r##n##; \ + mov b0=r30; \ + br.sptk.many b0; \ + ;; \ +}; + + +#define MOV_TO_BANK0_REG(n) \ +ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##); \ +{; \ + mov r26=r2; \ + mov r2=r19; \ + bsw.1; \ + ;; \ +}; \ +{; \ + mov r##n##=r2; \ + nop.b 0x0; \ + bsw.0; \ + ;; \ +}; \ +{; \ + mov r2=r26; \ + mov b0=r30; \ + br.sptk.many b0; \ + ;; \ +}; \ +END(asm_mov_to_bank0_reg##n##) + + +#define MOV_FROM_BANK0_REG(n) \ +ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##); \ +{; \ + mov r26=r2; \ + nop.b 0x0; \ + bsw.1; \ + ;; \ +}; \ +{; \ + mov r2=r##n##; \ + nop.b 0x0; \ + bsw.0; \ + ;; \ +}; \ +{; \ + mov r19=r2; \ + mov r2=r26; \ + mov b0=r30; \ +}; \ +{; \ + nop.b 0x0; \ + nop.b 0x0; \ + br.sptk.many b0; \ + ;; \ +}; \ +END(asm_mov_from_bank0_reg##n##) + + +#define JMP_TO_MOV_TO_BANK0_REG(n) \ +{; \ + nop.b 0x0; \ + nop.b 0x0; \ + br.sptk.many asm_mov_to_bank0_reg##n##; \ + ;; \ +} + + +#define JMP_TO_MOV_FROM_BANK0_REG(n) \ +{; \ + nop.b 0x0; \ + nop.b 0x0; \ + br.sptk.many asm_mov_from_bank0_reg##n##; \ + ;; \ +} + + +MOV_FROM_BANK0_REG(16) +MOV_FROM_BANK0_REG(17) +MOV_FROM_BANK0_REG(18) +MOV_FROM_BANK0_REG(19) +MOV_FROM_BANK0_REG(20) +MOV_FROM_BANK0_REG(21) +MOV_FROM_BANK0_REG(22) +MOV_FROM_BANK0_REG(23) +MOV_FROM_BANK0_REG(24) +MOV_FROM_BANK0_REG(25) +MOV_FROM_BANK0_REG(26) +MOV_FROM_BANK0_REG(27) +MOV_FROM_BANK0_REG(28) +MOV_FROM_BANK0_REG(29) +MOV_FROM_BANK0_REG(30) +MOV_FROM_BANK0_REG(31) + + +// mov from reg table +ENTRY(asm_mov_from_reg) + MOV_FROM_REG(0) + MOV_FROM_REG(1) + MOV_FROM_REG(2) + MOV_FROM_REG(3) + MOV_FROM_REG(4) + MOV_FROM_REG(5) + MOV_FROM_REG(6) + MOV_FROM_REG(7) + MOV_FROM_REG(8) + MOV_FROM_REG(9) + MOV_FROM_REG(10) + MOV_FROM_REG(11) + MOV_FROM_REG(12) + MOV_FROM_REG(13) + MOV_FROM_REG(14) + MOV_FROM_REG(15) + JMP_TO_MOV_FROM_BANK0_REG(16) + JMP_TO_MOV_FROM_BANK0_REG(17) + JMP_TO_MOV_FROM_BANK0_REG(18) + JMP_TO_MOV_FROM_BANK0_REG(19) + JMP_TO_MOV_FROM_BANK0_REG(20) 
+ JMP_TO_MOV_FROM_BANK0_REG(21) + JMP_TO_MOV_FROM_BANK0_REG(22) + JMP_TO_MOV_FROM_BANK0_REG(23) + JMP_TO_MOV_FROM_BANK0_REG(24) + JMP_TO_MOV_FROM_BANK0_REG(25) + JMP_TO_MOV_FROM_BANK0_REG(26) + JMP_TO_MOV_FROM_BANK0_REG(27) + JMP_TO_MOV_FROM_BANK0_REG(28) + JMP_TO_MOV_FROM_BANK0_REG(29) + JMP_TO_MOV_FROM_BANK0_REG(30) + JMP_TO_MOV_FROM_BANK0_REG(31) + MOV_FROM_REG(32) + MOV_FROM_REG(33) + MOV_FROM_REG(34) + MOV_FROM_REG(35) + MOV_FROM_REG(36) + MOV_FROM_REG(37) + MOV_FROM_REG(38) + MOV_FROM_REG(39) + MOV_FROM_REG(40) + MOV_FROM_REG(41) + MOV_FROM_REG(42) + MOV_FROM_REG(43) + MOV_FROM_REG(44) + MOV_FROM_REG(45) + MOV_FROM_REG(46) + MOV_FROM_REG(47) + MOV_FROM_REG(48) + MOV_FROM_REG(49) + MOV_FROM_REG(50) + MOV_FROM_REG(51) + MOV_FROM_REG(52) + MOV_FROM_REG(53) + MOV_FROM_REG(54) + MOV_FROM_REG(55) + MOV_FROM_REG(56) + MOV_FROM_REG(57) + MOV_FROM_REG(58) + MOV_FROM_REG(59) + MOV_FROM_REG(60) + MOV_FROM_REG(61) + MOV_FROM_REG(62) + MOV_FROM_REG(63) + MOV_FROM_REG(64) + MOV_FROM_REG(65) + MOV_FROM_REG(66) + MOV_FROM_REG(67) + MOV_FROM_REG(68) + MOV_FROM_REG(69) + MOV_FROM_REG(70) + MOV_FROM_REG(71) + MOV_FROM_REG(72) + MOV_FROM_REG(73) + MOV_FROM_REG(74) + MOV_FROM_REG(75) + MOV_FROM_REG(76) + MOV_FROM_REG(77) + MOV_FROM_REG(78) + MOV_FROM_REG(79) + MOV_FROM_REG(80) + MOV_FROM_REG(81) + MOV_FROM_REG(82) + MOV_FROM_REG(83) + MOV_FROM_REG(84) + MOV_FROM_REG(85) + MOV_FROM_REG(86) + MOV_FROM_REG(87) + MOV_FROM_REG(88) + MOV_FROM_REG(89) + MOV_FROM_REG(90) + MOV_FROM_REG(91) + MOV_FROM_REG(92) + MOV_FROM_REG(93) + MOV_FROM_REG(94) + MOV_FROM_REG(95) + MOV_FROM_REG(96) + MOV_FROM_REG(97) + MOV_FROM_REG(98) + MOV_FROM_REG(99) + MOV_FROM_REG(100) + MOV_FROM_REG(101) + MOV_FROM_REG(102) + MOV_FROM_REG(103) + MOV_FROM_REG(104) + MOV_FROM_REG(105) + MOV_FROM_REG(106) + MOV_FROM_REG(107) + MOV_FROM_REG(108) + MOV_FROM_REG(109) + MOV_FROM_REG(110) + MOV_FROM_REG(111) + MOV_FROM_REG(112) + MOV_FROM_REG(113) + MOV_FROM_REG(114) + MOV_FROM_REG(115) + MOV_FROM_REG(116) + MOV_FROM_REG(117) + MOV_FROM_REG(118) + MOV_FROM_REG(119) + MOV_FROM_REG(120) + MOV_FROM_REG(121) + MOV_FROM_REG(122) + MOV_FROM_REG(123) + MOV_FROM_REG(124) + MOV_FROM_REG(125) + MOV_FROM_REG(126) + MOV_FROM_REG(127) +END(asm_mov_from_reg) + + +/* must be in bank 0 + * parameter: + * r31: pr + * r24: b0 + */ +ENTRY(vmx_resume_to_guest) + mov r16=cr.ipsr + movl r20=__vsa_base + ;; + ld8 r20=[r20] + adds r19=IA64_VPD_BASE_OFFSET,r21 + ;; + ld8 r25=[r19] + extr.u r17=r16,IA64_PSR_RI_BIT,2 + tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1 + ;; + (p6) mov r18=cr.iip + (p6) mov r17=r0 + ;; + (p6) add r18=0x10,r18 + (p7) add r17=1,r17 + ;; + (p6) mov cr.iip=r18 + dep r16=r17,r16,IA64_PSR_RI_BIT,2 + ;; + mov cr.ipsr=r16 + mov r17=cr.isr + adds r19= VPD_VPSR_START_OFFSET,r25 + ld8 r26=[r25] + add r29=PAL_VPS_RESUME_NORMAL,r20 + add r28=PAL_VPS_RESUME_HANDLER,r20 + ;; + ld8 r19=[r19] + mov b0=r29 + cmp.ne p6,p7 = r0,r0 + ;; + tbit.nz.or.andcm p6,p7 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic + tbit.nz.or.andcm p6,p7 = r17,IA64_ISR_IR_BIT //p1=cr.isr.ir + ;; + (p6) mov b0=r29 + (p7) mov b0=r28 + mov pr=r31,-2 + br.sptk.many b0 // call pal service + ;; +END(vmx_resume_to_guest) + + +MOV_TO_BANK0_REG(16) +MOV_TO_BANK0_REG(17) +MOV_TO_BANK0_REG(18) +MOV_TO_BANK0_REG(19) +MOV_TO_BANK0_REG(20) +MOV_TO_BANK0_REG(21) +MOV_TO_BANK0_REG(22) +MOV_TO_BANK0_REG(23) +MOV_TO_BANK0_REG(24) +MOV_TO_BANK0_REG(25) +MOV_TO_BANK0_REG(26) +MOV_TO_BANK0_REG(27) +MOV_TO_BANK0_REG(28) +MOV_TO_BANK0_REG(29) +MOV_TO_BANK0_REG(30) +MOV_TO_BANK0_REG(31) + + +// mov to reg table 
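+// Every MOV_TO_REG()/JMP_TO_MOV_TO_BANK0_REG() entry below is a single
+// 16-byte bundle, so the fault handlers can reach the entry for register
+// rN simply as asm_mov_to_reg + N*16 (the shladd ...,4,... computed above);
+// bank-0 registers r16-r31 need a bank switch, so those slots just branch
+// to the larger asm_mov_to_bank0_reg stubs defined earlier in this file.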
+ENTRY(asm_mov_to_reg) + MOV_TO_REG0 + MOV_TO_REG(1) + MOV_TO_REG(2) + MOV_TO_REG(3) + MOV_TO_REG(4) + MOV_TO_REG(5) + MOV_TO_REG(6) + MOV_TO_REG(7) + MOV_TO_REG(8) + MOV_TO_REG(9) + MOV_TO_REG(10) + MOV_TO_REG(11) + MOV_TO_REG(12) + MOV_TO_REG(13) + MOV_TO_REG(14) + MOV_TO_REG(15) + JMP_TO_MOV_TO_BANK0_REG(16) + JMP_TO_MOV_TO_BANK0_REG(17) + JMP_TO_MOV_TO_BANK0_REG(18) + JMP_TO_MOV_TO_BANK0_REG(19) + JMP_TO_MOV_TO_BANK0_REG(20) + JMP_TO_MOV_TO_BANK0_REG(21) + JMP_TO_MOV_TO_BANK0_REG(22) + JMP_TO_MOV_TO_BANK0_REG(23) + JMP_TO_MOV_TO_BANK0_REG(24) + JMP_TO_MOV_TO_BANK0_REG(25) + JMP_TO_MOV_TO_BANK0_REG(26) + JMP_TO_MOV_TO_BANK0_REG(27) + JMP_TO_MOV_TO_BANK0_REG(28) + JMP_TO_MOV_TO_BANK0_REG(29) + JMP_TO_MOV_TO_BANK0_REG(30) + JMP_TO_MOV_TO_BANK0_REG(31) + MOV_TO_REG(32) + MOV_TO_REG(33) + MOV_TO_REG(34) + MOV_TO_REG(35) + MOV_TO_REG(36) + MOV_TO_REG(37) + MOV_TO_REG(38) + MOV_TO_REG(39) + MOV_TO_REG(40) + MOV_TO_REG(41) + MOV_TO_REG(42) + MOV_TO_REG(43) + MOV_TO_REG(44) + MOV_TO_REG(45) + MOV_TO_REG(46) + MOV_TO_REG(47) + MOV_TO_REG(48) + MOV_TO_REG(49) + MOV_TO_REG(50) + MOV_TO_REG(51) + MOV_TO_REG(52) + MOV_TO_REG(53) + MOV_TO_REG(54) + MOV_TO_REG(55) + MOV_TO_REG(56) + MOV_TO_REG(57) + MOV_TO_REG(58) + MOV_TO_REG(59) + MOV_TO_REG(60) + MOV_TO_REG(61) + MOV_TO_REG(62) + MOV_TO_REG(63) + MOV_TO_REG(64) + MOV_TO_REG(65) + MOV_TO_REG(66) + MOV_TO_REG(67) + MOV_TO_REG(68) + MOV_TO_REG(69) + MOV_TO_REG(70) + MOV_TO_REG(71) + MOV_TO_REG(72) + MOV_TO_REG(73) + MOV_TO_REG(74) + MOV_TO_REG(75) + MOV_TO_REG(76) + MOV_TO_REG(77) + MOV_TO_REG(78) + MOV_TO_REG(79) + MOV_TO_REG(80) + MOV_TO_REG(81) + MOV_TO_REG(82) + MOV_TO_REG(83) + MOV_TO_REG(84) + MOV_TO_REG(85) + MOV_TO_REG(86) + MOV_TO_REG(87) + MOV_TO_REG(88) + MOV_TO_REG(89) + MOV_TO_REG(90) + MOV_TO_REG(91) + MOV_TO_REG(92) + MOV_TO_REG(93) + MOV_TO_REG(94) + MOV_TO_REG(95) + MOV_TO_REG(96) + MOV_TO_REG(97) + MOV_TO_REG(98) + MOV_TO_REG(99) + MOV_TO_REG(100) + MOV_TO_REG(101) + MOV_TO_REG(102) + MOV_TO_REG(103) + MOV_TO_REG(104) + MOV_TO_REG(105) + MOV_TO_REG(106) + MOV_TO_REG(107) + MOV_TO_REG(108) + MOV_TO_REG(109) + MOV_TO_REG(110) + MOV_TO_REG(111) + MOV_TO_REG(112) + MOV_TO_REG(113) + MOV_TO_REG(114) + MOV_TO_REG(115) + MOV_TO_REG(116) + MOV_TO_REG(117) + MOV_TO_REG(118) + MOV_TO_REG(119) + MOV_TO_REG(120) + MOV_TO_REG(121) + MOV_TO_REG(122) + MOV_TO_REG(123) + MOV_TO_REG(124) + MOV_TO_REG(125) + MOV_TO_REG(126) + MOV_TO_REG(127) +END(asm_mov_to_reg) --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ xen/arch/ia64/xen/xencomm.c Wed Oct 11 16:10:40 2006 -0400 @@ -0,0 +1,380 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright (C) IBM Corp. 
2006 + * + * Authors: Hollis Blanchard + * Tristan Gingold + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG +#ifdef DEBUG +static int xencomm_debug = 1; /* extremely verbose */ +#else +#define xencomm_debug 0 +#endif + +static int +xencomm_copy_chunk_from( + unsigned long to, + unsigned long paddr, + unsigned int len) +{ + unsigned long maddr; + struct page_info *page; + + while (1) { + maddr = xencomm_paddr_to_maddr(paddr); + if (xencomm_debug > 1) + printk("%lx[%d] -> %lx\n", maddr, len, to); + if (maddr == 0) + return -EFAULT; + + page = virt_to_page(maddr); + if (get_page(page, current->domain) == 0) { + if (page_get_owner(page) != current->domain) { + /* This page might be a page granted by another domain */ + panic_domain(NULL, "copy_from_guest from foreign domain\n"); + } + /* Try again. */ + continue; + } + memcpy((void *)to, (void *)maddr, len); + put_page(page); + return 0; + } +} + +/** + * xencomm_copy_from_guest: Copy a block of data from domain space. + * @to: Machine address. + * @from: Physical address to a xencomm buffer descriptor. + * @n: Number of bytes to copy. + * @skip: Number of bytes from the start to skip. + * + * Copy data from domain to hypervisor. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. + */ +unsigned long +xencomm_copy_from_guest( + void *to, + const void *from, + unsigned int n, + unsigned int skip) +{ + struct xencomm_desc *desc; + unsigned long desc_addr; + unsigned int from_pos = 0; + unsigned int to_pos = 0; + unsigned int i = 0; + + if (xencomm_debug) + printf("xencomm_copy_from_guest: from=%lx+%u n=%u\n", + (unsigned long)from, skip, n); + + if (XENCOMM_IS_INLINE(from)) { + unsigned long src_paddr = XENCOMM_INLINE_ADDR(from); + + src_paddr += skip; + + while (n > 0) { + unsigned int chunksz; + unsigned int bytes; + int res; + + chunksz = PAGE_SIZE - (src_paddr % PAGE_SIZE); + + bytes = min(chunksz, n); + + res = xencomm_copy_chunk_from((unsigned long)to, src_paddr, bytes); + if (res != 0) + return -EFAULT; + src_paddr += bytes; + to += bytes; + n -= bytes; + } + + /* Always successful. 
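+		 * (an inline handle names a single contiguous guest-physical
+		 *  buffer, so reaching this point means every requested byte
+		 *  has been copied)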
*/ + return 0; + } + + /* first we need to access the descriptor */ + desc_addr = xencomm_paddr_to_maddr((unsigned long)from); + if (desc_addr == 0) + return -EFAULT; + + desc = (struct xencomm_desc *)desc_addr; + if (desc->magic != XENCOMM_MAGIC) { + printk("%s: error: %p magic was 0x%x\n", + __func__, desc, desc->magic); + return -EFAULT; + } + + /* iterate through the descriptor, copying up to a page at a time */ + while ((to_pos < n) && (i < desc->nr_addrs)) { + unsigned long src_paddr = desc->address[i]; + unsigned int pgoffset; + unsigned int chunksz; + unsigned int chunk_skip; + + if (src_paddr == XENCOMM_INVALID) { + i++; + continue; + } + + pgoffset = src_paddr % PAGE_SIZE; + chunksz = PAGE_SIZE - pgoffset; + + chunk_skip = min(chunksz, skip); + from_pos += chunk_skip; + chunksz -= chunk_skip; + skip -= chunk_skip; + + if (skip == 0) { + unsigned int bytes = min(chunksz, n - to_pos); + int res; + + if (xencomm_debug > 1) + printf ("src_paddr=%lx i=%d, skip=%d\n", + src_paddr, i, chunk_skip); + + res = xencomm_copy_chunk_from((unsigned long)to + to_pos, + src_paddr + chunk_skip, bytes); + if (res != 0) + return -EFAULT; + + from_pos += bytes; + to_pos += bytes; + } + + i++; + } + + return n - to_pos; +} + +static int +xencomm_copy_chunk_to( + unsigned long paddr, + unsigned long from, + unsigned int len) +{ + unsigned long maddr; + struct page_info *page; + + while (1) { + maddr = xencomm_paddr_to_maddr(paddr); + if (xencomm_debug > 1) + printk("%lx[%d] -> %lx\n", from, len, maddr); + if (maddr == 0) + return -EFAULT; + + page = virt_to_page(maddr); + if (get_page(page, current->domain) == 0) { + if (page_get_owner(page) != current->domain) { + /* This page might be a page granted by another domain */ + panic_domain(NULL, "copy_to_guest to foreign domain\n"); + } + /* Try again. */ + continue; + } + memcpy((void *)maddr, (void *)from, len); + put_page(page); + return 0; + } +} + +/** + * xencomm_copy_to_guest: Copy a block of data to domain space. + * @to: Physical address to xencomm buffer descriptor. + * @from: Machine address. + * @n: Number of bytes to copy. + * @skip: Number of bytes from the start to skip. + * + * Copy data from hypervisor to domain. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. + */ +unsigned long +xencomm_copy_to_guest( + void *to, + const void *from, + unsigned int n, + unsigned int skip) +{ + struct xencomm_desc *desc; + unsigned long desc_addr; + unsigned int from_pos = 0; + unsigned int to_pos = 0; + unsigned int i = 0; + + if (xencomm_debug) + printf ("xencomm_copy_to_guest: to=%lx+%u n=%u\n", + (unsigned long)to, skip, n); + + if (XENCOMM_IS_INLINE(to)) { + unsigned long dest_paddr = XENCOMM_INLINE_ADDR(to); + + dest_paddr += skip; + + while (n > 0) { + unsigned int chunksz; + unsigned int bytes; + int res; + + chunksz = PAGE_SIZE - (dest_paddr % PAGE_SIZE); + + bytes = min(chunksz, n); + + res = xencomm_copy_chunk_to(dest_paddr, (unsigned long)from, bytes); + if (res != 0) + return res; + + dest_paddr += bytes; + from += bytes; + n -= bytes; + } + + /* Always successful. 
*/ + return 0; + } + + /* first we need to access the descriptor */ + desc_addr = xencomm_paddr_to_maddr((unsigned long)to); + if (desc_addr == 0) + return -EFAULT; + + desc = (struct xencomm_desc *)desc_addr; + if (desc->magic != XENCOMM_MAGIC) { + printk("%s error: %p magic was 0x%x\n", __func__, desc, desc->magic); + return -EFAULT; + } + + /* iterate through the descriptor, copying up to a page at a time */ + while ((from_pos < n) && (i < desc->nr_addrs)) { + unsigned long dest_paddr = desc->address[i]; + unsigned int pgoffset; + unsigned int chunksz; + unsigned int chunk_skip; + + if (dest_paddr == XENCOMM_INVALID) { + i++; + continue; + } + + pgoffset = dest_paddr % PAGE_SIZE; + chunksz = PAGE_SIZE - pgoffset; + + chunk_skip = min(chunksz, skip); + to_pos += chunk_skip; + chunksz -= chunk_skip; + skip -= chunk_skip; + dest_paddr += chunk_skip; + + if (skip == 0) { + unsigned int bytes = min(chunksz, n - from_pos); + int res; + + res = xencomm_copy_chunk_to(dest_paddr, + (unsigned long)from + from_pos, bytes); + if (res != 0) + return res; + + from_pos += bytes; + to_pos += bytes; + } + + i++; + } + return n - from_pos; +} + +/* Offset page addresses in 'handle' to skip 'bytes' bytes. Set completely + * exhausted pages to XENCOMM_INVALID. */ +void * +xencomm_add_offset( + void *handle, + unsigned int bytes) +{ + struct xencomm_desc *desc; + unsigned long desc_addr; + int i = 0; + + if (XENCOMM_IS_INLINE(handle)) + return (void *)((unsigned long)handle + bytes); + + /* first we need to access the descriptor */ + desc_addr = xencomm_paddr_to_maddr((unsigned long)handle); + if (desc_addr == 0) + return NULL; + + desc = (struct xencomm_desc *)desc_addr; + if (desc->magic != XENCOMM_MAGIC) { + printk("%s error: %p magic was 0x%x\n", __func__, desc, desc->magic); + return NULL; + } + + /* iterate through the descriptor incrementing addresses */ + while ((bytes > 0) && (i < desc->nr_addrs)) { + unsigned long dest_paddr = desc->address[i]; + unsigned int pgoffset; + unsigned int chunksz; + unsigned int chunk_skip; + + pgoffset = dest_paddr % PAGE_SIZE; + chunksz = PAGE_SIZE - pgoffset; + + chunk_skip = min(chunksz, bytes); + if (chunk_skip == chunksz) { + /* exhausted this page */ + desc->address[i] = XENCOMM_INVALID; + } else { + desc->address[i] += chunk_skip; + } + bytes -= chunk_skip; + } + return handle; +} + +int +xencomm_handle_is_null( + void *ptr) +{ + if (XENCOMM_IS_INLINE(ptr)) + return XENCOMM_INLINE_ADDR(ptr) == 0; + else { + struct xencomm_desc *desc; + unsigned long desc_addr; + + desc_addr = xencomm_paddr_to_maddr((unsigned long)ptr); + if (desc_addr == 0) + return 1; + + desc = (struct xencomm_desc *)desc_addr; + return (desc->address[0] == XENCOMM_INVALID); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ xen/arch/ia64/xen/xenpatch.c Wed Oct 11 16:10:40 2006 -0400 @@ -0,0 +1,122 @@ +/****************************************************************************** + * xenpatch.c + * Copyright (c) 2006 Silicon Graphics Inc. + * Jes Sorensen + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + * + * Parts of this based on code from arch/ia64/kernel/patch.c + */ + +#include +#include +#include +#include + +/* + * This was adapted from code written by Tony Luck: + * + * The 64-bit value in a "movl reg=value" is scattered between the two words of the bundle + * like this: + * + * 6 6 5 4 3 2 1 + * 3210987654321098765432109876543210987654321098765432109876543210 + * ABBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCDEEEEEFFFFFFFFFGGGGGGG + * + * CCCCCCCCCCCCCCCCCCxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + * xxxxAFFFFFFFFFEEEEEDxGGGGGGGxxxxxxxxxxxxxBBBBBBBBBBBBBBBBBBBBBBB + */ +static u64 +get_imm64 (u64 insn_addr) +{ + u64 *p = (u64 *) (insn_addr & -16); /* mask out slot number */ + + return ( (p[1] & 0x0800000000000000UL) << 4) | /*A*/ + ((p[1] & 0x00000000007fffffUL) << 40) | /*B*/ + ((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/ + ((p[1] & 0x0000100000000000UL) >> 23) | /*D*/ + ((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/ + ((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/ + ((p[1] & 0x000007f000000000UL) >> 36); /*G*/ +} + +/* Patch instruction with "val" where "mask" has 1 bits. */ +void +ia64_patch (u64 insn_addr, u64 mask, u64 val) +{ + u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16); +#define insn_mask ((1UL << 41) - 1) + unsigned long shift; + + b0 = b[0]; b1 = b[1]; + /* 5 bits of template, then 3 x 41-bit instructions */ + shift = 5 + 41 * (insn_addr % 16); + if (shift >= 64) { + m1 = mask << (shift - 64); + v1 = val << (shift - 64); + } else { + m0 = mask << shift; m1 = mask >> (64 - shift); + v0 = val << shift; v1 = val >> (64 - shift); + b[0] = (b0 & ~m0) | (v0 & m0); + } + b[1] = (b1 & ~m1) | (v1 & m1); +} + +void +ia64_patch_imm64 (u64 insn_addr, u64 val) +{ + /* The assembler may generate offset pointing to either slot 1 + or slot 2 for a long (2-slot) instruction, occupying slots 1 + and 2. */ + insn_addr &= -16UL; + ia64_patch(insn_addr + 2, 0x01fffefe000UL, + (((val & 0x8000000000000000UL) >> 27) | /* bit 63 -> 36 */ + ((val & 0x0000000000200000UL) << 0) | /* bit 21 -> 21 */ + ((val & 0x00000000001f0000UL) << 6) | /* bit 16 -> 22 */ + ((val & 0x000000000000ff80UL) << 20) | /* bit 7 -> 27 */ + ((val & 0x000000000000007fUL) << 13) /* bit 0 -> 13 */)); + ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22); +} + +extern char frametable_miss; +extern unsigned long xen_pstart; + +/* + * Add more patch points in seperate functions as appropriate + */ + +static void xen_patch_frametable_miss(u64 offset) +{ + u64 addr, val; + + addr = (u64)&frametable_miss; + val = get_imm64(addr) + offset; + ia64_patch_imm64(addr, val); +} + + +void xen_patch_kernel(void) +{ + unsigned long patch_offset; + + patch_offset = xen_pstart - (KERNEL_START - PAGE_OFFSET); + + printk("Xen patching physical address access by offset: " + "0x%lx\n", patch_offset); + + xen_patch_frametable_miss(patch_offset); + + ia64_sync_i(); + ia64_srlz_i(); +}
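/*
 * For illustration: any further patch point would follow the same pattern
 * as xen_patch_frametable_miss() above: read the scattered 64-bit "movl"
 * immediate with get_imm64(), rebase it by the physical offset, and write
 * it back with ia64_patch_imm64().  A minimal sketch; frametable_miss is
 * the only real patch point here, and the label some_movl_literal below is
 * purely hypothetical.
 *
 *	static void xen_patch_some_literal(u64 offset)
 *	{
 *		u64 addr, val;
 *
 *		addr = (u64)&some_movl_literal;	// hypothetical movl bundle
 *		val = get_imm64(addr) + offset;	// old immediate, rebased
 *		ia64_patch_imm64(addr, val);	// scatter it back into the bundle
 *	}
 */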