[IA64] Remove several warnings and fix issues related to VPD and itr.

The per-CPU variable inserted_vpd always held the same address as
inserted_mapped_regs (both point at vcpu->arch.privregs), so fold the
former into the latter.  In the MCA path, purge the VPD translation
with ptr.i as well as ptr.d, since the VPD is pinned by both the DTR
and the ITR.  Move the per-CPU bookkeeping out of the call sites into
a new C wrapper, vmx_switch_rr7(), which records the inserted
addresses and then calls the assembly routine, now renamed
__vmx_switch_rr7().  Also make the (disabled) cached-offset variant of
__va_efi() return a pointer, matching the active __va() variant.

diff --git a/xen/arch/ia64/linux-xen/mca_asm.S b/xen/arch/ia64/linux-xen/mca_asm.S
--- a/xen/arch/ia64/linux-xen/mca_asm.S
+++ b/xen/arch/ia64/linux-xen/mca_asm.S
@@ -332,31 +332,24 @@
 	;;
 	// 6. mapped_regs
+	// and
+	// 7. VPD
+	// The VPD will not be mapped in the case where
+	// a VMX domain hasn't been started since boot
 	GET_THIS_PADDR(r2, inserted_mapped_regs);;
 	ld8 r16=[r2]
 	mov r18=XMAPPEDREGS_SHIFT<<2
 	;;
 	ptr.d r16,r18
 	;;
-	srlz.d
-	;;
-
-	// 7. VPD
-	// The VPD will not be mapped in the case where
-	// a VMX domain hasn't been started since boot
-	GET_THIS_PADDR(r2, inserted_vpd);;
-	ld8 r16=[r2]
-	mov r18=XMAPPEDREGS_SHIFT<<2
-	;;
-	cmp.eq p7,p0=r2,r0
-	;;
-(p7)	br.cond.sptk .vpd_not_mapped
-	;;
-	ptr.d r16,r18
+	ptr.i r16,r18	// 7. VPD
+	// The VPD is pinned by both the DTR and the ITR.
+	// The PV case is not excluded here: ptr.i is
+	// harmless for PV, so issue it unconditionally.
 	;;
 	srlz.i
 	;;
-.vpd_not_mapped:
 
 	// 8. VHPT
 	// GET_VA_VCPU_VHPT_MADDR() may not give the
diff --git a/xen/arch/ia64/vmx/vmx_entry.S b/xen/arch/ia64/vmx/vmx_entry.S
--- a/xen/arch/ia64/vmx/vmx_entry.S
+++ b/xen/arch/ia64/vmx/vmx_entry.S
@@ -608,8 +608,8 @@
 		IA64_PSR_ED | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_IC)
 #define PSR_BITS_TO_SET IA64_PSR_BN
 
-//extern void vmx_switch_rr7(unsigned long rid, void *guest_vhpt, void * pal_vaddr, void * shared_arch_info );
-GLOBAL_ENTRY(vmx_switch_rr7)
+//extern void __vmx_switch_rr7(unsigned long rid, void *guest_vhpt, void * pal_vaddr, void * shared_arch_info );
+GLOBAL_ENTRY(__vmx_switch_rr7)
 	// not sure this unwind statement is correct...
 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
 	alloc loc1 = ar.pfs, 4, 8, 0, 0
@@ -777,4 +777,4 @@
 	mov ar.rsc=loc3		// restore RSE configuration
 	srlz.d			// serialize restoration of psr.l
 	br.ret.sptk.many rp
-END(vmx_switch_rr7)
+END(__vmx_switch_rr7)
diff --git a/xen/arch/ia64/vmx/vmx_phy_mode.c b/xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c
@@ -170,10 +170,7 @@
     ia64_set_rr((VRN5 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
     ia64_dv_serialize_data();
    ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
-    ia64_dv_serialize_data();
-    __get_cpu_var(inserted_vhpt) = vcpu->arch.vhpt.hash;
-    __get_cpu_var(inserted_vpd) = vcpu->arch.privregs;
-    __get_cpu_var(inserted_mapped_regs) = vcpu->arch.privregs;
+    ia64_dv_serialize_data();
     vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
                    (void *)vcpu->arch.vhpt.hash,
                    pal_vaddr, vcpu->arch.privregs);
diff --git a/xen/arch/ia64/vmx/vmx_vcpu.c b/xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c
@@ -196,6 +196,14 @@
     }
 }
 
+void vmx_switch_rr7(unsigned long rid, void *guest_vhpt, void *pal_vaddr,
+                    void *shared_arch_info)
+{
+    __get_cpu_var(inserted_vhpt) = (unsigned long)guest_vhpt;
+    __get_cpu_var(inserted_mapped_regs) = (unsigned long)shared_arch_info;
+    __vmx_switch_rr7(rid, guest_vhpt, pal_vaddr, shared_arch_info);
+}
+
 IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
 {
     u64 rrval;
@@ -208,13 +216,9 @@
     VMX(vcpu,vrr[reg>>VRN_SHIFT]) = val;
     switch((u64)(reg>>VRN_SHIFT)) {
     case VRN7:
-        if (likely(vcpu == current)) {
-            __get_cpu_var(inserted_vhpt) = vcpu->arch.vhpt.hash;
-            __get_cpu_var(inserted_vpd) = vcpu->arch.privregs;
-            __get_cpu_var(inserted_mapped_regs) = vcpu->arch.privregs;
-            vmx_switch_rr7(vrrtomrr(vcpu,val), (void *)vcpu->arch.vhpt.hash,
+        if (likely(vcpu == current))
+            vmx_switch_rr7(vrrtomrr(vcpu, val), (void *)vcpu->arch.vhpt.hash,
                            pal_vaddr, vcpu->arch.privregs);
-        }
         break;
     case VRN4:
         rrval = vrrtomrr(vcpu,val);
diff --git a/xen/arch/ia64/xen/regionreg.c b/xen/arch/ia64/xen/regionreg.c
--- a/xen/arch/ia64/xen/regionreg.c
+++ b/xen/arch/ia64/xen/regionreg.c
@@ -55,7 +55,6 @@
 DEFINE_PER_CPU(unsigned long, inserted_vhpt);
 DEFINE_PER_CPU(unsigned long, inserted_shared_info);
 DEFINE_PER_CPU(unsigned long, inserted_mapped_regs);
-DEFINE_PER_CPU(unsigned long, inserted_vpd);
 
 #if 0
 // following already defined in include/asm-ia64/gcc_intrin.h
@@ -300,7 +299,7 @@
 	} else {
 		if (current && VMX_DOMAIN(current))
-			vpd = __get_cpu_var(inserted_vpd);
+			vpd = __get_cpu_var(inserted_mapped_regs);
 		ia64_new_rr7_efi(val, cpu_isset(smp_processor_id(),
 				 percpu_set), vpd);
 	}
diff --git a/xen/include/asm-ia64/regionreg.h b/xen/include/asm-ia64/regionreg.h
--- a/xen/include/asm-ia64/regionreg.h
+++ b/xen/include/asm-ia64/regionreg.h
@@ -39,7 +39,6 @@
 DECLARE_PER_CPU(unsigned long, inserted_vhpt);
 DECLARE_PER_CPU(unsigned long, inserted_shared_info);
 DECLARE_PER_CPU(unsigned long, inserted_mapped_regs);
-DECLARE_PER_CPU(unsigned long, inserted_vpd);
 
 extern cpumask_t percpu_set;
diff --git a/xen/include/asm-ia64/vmx_vcpu.h b/xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h
+++ b/xen/include/asm-ia64/vmx_vcpu.h
@@ -103,6 +103,7 @@
 extern uint64_t guest_read_vivr(VCPU * vcpu);
 extern int vmx_vcpu_pend_interrupt(VCPU * vcpu, uint8_t vector);
 extern void vcpu_load_kernel_regs(VCPU * vcpu);
+extern void __vmx_switch_rr7(unsigned long, void *, void *, void *);
 extern void vmx_switch_rr7(unsigned long, void *, void *, void *);
 extern void vmx_ia64_set_dcr(VCPU * v);
 extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
diff --git a/xen/include/asm-ia64/xenpage.h b/xen/include/asm-ia64/xenpage.h
--- a/xen/include/asm-ia64/xenpage.h
+++ b/xen/include/asm-ia64/xenpage.h
@@ -101,7 +101,7 @@
  * __IA64_EFI_CACHED_OFFSET is activated in efi_enter_virtual_mode()
  */
 #if 0
-#define __va_efi(x)	((unsigned long)(x) | __IA64_EFI_CACHED_OFFSET)
+#define __va_efi(x)	((void*)((unsigned long)(x) | __IA64_EFI_CACHED_OFFSET))
 #else
 #define __va_efi(x)	__va(x)
 #endif
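
For illustration only (not part of the patch): the core of the change is a
wrapper that records where the VHPT and mapped_regs/VPD are inserted before
the assembly routine switches rr7, so the MCA handler can later find and
purge exactly those translations. Below is a minimal standalone C sketch of
that pattern; the per-CPU variables and the low-level routine are simplified
stand-ins, not Xen's actual definitions.

/* Standalone sketch of the wrapper pattern added by this patch.  The
 * per-CPU variables and the assembly entry point are stand-ins so the
 * example builds and runs on its own. */
#include <stdio.h>

/* Stand-ins for Xen's DEFINE_PER_CPU(unsigned long, inserted_*) state. */
static unsigned long inserted_vhpt;
static unsigned long inserted_mapped_regs;

/* Stand-in for the renamed low-level routine (__vmx_switch_rr7 in asm). */
static void __switch_rr7(unsigned long rid, void *guest_vhpt,
                         void *pal_vaddr, void *shared_arch_info)
{
    printf("rr7 switch: rid=%#lx vhpt=%p pal=%p regs=%p\n",
           rid, guest_vhpt, pal_vaddr, shared_arch_info);
}

/* The wrapper records where the mappings live *before* switching, so a
 * later purge path (the MCA handler in the real code) can find them.
 * Because the VPD and mapped_regs are the same object in the patch
 * (vcpu->arch.privregs), one recorded address serves both, which is why
 * inserted_vpd can be deleted. */
static void switch_rr7(unsigned long rid, void *guest_vhpt,
                       void *pal_vaddr, void *shared_arch_info)
{
    inserted_vhpt = (unsigned long)guest_vhpt;
    inserted_mapped_regs = (unsigned long)shared_arch_info;
    __switch_rr7(rid, guest_vhpt, pal_vaddr, shared_arch_info);
}

int main(void)
{
    unsigned long vhpt[1], privregs[1];
    switch_rr7(0x77UL, vhpt, NULL, privregs);
    return 0;
}

With the bookkeeping inside the wrapper, both call sites (in vmx_phy_mode.c
and vmx_vcpu.c) stay in sync automatically, which is what allows the
duplicated __get_cpu_var() assignments to be dropped from each of them.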