On Fri, May 09, 2008 at 11:23:25AM +0800, Xu, Anthony wrote:
> This is updated one according to your comments except for belows.
Great. I found some xenoprof issues. Please see comments below.
I remember that someone reported that xenoprof doesn't work for a VTi domain.
I suspect that a similar issue exists here.
> >> diff -r f2457c7aff8d xen/arch/ia64/vmx/vmx_phy_mode.c
> >> --- a/xen/arch/ia64/vmx/vmx_phy_mode.c Fri Apr 25 20:13:52 2008
> +0900
> >> +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c Thu May 08 16:23:42 2008
> +0800
> >> @@ -252,8 +252,8 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
> >> switch_to_virtual_rid(vcpu);
> >> break;
> >> case SW_SELF:
> >> - printk("Switch to self-0x%lx!!! MM mode doesn't
> >> change...\n",
> >> - old_psr.val);
> >> +// printk("Switch to self-0x%lx!!! MM mode doesn't
> >> change...\n", +// old_psr.val); break;
> >> case SW_NOP:
> >> // printk("No action required for mode transition: (0x%lx ->
> >> 0x%lx)\n",
> >
> > What's the purpose here.
> > Anyway if you want this part, please create another patch.
> >
>
>
> Switch_mm_mode is called in fast path, since printk accesses PIO, it may
> trigger TLB fault, which can't be handled in fast path, due to psr.ic=0.
I see. I understand that with this patch, the SW_SELF and SW_NOP cases
can occur frequently.
> diff -r f2457c7aff8d xen/arch/ia64/vmx/vmx_vcpu.c
> --- a/xen/arch/ia64/vmx/vmx_vcpu.c Fri Apr 25 20:13:52 2008 +0900
> +++ b/xen/arch/ia64/vmx/vmx_vcpu.c Fri May 09 10:58:37 2008 +0800
> @@ -172,11 +172,6 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u6
> {
> u64 rrval;
>
> - if (unlikely(is_reserved_rr_rid(vcpu, val))) {
> - gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
> - return IA64_RSVDREG_FAULT;
> - }
> -
> VMX(vcpu,vrr[reg>>VRN_SHIFT]) = val;
> switch((u64)(reg>>VRN_SHIFT)) {
> case VRN7:
Without the check, a VTi domain guest may access another domain's pages
or Xen's pages. So it can't be dropped.
Did you find that calling is_reserved_rr_rid() is heavy?
If so, please make it an inline function.
> +void vmx_vcpu_mov_to_psr_fast(VCPU *vcpu, u64 value)
> +{
> + // TODO: Only allowed for current vcpu
> + u64 old_vpsr, new_vpsr, mipsr;
> + old_vpsr = VCPU(vcpu,vpsr);
>
> + new_vpsr = (old_vpsr & 0xffffffff00000000)
> + | ( value & 0xffffffff);
> + VCPU(vcpu, vpsr) = new_vpsr;
>
> + mipsr = ia64_getreg( _IA64_REG_CR_IPSR);
> + mipsr = (mipsr & 0xffffffff00000000) | ( value & 0xffffffff);
> + /* xenoprof:
> + * don't change psr.pp.
> + * It is manipulated by xenoprof.
> + */
> + mipsr |= IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT
> + | IA64_PSR_PP | IA64_PSR_SI | IA64_PSR_RT;
> +
> + if (FP_PSR(vcpu) & IA64_PSR_DFH)
> + mipsr |= IA64_PSR_DFH;
> +
> + ia64_setreg(_IA64_REG_CR_IPSR, mipsr);
> +
> + switch_mm_mode(vcpu,(IA64_PSR)old_vpsr, (IA64_PSR)new_vpsr);
> +
> +}
For xenoprof, ipsr.pp shouldn't be changed.
This code always sets ipsr.pp.
> +
> +
> +
> +extern void vmx_asm_bsw0(void);
> +extern void vmx_asm_bsw1(void);
Please move these declarations into header file.
> +#define IA64_PSR_MMU_VIRT (IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT)
> +void vmx_vcpu_rfi_fast(VCPU *vcpu)
> +{
> + // TODO: Only allowed for current vcpu
> + u64 vifs, vipsr, vpsr, mipsr, mask;
> + vipsr = VCPU(vcpu,ipsr);
> + vpsr = VCPU(vcpu,vpsr);
> + vifs = VCPU(vcpu,ifs);
> + if (vipsr&IA64_PSR_BN){
> + if(!(vpsr&IA64_PSR_BN))
> + vmx_asm_bsw1();
> + }
> + else if (vpsr&IA64_PSR_BN)
> + vmx_asm_bsw0();
> +
> + /*
> + * For those IA64_PSR bits: id/da/dd/ss/ed/ia
> + * Since these bits will become 0, after success execution of each
> + * instruction, we will change set them to mIA64_PSR
> + */
> + VCPU(vcpu, vpsr) = vipsr & (~ (IA64_PSR_ID |IA64_PSR_DA
> + | IA64_PSR_DD | IA64_PSR_ED | IA64_PSR_IA));
> +
> + /*
> + * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr)
> + * , except for the following bits:
> + * ic/i/dt/si/rt/mc/it/bn/vm
> + */
> + mask = (IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
> + IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
> + IA64_PSR_VM);
> + mipsr = ia64_getreg( _IA64_REG_CR_IPSR);
> + mipsr = (mipsr & mask ) | ( vipsr & (~mask) );
> + /* xenoprof */
> + mipsr |= IA64_PSR_PP;
> +
> + if (FP_PSR(vcpu) & IA64_PSR_DFH)
> + mipsr |= IA64_PSR_DFH;
> +
> + ia64_setreg(_IA64_REG_CR_IPSR, mipsr);
> + vmx_ia64_set_dcr(vcpu);
> +
> + if(vifs>>63)
> + ia64_setreg(_IA64_REG_CR_IFS, vifs);
> +
> + ia64_setreg(_IA64_REG_CR_IIP, VCPU(vcpu,iip));
> +
> + switch_mm_mode(vcpu,(IA64_PSR)vpsr, (IA64_PSR)vipsr);
> +}
Ditto.
For xenoprof, ipsr.pp shouldn't be changed.
This code always sets ipsr.pp.
add IA64_PSR_PP to mask. not to mipsr.
> +
> +
> +void vmx_vcpu_ssm_fast(VCPU *vcpu, u64 imm24)
> +{
> + u64 old_vpsr, new_vpsr, mipsr;
> +
> + old_vpsr = VCPU(vcpu,vpsr);
> + new_vpsr = old_vpsr | imm24;
> +
> + VCPU(vcpu, vpsr) = new_vpsr;
> +
> + mipsr = ia64_getreg( _IA64_REG_CR_IPSR);
> + mipsr |= imm24;
> + ia64_setreg(_IA64_REG_CR_IPSR, mipsr);
> +
> + switch_mm_mode(vcpu,(IA64_PSR)old_vpsr, (IA64_PSR)new_vpsr);
> +}
Ditto. For xenoprof, ipsr.pp shouldn't be changed. Mask it out.
> +
> +void vmx_vcpu_rsm_fast(VCPU *vcpu, u64 imm24)
> +{
> + u64 old_vpsr, new_vpsr, mipsr;
> +
> + old_vpsr = VCPU(vcpu,vpsr);
> + new_vpsr = old_vpsr & ~imm24;
> +
> + VCPU(vcpu, vpsr) = new_vpsr;
> +
> + mipsr = ia64_getreg( _IA64_REG_CR_IPSR);
> + mipsr &= ~imm24;
> + /* xenoprof:
> + * don't change psr.pp.
> + * It is manipulated by xenoprof.
> + */
> + mipsr |= IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT
> + | IA64_PSR_PP | IA64_PSR_SI;
> +
> + if (FP_PSR(vcpu) & IA64_PSR_DFH)
> + mipsr |= IA64_PSR_DFH;
> +
> + ia64_setreg(_IA64_REG_CR_IPSR, mipsr);
> +
> + switch_mm_mode(vcpu,(IA64_PSR)old_vpsr, (IA64_PSR)new_vpsr);
> +}
Ditto. For xenoprof, ipsr.pp shouldn't be changed.
This code always sets ipsr.pp.
--
yamahata
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel
|