# HG changeset patch
# User gingold@saphi
# Node ID 72dd57f88de3b7c2135c4d01c8aa13f71e7a32c6
# Parent  9529d667d0426eb02077cb33abdc6e434e27d293
Cleanup: static added, unused code #if'ed, typos.

Signed-off-by: Tristan Gingold

diff -r 9529d667d042 -r 72dd57f88de3 xen/arch/ia64/vmx/vmx_interrupt.c
--- a/xen/arch/ia64/vmx/vmx_interrupt.c	Thu Feb 15 21:09:39 2007
+++ b/xen/arch/ia64/vmx/vmx_interrupt.c	Sat Feb 17 03:28:54 2007
@@ -20,15 +20,15 @@
  *  Xiaoyan Feng (Fleming Feng)
  *  Xuefei Xu (Anthony Xu) (Anthony.xu@xxxxxxxxx)
  */
-
-
 #include
 #include
 #include
 #include
+
 /* SDM vol2 5.5 - IVA based interruption handling */
 #define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
-void
+
+static void
 collect_interruption(VCPU *vcpu)
 {
     u64 ipsr;
@@ -92,14 +92,19 @@
     u64 viva;
     REGS *regs;
     ISR pt_isr;
+
     perfc_incra(vmx_inject_guest_interruption, vec >> 8);
-    regs=vcpu_regs(vcpu);
-    // clear cr.isr.ri
+
+    regs = vcpu_regs(vcpu);
+
+    // clear cr.isr.ir (incomplete register frame)
     pt_isr.val = VMX(vcpu,cr_isr);
     pt_isr.ir = 0;
     VMX(vcpu,cr_isr) = pt_isr.val;
+
     collect_interruption(vcpu);
     vmx_ia64_set_dcr(vcpu);
+
     vmx_vcpu_get_iva(vcpu,&viva);
     regs->cr_iip = viva + vec;
 }
diff -r 9529d667d042 -r 72dd57f88de3 xen/arch/ia64/vmx/vmx_support.c
--- a/xen/arch/ia64/vmx/vmx_support.c	Thu Feb 15 21:09:39 2007
+++ b/xen/arch/ia64/vmx/vmx_support.c	Sat Feb 17 03:28:54 2007
@@ -44,7 +44,8 @@
      */
     vio = get_vio(v->domain, v->vcpu_id);
     if (!vio)
-        panic_domain(vcpu_regs(v),"Corruption: bad shared page: %lx\n", (unsigned long)vio);
+        panic_domain(vcpu_regs(v),"Corruption: bad shared page: %lx\n",
+                     (unsigned long)vio);
 
     p = &vio->vp_ioreq;
 
@@ -98,9 +99,3 @@
     /* the code under this line is completer phase... */
     vmx_io_assist(v);
 }
-
-/* Wake up a vcpu whihc is waiting for interrupts to come in */
-void vmx_prod_vcpu(struct vcpu *v)
-{
-    vcpu_unblock(v);
-}
diff -r 9529d667d042 -r 72dd57f88de3 xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c	Thu Feb 15 21:09:39 2007
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Sat Feb 17 03:28:54 2007
@@ -22,7 +22,6 @@
  *  Yaozu Dong (Eddie Dong) (Eddie.dong@xxxxxxxxx)
  *  Xuefei Xu (Anthony Xu) (Anthony.xu@xxxxxxxxx)
  */
-
 #include
 #include
 #include
@@ -36,29 +35,6 @@
 #include
 #include
 #include
-//u64 fire_itc;
-//u64 fire_itc2;
-//u64 fire_itm;
-//u64 fire_itm2;
-/*
- * Copyright (c) 2005 Intel Corporation.
- *  Anthony Xu (anthony.xu@xxxxxxxxx)
- *  Yaozu Dong (Eddie Dong) (Eddie.dong@xxxxxxxxx)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
 /**************************************************************************
 VCPU general register access routines
 **************************************************************************/
diff -r 9529d667d042 -r 72dd57f88de3 xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c	Thu Feb 15 21:09:39 2007
+++ b/xen/arch/ia64/vmx/vmx_virt.c	Sat Feb 17 03:28:54 2007
@@ -31,7 +31,8 @@
 #include
 #include
 
-void
+#ifdef BYPASS_VMAL_OPCODE
+static void
 ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, u64 * cause)
 {
     *cause=0;
@@ -141,20 +142,21 @@
         break;
     }
 }
-
-IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
+#endif
+
+static IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
 {
     u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
     return vmx_vcpu_reset_psr_sm(vcpu,imm24);
 }
 
-IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
 {
     u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
     return vmx_vcpu_set_psr_sm(vcpu,imm24);
 }
 
-IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
 {
     u64 tgt = inst.M33.r1;
     u64 val;
@@ -172,7 +174,7 @@
 /**
  * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
  */
-IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
 {
     u64 val;
 
@@ -187,7 +189,7 @@
    Privileged operation emulation routines
 **************************************************************************/
 
-IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst)
 {
     IA64_PSR vpsr;
     REGS *regs;
@@ -209,7 +211,7 @@
     return vmx_vcpu_rfi(vcpu);
 }
 
-IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
 {
 #ifdef CHECK_FAULT
     IA64_PSR vpsr;
@@ -224,7 +226,7 @@
     return vcpu_bsw0(vcpu);
 }
 
-IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
 {
 #ifdef CHECK_FAULT
     IA64_PSR vpsr;
@@ -239,12 +241,12 @@
     return vcpu_bsw1(vcpu);
 }
 
-IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
 {
     return vmx_vcpu_cover(vcpu);
 }
 
-IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
 {
     u64 r2,r3;
 #ifdef VMAL_NO_FAULT_CHECK
@@ -278,7 +280,7 @@
     return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
 }
 
-IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
 {
     u64 r3;
 #ifdef VMAL_NO_FAULT_CHECK
@@ -303,7 +305,7 @@
     return vmx_vcpu_ptc_e(vcpu,r3);
 }
 
-IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
 {
     u64 r2,r3;
 #ifdef VMAL_NO_FAULT_CHECK
@@ -336,7 +338,7 @@
     return vmx_vcpu_ptc_g(vcpu,r3,bits(r2,2,7));
 }
 
-IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
 {
     u64 r2,r3;
 #ifdef VMAL_NO_FAULT_CHECK
@@ -369,7 +371,7 @@
     return vmx_vcpu_ptc_ga(vcpu,r3,bits(r2,2,7));
 }
 
-IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
+static IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
 {
     IA64FAULT ret1, ret2;
 
@@ -403,7 +405,7 @@
     return IA64_NO_FAULT;
 }
 
-IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
 {
     u64 r2,r3;
     if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
@@ -411,7 +413,7 @@
     return vmx_vcpu_ptr_d(vcpu,r3,bits(r2,2,7));
 }
 
-IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
 {
     u64 r2,r3;
     if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
@@ -420,7 +422,7 @@
 }
 
 
-IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
 {
     u64 r1,r3;
 #ifdef CHECK_FAULT
@@ -450,7 +452,7 @@
 }
 
 
-IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
 {
     u64 r1,r3;
 #ifdef CHECK_FAULT
@@ -482,7 +484,7 @@
 }
 
 
-IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
 {
     u64 r1,r3;
 #ifdef CHECK_FAULT
@@ -526,7 +528,7 @@
     return(IA64_NO_FAULT);
 }
 
-IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
 {
     u64 r1,r3;
 #ifdef CHECK_FAULT
@@ -564,7 +566,7 @@
  * Insert translation register/cache
 ************************************/
 
-IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
 {
     u64 itir, ifa, pte, slot;
 #ifdef VMAL_NO_FAULT_CHECK
@@ -621,7 +623,7 @@
     return (vmx_vcpu_itr_d(vcpu,slot,pte,itir,ifa));
 }
 
-IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
 {
     u64 itir, ifa, pte, slot;
 #ifdef VMAL_NO_FAULT_CHECK
@@ -678,7 +680,7 @@
     return (vmx_vcpu_itr_i(vcpu,slot,pte,itir,ifa));
 }
 
-IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
+static IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
 {
     IA64FAULT ret1;
 
@@ -727,7 +729,7 @@
     return IA64_NO_FAULT;
 }
 
-IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
 {
     u64 itir, ifa, pte;
 
@@ -738,7 +740,7 @@
     return (vmx_vcpu_itc_d(vcpu,pte,itir,ifa));
 }
 
-IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
 {
     u64 itir, ifa, pte;
 
@@ -754,7 +756,7 @@
  * Moves to semi-privileged registers
 *************************************/
 
-IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
 {
     // I27 and M30 are identical for these fields
     u64 imm;
@@ -780,7 +782,7 @@
     return (vmx_vcpu_set_itc(vcpu, imm));
 }
 
-IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
 {
     // I26 and M29 are identical for these fields
     u64 r2;
@@ -808,7 +810,7 @@
 }
 
 
-IA64FAULT vmx_emul_mov_from_ar_reg(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_from_ar_reg(VCPU *vcpu, INST64 inst)
 {
     // I27 and M30 are identical for these fields
     u64 r1;
@@ -840,7 +842,7 @@
  * Moves to privileged registers
 ********************************/
 
-IA64FAULT vmx_emul_mov_to_pkr(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_to_pkr(VCPU *vcpu, INST64 inst)
 {
     u64 r3,r2;
 #ifdef CHECK_FAULT
@@ -863,7 +865,7 @@
     return (vmx_vcpu_set_pkr(vcpu,r3,r2));
 }
 
-IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu, INST64 inst)
 {
     u64 r3,r2;
 #ifdef CHECK_FAULT
@@ -886,7 +888,7 @@
     return (vmx_vcpu_set_rr(vcpu,r3,r2));
 }
 
-IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
 {
     u64 r3,r2;
     return IA64_NO_FAULT;
@@ -910,7 +912,7 @@
     return (vmx_vcpu_set_dbr(vcpu,r3,r2));
 }
 
-IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
 {
     u64 r3,r2;
     return IA64_NO_FAULT;
@@ -934,7 +936,7 @@
     return (vmx_vcpu_set_ibr(vcpu,r3,r2));
 }
 
-IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu, INST64 inst)
 {
     u64 r3,r2;
 #ifdef CHECK_FAULT
@@ -957,7 +959,7 @@
     return (vmx_vcpu_set_pmc(vcpu,r3,r2));
 }
 
-IA64FAULT vmx_emul_mov_to_pmd(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_to_pmd(VCPU *vcpu, INST64 inst)
 {
     u64 r3,r2;
 #ifdef CHECK_FAULT
@@ -985,7 +987,7 @@
  * Moves from privileged registers
 **********************************/
 
-IA64FAULT vmx_emul_mov_from_rr(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_from_rr(VCPU *vcpu, INST64 inst)
 {
     u64 r3,r1;
 #ifdef CHECK_FAULT
@@ -1021,7 +1023,7 @@
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
 
-IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
 {
     u64 r3,r1;
 #ifdef CHECK_FAULT
@@ -1058,7 +1060,7 @@
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
 
-IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
 {
     u64 r3,r1;
 #ifdef CHECK_FAULT
@@ -1095,7 +1097,7 @@
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
 
-IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
 {
     u64 r3,r1;
 #ifdef CHECK_FAULT
@@ -1132,7 +1134,7 @@
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
 
-IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
 {
     u64 r3,r1;
 #ifdef CHECK_FAULT
@@ -1169,7 +1171,7 @@
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
 
-IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
 {
     u64 r3,r1;
 #ifdef CHECK_FAULT
@@ -1197,7 +1199,7 @@
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
 
-IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
 {
     u64 r2;
     extern u64 cr_igfld_mask(int index, u64 value);
@@ -1275,7 +1277,7 @@
     ((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
     vcpu_set_gr(vcpu, tgt, val,0):fault;
 
-IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
+static IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
 {
     u64 tgt = inst.M33.r1;
     u64 val;
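
Note (illustrative, not part of the changeset): the cleanup above leans on three
recurring C idioms, namely internal linkage via static, compiling out unused code
behind a preprocessor guard, and the union/bit-field read-modify-write used for
cr.isr. A minimal self-contained sketch of each follows; decode_slot,
KEEP_LEGACY_DECODER, and isr_t are invented names for illustration and do not
exist in the Xen tree.

#include <stdio.h>

/* 'static' gives a function internal linkage: no global symbol is emitted,
 * other translation units cannot call it, and the compiler can warn when it
 * becomes unused. */
static int decode_slot(unsigned long bundle)
{
    return (int)(bundle & 0x3);      /* hypothetical slot extraction */
}

/* Code without a current caller is compiled out behind a guard rather than
 * deleted (compare BYPASS_VMAL_OPCODE above); defining the macro revives it
 * without digging through history. */
#ifdef KEEP_LEGACY_DECODER
static int legacy_decode_slot(unsigned long bundle)
{
    return (int)((bundle >> 1) & 0x1);
}
#endif

/* The cr.isr.ir fix uses a union/bit-field read-modify-write; a hypothetical
 * miniature of the same idiom: */
typedef union {
    unsigned long val;
    struct {
        unsigned long code : 16;
        unsigned long ir   : 1;      /* incomplete register frame */
    } bits;
} isr_t;

int main(void)
{
    isr_t isr = { .val = 0x1beefUL };

    isr.bits.ir = 0;                 /* clear one field, keep the rest */
    printf("slot=%d isr=%#lx\n", decode_slot(0x6), isr.val);
    return 0;
}

With KEEP_LEGACY_DECODER undefined, legacy_decode_slot compiles to nothing,
which mirrors how leaving BYPASS_VMAL_OPCODE undefined keeps ia64_priv_decoder
out of the build while preserving the source.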