diff -uNr a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	2008-07-22 18:37:51.000000000 +0100
+++ b/xen/arch/x86/hvm/hvm.c	2008-08-05 13:30:24.168299605 +0100
@@ -772,7 +772,7 @@
 
     do_sched_op_compat(SCHEDOP_block, 0);
 
-    HVMTRACE_1D(HLT, curr, /* pending = */ vcpu_runnable(curr));
+    HVMTRACE_1D(HLT, /* pending = */ vcpu_runnable(curr));
 }
 
 void hvm_triple_fault(void)
diff -uNr a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c	2008-06-12 16:58:20.000000000 +0100
+++ b/xen/arch/x86/hvm/svm/intr.c	2008-08-05 13:30:24.168299605 +0100
@@ -80,7 +80,7 @@
 
     ASSERT(intack.source != hvm_intsrc_none);
 
-    HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
+    HVMTRACE_2D(INJ_VIRQ, 0x0, /*fake=*/ 1);
 
     /*
      * Create a dummy virtual interrupt to intercept as soon as the
@@ -199,7 +199,7 @@
     }
     else
     {
-        HVMTRACE_2D(INJ_VIRQ, v, intack.vector, /*fake=*/ 0);
+        HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
         svm_inject_extint(v, intack.vector);
         pt_intr_post(v, intack);
     }
diff -uNr a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	2008-08-05 13:21:13.358299605 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	2008-08-05 13:30:24.168299605 +0100
@@ -759,11 +759,11 @@
     if ( trapnr == TRAP_page_fault )
     {
         vmcb->cr2 = curr->arch.hvm_vcpu.guest_cr[2] = cr2;
-        HVMTRACE_LONG_2D(PF_INJECT, curr, errcode, TRC_PAR_LONG(cr2));
+        HVMTRACE_LONG_2D(PF_INJECT, errcode, TRC_PAR_LONG(cr2));
     }
     else
     {
-        HVMTRACE_2D(INJ_EXC, curr, trapnr, errcode);
+        HVMTRACE_2D(INJ_EXC, trapnr, errcode);
     }
 
     if ( (trapnr == TRAP_debug) &&
@@ -919,7 +919,7 @@
         __clear_bit(X86_FEATURE_APIC & 31, edx);
     }
 
-    HVMTRACE_5D (CPUID, v, input, *eax, *ebx, *ecx, *edx);
+    HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx);
 }
 
 static void svm_vmexit_do_cpuid(struct cpu_user_regs *regs)
@@ -946,7 +946,7 @@
 
 static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs)
 {
-    HVMTRACE_0D(DR_WRITE, v);
+    HVMTRACE_0D(DR_WRITE);
     __restore_debug_registers(v);
 }
 
@@ -1018,7 +1018,7 @@
     regs->edx = msr_content >> 32;
 
  done:
-    HVMTRACE_3D (MSR_READ, v, ecx, regs->eax, regs->edx);
+    HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx);
     HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
                 ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
     return X86EMUL_OKAY;
@@ -1037,7 +1037,7 @@
 
     msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
 
-    HVMTRACE_3D (MSR_WRITE, v, ecx, regs->eax, regs->edx);
+    HVMTRACE_3D (MSR_WRITE, ecx, regs->eax, regs->edx);
 
     switch ( ecx )
     {
@@ -1157,7 +1157,7 @@
 static void svm_invlpg_intercept(unsigned long vaddr)
 {
     struct vcpu *curr = current;
-    HVMTRACE_LONG_2D(INVLPG, curr, 0, TRC_PAR_LONG(vaddr));
+    HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(vaddr));
     paging_invlpg(curr, vaddr);
     svm_asid_g_invlpg(curr, vaddr);
 }
@@ -1180,7 +1180,7 @@
 
     exit_reason = vmcb->exitcode;
 
-    HVMTRACE_ND(VMEXIT64, 1/*cycles*/, v, 3, exit_reason,
+    HVMTRACE_ND(VMEXIT64, 1/*cycles*/, 3, exit_reason,
                 (uint32_t)regs->eip, (uint32_t)((uint64_t)regs->eip >> 32),
                 0, 0, 0);
 
@@ -1205,17 +1205,17 @@
     {
     case VMEXIT_INTR:
         /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
-        HVMTRACE_0D(INTR, v);
+        HVMTRACE_0D(INTR);
         break;
 
     case VMEXIT_NMI:
         /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
-        HVMTRACE_0D(NMI, v);
+        HVMTRACE_0D(NMI);
         break;
 
     case VMEXIT_SMI:
         /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
-        HVMTRACE_0D(SMI, v);
+        HVMTRACE_0D(SMI);
         break;
 
     case VMEXIT_EXCEPTION_DB:
@@ -1251,9 +1251,9 @@
         if ( paging_fault(va, regs) )
         {
             if (hvm_long_mode_enabled(v))
-                HVMTRACE_LONG_2D(PF_XEN, v, regs->error_code, TRC_PAR_LONG(va));
+                HVMTRACE_LONG_2D(PF_XEN, regs->error_code, TRC_PAR_LONG(va));
             else
-                HVMTRACE_2D(PF_XEN, v, regs->error_code, va);
+                HVMTRACE_2D(PF_XEN, regs->error_code, va);
             break;
         }
 
@@ -1263,7 +1263,7 @@
 
     /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
     case VMEXIT_EXCEPTION_MC:
-        HVMTRACE_0D(MCE, v);
+        HVMTRACE_0D(MCE);
         break;
 
     case VMEXIT_VINTR:
@@ -1320,7 +1320,7 @@
     case VMEXIT_VMMCALL:
         if ( (inst_len = __get_instruction_length(v, INSTR_VMCALL)) == 0 )
             break;
-        HVMTRACE_1D(VMMCALL, v, regs->eax);
+        HVMTRACE_1D(VMMCALL, regs->eax);
         rc = hvm_do_hypercall(regs);
         if ( rc != HVM_HCALL_preempted )
         {
@@ -1395,7 +1395,7 @@
 
 asmlinkage void svm_trace_vmentry(void)
 {
-    HVMTRACE_ND (VMENTRY, 1/*cycles*/, current, 0, 0, 0, 0, 0, 0, 0);
+    HVMTRACE_ND (VMENTRY, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
 }
 
 /*
diff -uNr a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c	2008-06-01 13:51:38.000000000 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c	2008-08-05 13:30:24.168299605 +0100
@@ -198,7 +198,7 @@
     }
     else
    {
-        HVMTRACE_2D(INJ_VIRQ, v, intack.vector, /*fake=*/ 0);
+        HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
         vmx_inject_extint(v, intack.vector);
         pt_intr_post(v, intack);
     }
diff -uNr a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	2008-08-05 13:21:13.368299605 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	2008-08-05 13:30:24.168299605 +0100
@@ -1114,10 +1114,10 @@
     __vmwrite(VM_ENTRY_INTR_INFO, intr_fields);
 
     if ( trap == TRAP_page_fault )
-        HVMTRACE_LONG_2D(PF_INJECT, v, error_code,
+        HVMTRACE_LONG_2D(PF_INJECT, error_code,
                          TRC_PAR_LONG(v->arch.hvm_vcpu.guest_cr[2]));
     else
-        HVMTRACE_2D(INJ_EXC, v, trap, error_code);
+        HVMTRACE_2D(INJ_EXC, trap, error_code);
 }
 
 void vmx_inject_hw_exception(struct vcpu *v, int trap, int error_code)
@@ -1345,7 +1345,7 @@
         break;
     }
 
-    HVMTRACE_5D (CPUID, current, input, *eax, *ebx, *ecx, *edx);
+    HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx);
 }
 
 static void vmx_do_cpuid(struct cpu_user_regs *regs)
@@ -1370,7 +1370,7 @@
 {
     struct vcpu *v = current;
 
-    HVMTRACE_0D(DR_WRITE, v);
+    HVMTRACE_0D(DR_WRITE);
 
     if ( !v->arch.hvm_vcpu.flag_dr_dirty )
         __restore_debug_registers(v);
@@ -1383,7 +1383,7 @@
 static void vmx_invlpg_intercept(unsigned long vaddr)
 {
     struct vcpu *curr = current;
-    HVMTRACE_LONG_2D(INVLPG, curr, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
+    HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
     if ( paging_invlpg(curr, vaddr) )
         vpid_sync_vcpu_gva(curr, vaddr);
 }
@@ -1434,7 +1434,7 @@
         goto exit_and_crash;
     }
 
-    HVMTRACE_LONG_2D(CR_WRITE, v, cr, TRC_PAR_LONG(value));
+    HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(value));
 
     HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
 
@@ -1505,7 +1505,7 @@
         break;
     }
 
-    HVMTRACE_LONG_2D(CR_READ, v, cr, TRC_PAR_LONG(value));
+    HVMTRACE_LONG_2D(CR_READ, cr, TRC_PAR_LONG(value));
 
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
 }
@@ -1531,13 +1531,13 @@
     case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
         v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
         vmx_update_guest_cr(v, 0);
-        HVMTRACE_0D(CLTS, current);
+        HVMTRACE_0D(CLTS);
         break;
     case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
         value = v->arch.hvm_vcpu.guest_cr[0];
         /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
         value = (value & ~0xe) | ((exit_qualification >> 16) & 0xf);
-        HVMTRACE_LONG_1D(LMSW, current, value);
+        HVMTRACE_LONG_1D(LMSW, value);
         return !hvm_set_cr0(value);
     default:
         BUG();
@@ -1692,7 +1692,7 @@
     regs->edx = (uint32_t)(msr_content >> 32);
 
 done:
-    HVMTRACE_3D (MSR_READ, v, ecx, regs->eax, regs->edx);
+    HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx);
     HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
                 ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
 
@@ -1803,7 +1803,7 @@
 
     msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
 
-    HVMTRACE_3D (MSR_WRITE, v, ecx, regs->eax, regs->edx);
+    HVMTRACE_3D (MSR_WRITE, ecx, regs->eax, regs->edx);
 
     switch ( ecx )
     {
@@ -1894,7 +1894,7 @@
     BUG_ON(!(vector & INTR_INFO_VALID_MASK));
 
     vector &= INTR_INFO_VECTOR_MASK;
-    HVMTRACE_1D(INTR, current, vector);
+    HVMTRACE_1D(INTR, vector);
 
     switch ( vector )
     {
@@ -2010,7 +2010,7 @@
         break;
     case EXIT_REASON_MACHINE_CHECK:
         printk("caused by machine check.\n");
-        HVMTRACE_0D(MCE, curr);
+        HVMTRACE_0D(MCE);
         do_machine_check(regs);
         break;
     default:
@@ -2037,7 +2037,7 @@
 
     exit_reason = __vmread(VM_EXIT_REASON);
 
-    HVMTRACE_ND(VMEXIT64, 1/*cycles*/, v, 3, exit_reason,
+    HVMTRACE_ND(VMEXIT64, 1/*cycles*/, 3, exit_reason,
                 (uint32_t)regs->eip, (uint32_t)((uint64_t)regs->eip >> 32),
                 0, 0, 0);
 
@@ -2129,10 +2129,10 @@
             if ( paging_fault(exit_qualification, regs) )
             {
                 if ( hvm_long_mode_enabled(v) )
-                    HVMTRACE_LONG_2D (PF_XEN, v, regs->error_code,
+                    HVMTRACE_LONG_2D (PF_XEN, regs->error_code,
                                       TRC_PAR_LONG(exit_qualification) );
                 else
-                    HVMTRACE_2D (PF_XEN, v,
+                    HVMTRACE_2D (PF_XEN,
                                  regs->error_code, exit_qualification );
                 break;
             }
@@ -2144,11 +2144,11 @@
             if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) !=
                  (X86_EVENTTYPE_NMI << 8) )
                 goto exit_and_crash;
-            HVMTRACE_0D(NMI, v);
+            HVMTRACE_0D(NMI);
             do_nmi(regs); /* Real NMI, vector 2: normal processing. */
             break;
         case TRAP_machine_check:
-            HVMTRACE_0D(MCE, v);
+            HVMTRACE_0D(MCE);
             do_machine_check(regs);
             break;
         default:
@@ -2213,7 +2213,7 @@
     case EXIT_REASON_VMCALL:
     {
         int rc;
-        HVMTRACE_1D(VMMCALL, v, regs->eax);
+        HVMTRACE_1D(VMMCALL, regs->eax);
         inst_len = __get_instruction_length(); /* Safe: VMCALL */
         rc = hvm_do_hypercall(regs);
         if ( rc != HVM_HCALL_preempted )
@@ -2300,7 +2300,7 @@
 
 asmlinkage void vmx_trace_vmentry(void)
 {
-    HVMTRACE_ND (VMENTRY, 1/*cycles*/, current, 0, 0, 0, 0, 0, 0, 0);
+    HVMTRACE_ND (VMENTRY, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
 }
 
 /*
diff -uNr a/xen/include/asm-x86/hvm/trace.h b/xen/include/asm-x86/hvm/trace.h
--- a/xen/include/asm-x86/hvm/trace.h	2008-06-12 16:58:20.000000000 +0100
+++ b/xen/include/asm-x86/hvm/trace.h	2008-08-05 13:30:24.168299605 +0100
@@ -56,16 +56,13 @@
 #define TRC_PAR_LONG(par) (par)
 #endif
 
-#define HVMTRACE_ND(evt, cycles, vcpu, count, d1, d2, d3, d4, d5, d6)  \
+#define HVMTRACE_ND(evt, cycles, count, d1, d2, d3, d4, d5, d6)        \
     do {                                                               \
         if ( unlikely(tb_init_done) && DO_TRC_HVM_ ## evt )            \
         {                                                              \
             struct {                                                   \
-                u32 did:16, vid:16;                                    \
                 u32 d[6];                                              \
             } _d;                                                      \
-            _d.did=(vcpu)->domain->domain_id;                          \
-            _d.vid=(vcpu)->vcpu_id;                                    \
             _d.d[0]=(d1);                                              \
             _d.d[1]=(d2);                                              \
             _d.d[2]=(d3);                                              \
@@ -77,32 +74,32 @@
         }                                                              \
     } while(0)
 
-#define HVMTRACE_6D(evt, vcpu, d1, d2, d3, d4, d5, d6) \
-    HVMTRACE_ND(evt, 0, vcpu, 6, d1, d2, d3, d4, d5, d6)
-#define HVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5) \
-    HVMTRACE_ND(evt, 0, vcpu, 5, d1, d2, d3, d4, d5, 0)
-#define HVMTRACE_4D(evt, vcpu, d1, d2, d3, d4) \
-    HVMTRACE_ND(evt, 0, vcpu, 4, d1, d2, d3, d4, 0, 0)
-#define HVMTRACE_3D(evt, vcpu, d1, d2, d3) \
-    HVMTRACE_ND(evt, 0, vcpu, 3, d1, d2, d3, 0, 0, 0)
-#define HVMTRACE_2D(evt, vcpu, d1, d2) \
-    HVMTRACE_ND(evt, 0, vcpu, 2, d1, d2, 0, 0, 0, 0)
-#define HVMTRACE_1D(evt, vcpu, d1) \
-    HVMTRACE_ND(evt, 0, vcpu, 1, d1, 0, 0, 0, 0, 0)
-#define HVMTRACE_0D(evt, vcpu) \
-    HVMTRACE_ND(evt, 0, vcpu, 0, 0, 0, 0, 0, 0, 0)
+#define HVMTRACE_6D(evt, d1, d2, d3, d4, d5, d6) \
+    HVMTRACE_ND(evt, 0, 6, d1, d2, d3, d4, d5, d6)
+#define HVMTRACE_5D(evt, d1, d2, d3, d4, d5) \
+    HVMTRACE_ND(evt, 0, 5, d1, d2, d3, d4, d5, 0)
+#define HVMTRACE_4D(evt, d1, d2, d3, d4) \
+    HVMTRACE_ND(evt, 0, 4, d1, d2, d3, d4, 0, 0)
+#define HVMTRACE_3D(evt, d1, d2, d3) \
+    HVMTRACE_ND(evt, 0, 3, d1, d2, d3, 0, 0, 0)
+#define HVMTRACE_2D(evt, d1, d2) \
+    HVMTRACE_ND(evt, 0, 2, d1, d2, 0, 0, 0, 0)
+#define HVMTRACE_1D(evt, d1) \
+    HVMTRACE_ND(evt, 0, 1, d1, 0, 0, 0, 0, 0)
+#define HVMTRACE_0D(evt) \
+    HVMTRACE_ND(evt, 0, 0, 0, 0, 0, 0, 0, 0)
 
 #ifdef __x86_64__
-#define HVMTRACE_LONG_1D(evt, vcpu, d1) \
-    HVMTRACE_2D(evt ## 64, vcpu, (d1) & 0xFFFFFFFF, (d1) >> 32)
-#define HVMTRACE_LONG_2D(evt,vcpu,d1,d2, ...) \
-    HVMTRACE_3D(evt ## 64, vcpu, d1, d2)
-#define HVMTRACE_LONG_3D(evt, vcpu, d1, d2, d3, ...) \
-    HVMTRACE_4D(evt ## 64, vcpu, d1, d2, d3)
-#define HVMTRACE_LONG_4D(evt, vcpu, d1, d2, d3, d4, ...) \
-    HVMTRACE_5D(evt ## 64, vcpu, d1, d2, d3, d4)
+#define HVMTRACE_LONG_1D(evt, d1) \
+    HVMTRACE_2D(evt ## 64, (d1) & 0xFFFFFFFF, (d1) >> 32)
+#define HVMTRACE_LONG_2D(evt, d1, d2, ...) \
+    HVMTRACE_3D(evt ## 64, d1, d2)
+#define HVMTRACE_LONG_3D(evt, d1, d2, d3, ...) \
+    HVMTRACE_4D(evt ## 64, d1, d2, d3)
+#define HVMTRACE_LONG_4D(evt, d1, d2, d3, d4, ...) \
+    HVMTRACE_5D(evt ## 64, d1, d2, d3, d4)
 #else
 #define HVMTRACE_LONG_1D HVMTRACE_1D
 #define HVMTRACE_LONG_2D HVMTRACE_2D