To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] xentrace 3/7: Remove vcpu / domain from HVM traces.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Mon, 08 Sep 2008 12:40:15 -0700
Delivery-date: Mon, 08 Sep 2008 12:40:45 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1220885523 -3600
# Node ID 1abe2bf8c0fad6efccf4be265ccfa77941fbedab
# Parent  0e3df63b993259263ca78f11bd0955e7055bb180
xentrace 3/7: Remove vcpu / domain from HVM traces.

This information is now redundant, as it can be determined using
runstate changes and lost record changes.  This reduces log size for
long-running traces by 10-30%.
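
[Editor's sketch, not part of this patch: the idea is that scheduler
runstate-change records still identify the running vcpu, so a trace
consumer can track, per physical CPU, which (domain, vcpu) was last
scheduled there and charge subsequent HVM records on that pcpu to it.
The predicates and the data-word layout below are illustrative
assumptions, not the real xentrace record encodings.]

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PCPUS 64

    /* Last (domain, vcpu) seen running on each physical CPU. */
    static struct {
        uint16_t cur_did;
        uint16_t cur_vid;
    } pcpu[MAX_PCPUS];

    /* Hypothetical predicates -- a real consumer would decode the
     * TRC_* event classes from the record header instead. */
    static int is_runstate_running(uint32_t event) { return event == 1; }
    static int is_hvm_event(uint32_t event)        { return event == 2; }

    static void consume_record(unsigned int cpu, uint32_t event,
                               const uint32_t *d)
    {
        if ( is_runstate_running(event) )
        {
            /* Scheduler records still carry the ids explicitly;
             * assume did in d[0] and vid in d[1] for illustration. */
            pcpu[cpu].cur_did = (uint16_t)d[0];
            pcpu[cpu].cur_vid = (uint16_t)d[1];
        }
        else if ( is_hvm_event(event) )
        {
            /* HVM records no longer carry did/vid: attribute them to
             * the vcpu most recently scheduled on this pcpu. */
            printf("d%uv%u: hvm event %#x\n",
                   (unsigned)pcpu[cpu].cur_did,
                   (unsigned)pcpu[cpu].cur_vid, event);
        }
    }

This per-pcpu bookkeeping is why dropping the two 16-bit ids from every
HVM record loses no information, at the cost of the consumer keeping a
little state.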

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Signed-off-by: Trolle Selander <trolle.selander@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c          |    2 -
 xen/arch/x86/hvm/svm/intr.c     |    4 +--
 xen/arch/x86/hvm/svm/svm.c      |   32 +++++++++++++-------------
 xen/arch/x86/hvm/vmx/intr.c     |    2 -
 xen/arch/x86/hvm/vmx/vmx.c      |   40 ++++++++++++++++----------------
 xen/include/asm-x86/hvm/trace.h |   49 ++++++++++++++++++----------------------
 6 files changed, 63 insertions(+), 66 deletions(-)

diff -r 0e3df63b9932 -r 1abe2bf8c0fa xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Mon Sep 08 15:50:33 2008 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Mon Sep 08 15:52:03 2008 +0100
@@ -773,7 +773,7 @@ void hvm_hlt(unsigned long rflags)
 
     do_sched_op_compat(SCHEDOP_block, 0);
 
-    HVMTRACE_1D(HLT, curr, /* pending = */ vcpu_runnable(curr));
+    HVMTRACE_1D(HLT, /* pending = */ vcpu_runnable(curr));
 }
 
 void hvm_triple_fault(void)
diff -r 0e3df63b9932 -r 1abe2bf8c0fa xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c       Mon Sep 08 15:50:33 2008 +0100
+++ b/xen/arch/x86/hvm/svm/intr.c       Mon Sep 08 15:52:03 2008 +0100
@@ -80,7 +80,7 @@ static void enable_intr_window(struct vc
 
     ASSERT(intack.source != hvm_intsrc_none);
 
-    HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
+    HVMTRACE_2D(INJ_VIRQ, 0x0, /*fake=*/ 1);
 
     /*
      * Create a dummy virtual interrupt to intercept as soon as the
@@ -199,7 +199,7 @@ asmlinkage void svm_intr_assist(void)
     }
     else
     {
-        HVMTRACE_2D(INJ_VIRQ, v, intack.vector, /*fake=*/ 0);
+        HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
         svm_inject_extint(v, intack.vector);
         pt_intr_post(v, intack);
     }
diff -r 0e3df63b9932 -r 1abe2bf8c0fa xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Mon Sep 08 15:50:33 2008 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Mon Sep 08 15:52:03 2008 +0100
@@ -759,11 +759,11 @@ static void svm_inject_exception(
     if ( trapnr == TRAP_page_fault )
     {
         vmcb->cr2 = curr->arch.hvm_vcpu.guest_cr[2] = cr2;
-        HVMTRACE_LONG_2D(PF_INJECT, curr, errcode, TRC_PAR_LONG(cr2));
+        HVMTRACE_LONG_2D(PF_INJECT, errcode, TRC_PAR_LONG(cr2));
     }
     else
     {
-        HVMTRACE_2D(INJ_EXC, curr, trapnr, errcode);
+        HVMTRACE_2D(INJ_EXC, trapnr, errcode);
     }
 
     if ( (trapnr == TRAP_debug) &&
@@ -919,7 +919,7 @@ static void svm_cpuid_intercept(
             __clear_bit(X86_FEATURE_APIC & 31, edx);
     }
 
-    HVMTRACE_5D (CPUID, v, input, *eax, *ebx, *ecx, *edx);
+    HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx);
 }
 
 static void svm_vmexit_do_cpuid(struct cpu_user_regs *regs)
@@ -946,7 +946,7 @@ static void svm_vmexit_do_cpuid(struct c
 
 static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs)
 {
-    HVMTRACE_0D(DR_WRITE, v);
+    HVMTRACE_0D(DR_WRITE);
     __restore_debug_registers(v);
 }
 
@@ -1018,7 +1018,7 @@ static int svm_msr_read_intercept(struct
     regs->edx = msr_content >> 32;
 
  done:
-    HVMTRACE_3D (MSR_READ, v, ecx, regs->eax, regs->edx);
+    HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx);
     HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
                 ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
     return X86EMUL_OKAY;
@@ -1037,7 +1037,7 @@ static int svm_msr_write_intercept(struc
 
     msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
 
-    HVMTRACE_3D (MSR_WRITE, v, ecx, regs->eax, regs->edx);
+    HVMTRACE_3D (MSR_WRITE, ecx, regs->eax, regs->edx);
 
     switch ( ecx )
     {
@@ -1168,7 +1168,7 @@ static void svm_invlpg_intercept(unsigne
 static void svm_invlpg_intercept(unsigned long vaddr)
 {
     struct vcpu *curr = current;
-    HVMTRACE_LONG_2D(INVLPG, curr, 0, TRC_PAR_LONG(vaddr));
+    HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(vaddr));
     paging_invlpg(curr, vaddr);
     svm_asid_g_invlpg(curr, vaddr);
 }
@@ -1191,7 +1191,7 @@ asmlinkage void svm_vmexit_handler(struc
 
     exit_reason = vmcb->exitcode;
 
-    HVMTRACE_ND(VMEXIT64, 1/*cycles*/, v, 3, exit_reason,
+    HVMTRACE_ND(VMEXIT64, 1/*cycles*/, 3, exit_reason,
                 (uint32_t)regs->eip, (uint32_t)((uint64_t)regs->eip >> 32),
                 0, 0, 0);
 
@@ -1216,17 +1216,17 @@ asmlinkage void svm_vmexit_handler(struc
     {
     case VMEXIT_INTR:
         /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
-        HVMTRACE_0D(INTR, v);
+        HVMTRACE_0D(INTR);
         break;
 
     case VMEXIT_NMI:
         /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
-        HVMTRACE_0D(NMI, v);
+        HVMTRACE_0D(NMI);
         break;
 
     case VMEXIT_SMI:
         /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
-        HVMTRACE_0D(SMI, v);
+        HVMTRACE_0D(SMI);
         break;
 
     case VMEXIT_EXCEPTION_DB:
@@ -1262,9 +1262,9 @@ asmlinkage void svm_vmexit_handler(struc
         if ( paging_fault(va, regs) )
         {
             if (hvm_long_mode_enabled(v))
-                HVMTRACE_LONG_2D(PF_XEN, v, regs->error_code, TRC_PAR_LONG(va));
+                HVMTRACE_LONG_2D(PF_XEN, regs->error_code, TRC_PAR_LONG(va));
             else
-                HVMTRACE_2D(PF_XEN, v, regs->error_code, va);
+                HVMTRACE_2D(PF_XEN, regs->error_code, va);
             break;
         }
 
@@ -1274,7 +1274,7 @@ asmlinkage void svm_vmexit_handler(struc
 
     /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
     case VMEXIT_EXCEPTION_MC:
-        HVMTRACE_0D(MCE, v);
+        HVMTRACE_0D(MCE);
         break;
 
     case VMEXIT_VINTR:
@@ -1331,7 +1331,7 @@ asmlinkage void svm_vmexit_handler(struc
     case VMEXIT_VMMCALL:
         if ( (inst_len = __get_instruction_length(v, INSTR_VMCALL)) == 0 )
             break;
-        HVMTRACE_1D(VMMCALL, v, regs->eax);
+        HVMTRACE_1D(VMMCALL, regs->eax);
         rc = hvm_do_hypercall(regs);
         if ( rc != HVM_HCALL_preempted )
         {
@@ -1406,7 +1406,7 @@ asmlinkage void svm_vmexit_handler(struc
 
 asmlinkage void svm_trace_vmentry(void)
 {
-    HVMTRACE_ND (VMENTRY, 1/*cycles*/, current, 0, 0, 0, 0, 0, 0, 0);
+    HVMTRACE_ND (VMENTRY, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
 }
   
 /*
diff -r 0e3df63b9932 -r 1abe2bf8c0fa xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c       Mon Sep 08 15:50:33 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c       Mon Sep 08 15:52:03 2008 +0100
@@ -198,7 +198,7 @@ asmlinkage void vmx_intr_assist(void)
     }
     else
     {
-        HVMTRACE_2D(INJ_VIRQ, v, intack.vector, /*fake=*/ 0);
+        HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
         vmx_inject_extint(v, intack.vector);
         pt_intr_post(v, intack);
     }
diff -r 0e3df63b9932 -r 1abe2bf8c0fa xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Mon Sep 08 15:50:33 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Mon Sep 08 15:52:03 2008 +0100
@@ -1114,10 +1114,10 @@ static void __vmx_inject_exception(
     __vmwrite(VM_ENTRY_INTR_INFO, intr_fields);
 
     if ( trap == TRAP_page_fault )
-        HVMTRACE_LONG_2D(PF_INJECT, v, error_code,
+        HVMTRACE_LONG_2D(PF_INJECT, error_code,
             TRC_PAR_LONG(v->arch.hvm_vcpu.guest_cr[2]));
     else
-        HVMTRACE_2D(INJ_EXC, v, trap, error_code);
+        HVMTRACE_2D(INJ_EXC, trap, error_code);
 }
 
 void vmx_inject_hw_exception(struct vcpu *v, int trap, int error_code)
@@ -1345,7 +1345,7 @@ static void vmx_cpuid_intercept(
             break;
     }
 
-    HVMTRACE_5D (CPUID, current, input, *eax, *ebx, *ecx, *edx);
+    HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx);
 }
 
 static void vmx_do_cpuid(struct cpu_user_regs *regs)
@@ -1370,7 +1370,7 @@ static void vmx_dr_access(unsigned long 
 {
     struct vcpu *v = current;
 
-    HVMTRACE_0D(DR_WRITE, v);
+    HVMTRACE_0D(DR_WRITE);
 
     if ( !v->arch.hvm_vcpu.flag_dr_dirty )
         __restore_debug_registers(v);
@@ -1383,7 +1383,7 @@ static void vmx_invlpg_intercept(unsigne
 static void vmx_invlpg_intercept(unsigned long vaddr)
 {
     struct vcpu *curr = current;
-    HVMTRACE_LONG_2D(INVLPG, curr, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
+    HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
     if ( paging_invlpg(curr, vaddr) )
         vpid_sync_vcpu_gva(curr, vaddr);
 }
@@ -1434,7 +1434,7 @@ static int mov_to_cr(int gp, int cr, str
         goto exit_and_crash;
     }
 
-    HVMTRACE_LONG_2D(CR_WRITE, v, cr, TRC_PAR_LONG(value));
+    HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(value));
 
     HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
 
@@ -1505,7 +1505,7 @@ static void mov_from_cr(int cr, int gp, 
         break;
     }
 
-    HVMTRACE_LONG_2D(CR_READ, v, cr, TRC_PAR_LONG(value));
+    HVMTRACE_LONG_2D(CR_READ, cr, TRC_PAR_LONG(value));
 
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
 }
@@ -1531,13 +1531,13 @@ static int vmx_cr_access(unsigned long e
     case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
         v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
         vmx_update_guest_cr(v, 0);
-        HVMTRACE_0D(CLTS, current);
+        HVMTRACE_0D(CLTS);
         break;
     case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
         value = v->arch.hvm_vcpu.guest_cr[0];
         /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
         value = (value & ~0xe) | ((exit_qualification >> 16) & 0xf);
-        HVMTRACE_LONG_1D(LMSW, current, value);
+        HVMTRACE_LONG_1D(LMSW, value);
         return !hvm_set_cr0(value);
     default:
         BUG();
@@ -1692,7 +1692,7 @@ static int vmx_msr_read_intercept(struct
     regs->edx = (uint32_t)(msr_content >> 32);
 
 done:
-    HVMTRACE_3D (MSR_READ, v, ecx, regs->eax, regs->edx);
+    HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx);
     HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
                 ecx, (unsigned long)regs->eax,
                 (unsigned long)regs->edx);
@@ -1803,7 +1803,7 @@ static int vmx_msr_write_intercept(struc
 
     msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
 
-    HVMTRACE_3D (MSR_WRITE, v, ecx, regs->eax, regs->edx);
+    HVMTRACE_3D (MSR_WRITE, ecx, regs->eax, regs->edx);
 
     switch ( ecx )
     {
@@ -1894,7 +1894,7 @@ static void vmx_do_extint(struct cpu_use
     BUG_ON(!(vector & INTR_INFO_VALID_MASK));
 
     vector &= INTR_INFO_VECTOR_MASK;
-    HVMTRACE_1D(INTR, current, vector);
+    HVMTRACE_1D(INTR, vector);
 
     switch ( vector )
     {
@@ -2010,7 +2010,7 @@ static void vmx_failed_vmentry(unsigned 
         break;
     case EXIT_REASON_MACHINE_CHECK:
         printk("caused by machine check.\n");
-        HVMTRACE_0D(MCE, curr);
+        HVMTRACE_0D(MCE);
         do_machine_check(regs);
         break;
     default:
@@ -2037,7 +2037,7 @@ asmlinkage void vmx_vmexit_handler(struc
 
     exit_reason = __vmread(VM_EXIT_REASON);
 
-    HVMTRACE_ND(VMEXIT64, 1/*cycles*/, v, 3, exit_reason,
+    HVMTRACE_ND(VMEXIT64, 1/*cycles*/, 3, exit_reason,
                 (uint32_t)regs->eip, (uint32_t)((uint64_t)regs->eip >> 32),
                 0, 0, 0);
 
@@ -2129,10 +2129,10 @@ asmlinkage void vmx_vmexit_handler(struc
             if ( paging_fault(exit_qualification, regs) )
             {
                 if ( hvm_long_mode_enabled(v) )
-                    HVMTRACE_LONG_2D (PF_XEN, v, regs->error_code,
+                    HVMTRACE_LONG_2D (PF_XEN, regs->error_code,
                         TRC_PAR_LONG(exit_qualification) );
                 else
-                    HVMTRACE_2D (PF_XEN, v,
+                    HVMTRACE_2D (PF_XEN,
                         regs->error_code, exit_qualification );
                 break;
             }
@@ -2144,11 +2144,11 @@ asmlinkage void vmx_vmexit_handler(struc
             if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) !=
                  (X86_EVENTTYPE_NMI << 8) )
                 goto exit_and_crash;
-            HVMTRACE_0D(NMI, v);
+            HVMTRACE_0D(NMI);
             do_nmi(regs); /* Real NMI, vector 2: normal processing. */
             break;
         case TRAP_machine_check:
-            HVMTRACE_0D(MCE, v);
+            HVMTRACE_0D(MCE);
             do_machine_check(regs);
             break;
         default:
@@ -2213,7 +2213,7 @@ asmlinkage void vmx_vmexit_handler(struc
     case EXIT_REASON_VMCALL:
     {
         int rc;
-        HVMTRACE_1D(VMMCALL, v, regs->eax);
+        HVMTRACE_1D(VMMCALL, regs->eax);
         inst_len = __get_instruction_length(); /* Safe: VMCALL */
         rc = hvm_do_hypercall(regs);
         if ( rc != HVM_HCALL_preempted )
@@ -2300,7 +2300,7 @@ asmlinkage void vmx_vmexit_handler(struc
 
 asmlinkage void vmx_trace_vmentry(void)
 {
-    HVMTRACE_ND (VMENTRY, 1/*cycles*/, current, 0, 0, 0, 0, 0, 0, 0);
+    HVMTRACE_ND (VMENTRY, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
 }
 
 /*
diff -r 0e3df63b9932 -r 1abe2bf8c0fa xen/include/asm-x86/hvm/trace.h
--- a/xen/include/asm-x86/hvm/trace.h   Mon Sep 08 15:50:33 2008 +0100
+++ b/xen/include/asm-x86/hvm/trace.h   Mon Sep 08 15:52:03 2008 +0100
@@ -56,16 +56,13 @@
 #define TRC_PAR_LONG(par) (par)
 #endif
 
-#define HVMTRACE_ND(evt, cycles, vcpu, count, d1, d2, d3, d4, d5, d6)   \
+#define HVMTRACE_ND(evt, cycles, count, d1, d2, d3, d4, d5, d6)         \
     do {                                                                \
         if ( unlikely(tb_init_done) && DO_TRC_HVM_ ## evt )             \
         {                                                               \
             struct {                                                    \
-                u32 did:16, vid:16;                                     \
                 u32 d[6];                                               \
             } _d;                                                       \
-            _d.did=(vcpu)->domain->domain_id;                           \
-            _d.vid=(vcpu)->vcpu_id;                                     \
             _d.d[0]=(d1);                                               \
             _d.d[1]=(d2);                                               \
             _d.d[2]=(d3);                                               \
@@ -77,32 +74,32 @@
         }                                                               \
     } while(0)
 
-#define HVMTRACE_6D(evt, vcpu, d1, d2, d3, d4, d5, d6)    \
-                      HVMTRACE_ND(evt, 0, vcpu, 6, d1, d2, d3,  d4, d5, d6)
-#define HVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5)        \
-                      HVMTRACE_ND(evt, 0, vcpu, 5, d1, d2, d3,  d4, d5, 0)
-#define HVMTRACE_4D(evt, vcpu, d1, d2, d3, d4)               \
-                      HVMTRACE_ND(evt, 0, vcpu, 4, d1, d2, d3,  d4, 0, 0)
-#define HVMTRACE_3D(evt, vcpu, d1, d2, d3)                   \
-                      HVMTRACE_ND(evt, 0, vcpu, 3, d1, d2, d3,  0, 0, 0)
-#define HVMTRACE_2D(evt, vcpu, d1, d2)                       \
-                      HVMTRACE_ND(evt, 0, vcpu, 2, d1, d2,  0,  0, 0, 0)
-#define HVMTRACE_1D(evt, vcpu, d1)                           \
-                      HVMTRACE_ND(evt, 0, vcpu, 1, d1,  0,  0,  0, 0, 0)
-#define HVMTRACE_0D(evt, vcpu)                               \
-                      HVMTRACE_ND(evt, 0, vcpu, 0, 0,  0,  0,  0, 0, 0)
+#define HVMTRACE_6D(evt, d1, d2, d3, d4, d5, d6)    \
+                      HVMTRACE_ND(evt, 0, 6, d1, d2, d3,  d4, d5, d6)
+#define HVMTRACE_5D(evt, d1, d2, d3, d4, d5)        \
+                      HVMTRACE_ND(evt, 0, 5, d1, d2, d3,  d4, d5, 0)
+#define HVMTRACE_4D(evt, d1, d2, d3, d4)               \
+                      HVMTRACE_ND(evt, 0, 4, d1, d2, d3,  d4, 0, 0)
+#define HVMTRACE_3D(evt, d1, d2, d3)                   \
+                      HVMTRACE_ND(evt, 0, 3, d1, d2, d3,  0, 0, 0)
+#define HVMTRACE_2D(evt, d1, d2)                       \
+                      HVMTRACE_ND(evt, 0, 2, d1, d2,  0,  0, 0, 0)
+#define HVMTRACE_1D(evt, d1)                           \
+                      HVMTRACE_ND(evt, 0, 1, d1,  0,  0,  0, 0, 0)
+#define HVMTRACE_0D(evt)                               \
+                      HVMTRACE_ND(evt, 0, 0, 0,  0,  0,  0, 0, 0)
 
 
 
 #ifdef __x86_64__
-#define HVMTRACE_LONG_1D(evt, vcpu, d1)                  \
-                   HVMTRACE_2D(evt ## 64, vcpu, (d1) & 0xFFFFFFFF, (d1) >> 32)
-#define HVMTRACE_LONG_2D(evt,vcpu,d1,d2, ...)              \
-                   HVMTRACE_3D(evt ## 64, vcpu, d1, d2)
-#define HVMTRACE_LONG_3D(evt, vcpu, d1, d2, d3, ...)      \
-                   HVMTRACE_4D(evt ## 64, vcpu, d1, d2, d3)
-#define HVMTRACE_LONG_4D(evt, vcpu, d1, d2, d3, d4, ...)  \
-                   HVMTRACE_5D(evt ## 64, vcpu, d1, d2, d3, d4)
+#define HVMTRACE_LONG_1D(evt, d1)                  \
+                   HVMTRACE_2D(evt ## 64, (d1) & 0xFFFFFFFF, (d1) >> 32)
+#define HVMTRACE_LONG_2D(evt, d1, d2, ...)              \
+                   HVMTRACE_3D(evt ## 64, d1, d2)
+#define HVMTRACE_LONG_3D(evt, d1, d2, d3, ...)      \
+                   HVMTRACE_4D(evt ## 64, d1, d2, d3)
+#define HVMTRACE_LONG_4D(evt, d1, d2, d3, d4, ...)  \
+                   HVMTRACE_5D(evt ## 64, d1, d2, d3, d4)
 #else
 #define HVMTRACE_LONG_1D HVMTRACE_1D
 #define HVMTRACE_LONG_2D HVMTRACE_2D
