WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-4.0-testing] Eliminate unnecessary casts from __trace_var() invocations

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-4.0-testing] Eliminate unnecessary casts from __trace_var() invocations
From: "Xen patchbot-4.0-testing" <patchbot-4.0-testing@xxxxxxxxxxxxxxxxxxx>
Date: Sun, 17 Oct 2010 05:55:16 -0700
Delivery-date: Sun, 17 Oct 2010 05:56:30 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1285010398 -3600
# Node ID da327d3fbc640d3de43d4280370e1571276329e2
# Parent  51cb21e3da8a13df4c72654f5c29b01cd2b8b0b0
Eliminate unnecessary casts from __trace_var() invocations

This is possible now that its last parameter's type is 'const void *'.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
xen-unstable changeset:   22184:62a44418d8a0
xen-unstable date:        Mon Sep 20 18:52:48 2010 +0100
---
 xen/arch/x86/hvm/svm/svm.c      |    2 -
 xen/arch/x86/hvm/vmx/vmx.c      |    2 -
 xen/arch/x86/mm/p2m.c           |   10 ++++-----
 xen/arch/x86/mm/shadow/common.c |    7 ++----
 xen/arch/x86/mm/shadow/multi.c  |   10 ++++-----
 xen/arch/x86/trace.c            |   41 ++++++++++++++++++----------------------
 xen/common/memory.c             |    2 -
 xen/common/schedule.c           |    5 +---
 xen/include/asm-x86/hvm/trace.h |    2 -
 9 files changed, 38 insertions(+), 43 deletions(-)

diff -r 51cb21e3da8a -r da327d3fbc64 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Mon Sep 20 20:19:01 2010 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Mon Sep 20 20:19:58 2010 +0100
@@ -932,7 +932,7 @@ static void svm_do_nested_pgfault(paddr_
         _d.qualification = 0;
         _d.mfn = mfn_x(gfn_to_mfn_query(current->domain, gfn, &_d.p2mt));
         
-        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
+        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     }
 
     if ( hvm_hap_nested_page_fault(gfn) )
diff -r 51cb21e3da8a -r da327d3fbc64 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Mon Sep 20 20:19:01 2010 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Mon Sep 20 20:19:58 2010 +0100
@@ -2129,7 +2129,7 @@ static void ept_handle_violation(unsigne
         _d.qualification = qualification;
         _d.mfn = mfn_x(gfn_to_mfn_query(current->domain, gfn, &_d.p2mt));
         
-        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
+        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     }
 
     if ( (qualification & EPT_GLA_VALID) &&
diff -r 51cb21e3da8a -r da327d3fbc64 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Mon Sep 20 20:19:01 2010 +0100
+++ b/xen/arch/x86/mm/p2m.c     Mon Sep 20 20:19:58 2010 +0100
@@ -841,7 +841,7 @@ p2m_pod_zero_check_superpage(struct doma
         t.d = d->domain_id;
         t.order = 9;
 
-        __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), (unsigned char *)&t);
+        __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), &t);
     }
 
     /* Finally!  We've passed all the checks, and can add the mfn superpage
@@ -955,7 +955,7 @@ p2m_pod_zero_check(struct domain *d, uns
                 t.d = d->domain_id;
                 t.order = 0;
         
-                __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), (unsigned char *)&t);
+                __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), &t);
             }
 
             /* Add to cache, and account for the new p2m PoD entry */
@@ -1115,7 +1115,7 @@ p2m_pod_demand_populate(struct domain *d
         t.d = d->domain_id;
         t.order = order;
         
-        __trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), (unsigned char *)&t);
+        __trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), &t);
     }
 
     return 0;
@@ -1146,7 +1146,7 @@ remap_and_retry:
         t.gfn = gfn;
         t.d = d->domain_id;
         
-        __trace_var(TRC_MEM_POD_SUPERPAGE_SPLINTER, 0, sizeof(t), (unsigned char *)&t);
+        __trace_var(TRC_MEM_POD_SUPERPAGE_SPLINTER, 0, sizeof(t), &t);
     }
 
     return 0;
@@ -1212,7 +1212,7 @@ p2m_set_entry(struct domain *d, unsigned
         t.d = d->domain_id;
         t.order = page_order;
 
-        __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), (unsigned char *)&t);
+        __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
     }
 
 #if CONFIG_PAGING_LEVELS >= 4
diff -r 51cb21e3da8a -r da327d3fbc64 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Mon Sep 20 20:19:01 2010 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Mon Sep 20 20:19:58 2010 +0100
@@ -718,7 +718,7 @@ static inline void trace_resync(int even
     {
         /* Convert gmfn to gfn */
         unsigned long gfn = mfn_to_gfn(current->domain, gmfn);
-        __trace_var(event, 0/*!tsc*/, sizeof(gfn), (unsigned char*)&gfn);
+        __trace_var(event, 0/*!tsc*/, sizeof(gfn), &gfn);
     }
 }
 
@@ -1348,8 +1348,7 @@ static inline void trace_shadow_prealloc
         unsigned long gfn;
         ASSERT(mfn_valid(smfn));
         gfn = mfn_to_gfn(d, backpointer(mfn_to_page(smfn)));
-        __trace_var(TRC_SHADOW_PREALLOC_UNPIN, 0/*!tsc*/,
-                    sizeof(gfn), (unsigned char*)&gfn);
+        __trace_var(TRC_SHADOW_PREALLOC_UNPIN, 0/*!tsc*/, sizeof(gfn), &gfn);
     }
 }
 
@@ -2293,7 +2292,7 @@ static inline void trace_shadow_wrmap_bf
     {
         /* Convert gmfn to gfn */
         unsigned long gfn = mfn_to_gfn(current->domain, gmfn);
-        __trace_var(TRC_SHADOW_WRMAP_BF, 0/*!tsc*/, sizeof(gfn), (unsigned char*)&gfn);
+        __trace_var(TRC_SHADOW_WRMAP_BF, 0/*!tsc*/, sizeof(gfn), &gfn);
     }
 }
 
diff -r 51cb21e3da8a -r da327d3fbc64 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Mon Sep 20 20:19:01 2010 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Mon Sep 20 20:19:58 2010 +0100
@@ -2847,7 +2847,7 @@ static inline void trace_shadow_gen(u32 
     if ( tb_init_done )
     {
         event |= (GUEST_PAGING_LEVELS-2)<<8;
-        __trace_var(event, 0/*!tsc*/, sizeof(va), (unsigned char*)&va);
+        __trace_var(event, 0/*!tsc*/, sizeof(va), &va);
     }
 }
 
@@ -2871,7 +2871,7 @@ static inline void trace_shadow_fixup(gu
         d.va = va;
         d.flags = this_cpu(trace_shadow_path_flags);
 
-        __trace_var(event, 0/*!tsc*/, sizeof(d), (unsigned char*)&d);
+        __trace_var(event, 0/*!tsc*/, sizeof(d), &d);
     }
 }
                                           
@@ -2895,7 +2895,7 @@ static inline void trace_not_shadow_faul
         d.va = va;
         d.flags = this_cpu(trace_shadow_path_flags);
 
-        __trace_var(event, 0/*!tsc*/, sizeof(d), (unsigned char*)&d);
+        __trace_var(event, 0/*!tsc*/, sizeof(d), &d);
     }
 }
                                           
@@ -2921,7 +2921,7 @@ static inline void trace_shadow_emulate_
         d.gfn=gfn_x(gfn);
         d.va = va;
 
-        __trace_var(event, 0/*!tsc*/, sizeof(d), (unsigned char*)&d);
+        __trace_var(event, 0/*!tsc*/, sizeof(d), &d);
     }
 }
 
@@ -2954,7 +2954,7 @@ static inline void trace_shadow_emulate(
 #endif
         d.flags = this_cpu(trace_shadow_path_flags);
 
-        __trace_var(event, 0/*!tsc*/, sizeof(d), (unsigned char*)&d);
+        __trace_var(event, 0/*!tsc*/, sizeof(d), &d);
     }
 }
 
diff -r 51cb21e3da8a -r da327d3fbc64 xen/arch/x86/trace.c
--- a/xen/arch/x86/trace.c      Mon Sep 20 20:19:01 2010 +0100
+++ b/xen/arch/x86/trace.c      Mon Sep 20 20:19:58 2010 +0100
@@ -25,8 +25,7 @@ asmlinkage void trace_hypercall(void)
         d.eip = regs->eip;
         d.eax = regs->eax;
 
-        __trace_var(TRC_PV_HYPERCALL, 1,
-                    sizeof(d), (unsigned char *)&d);
+        __trace_var(TRC_PV_HYPERCALL, 1, sizeof(d), &d);
     }
     else
 #endif
@@ -42,7 +41,7 @@ asmlinkage void trace_hypercall(void)
         d.eip = regs->eip;
         d.eax = regs->eax;
 
-        __trace_var(event, 1/*tsc*/, sizeof(d), (unsigned char*)&d);
+        __trace_var(event, 1/*tsc*/, sizeof(d), &d);
     }
 }
 
@@ -64,8 +63,7 @@ void __trace_pv_trap(int trapnr, unsigne
         d.error_code = error_code;
         d.use_error_code=!!use_error_code;
                 
-        __trace_var(TRC_PV_TRAP, 1,
-                    sizeof(d), (unsigned char *)&d);
+        __trace_var(TRC_PV_TRAP, 1, sizeof(d), &d);
     }
     else
 #endif        
@@ -85,7 +83,7 @@ void __trace_pv_trap(int trapnr, unsigne
                 
         event = TRC_PV_TRAP;
         event |= TRC_64_FLAG;
-        __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
+        __trace_var(event, 1, sizeof(d), &d);
     }
 }
 
@@ -104,7 +102,7 @@ void __trace_pv_page_fault(unsigned long
         d.addr = addr;
         d.error_code = error_code;
                 
-        __trace_var(TRC_PV_PAGE_FAULT, 1, sizeof(d), (unsigned char *)&d);
+        __trace_var(TRC_PV_PAGE_FAULT, 1, sizeof(d), &d);
     }
     else
 #endif        
@@ -120,7 +118,7 @@ void __trace_pv_page_fault(unsigned long
         d.error_code = error_code;
         event = TRC_PV_PAGE_FAULT;
         event |= TRC_64_FLAG;
-        __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
+        __trace_var(event, 1, sizeof(d), &d);
     }
 }
 
@@ -130,13 +128,13 @@ void __trace_trap_one_addr(unsigned even
     if ( is_pv_32on64_vcpu(current) )
     {
         u32 d = va;
-        __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
-    }
-    else
-#endif        
-    {
-        event |= TRC_64_FLAG;
-        __trace_var(event, 1, sizeof(va), (unsigned char *)&va);
+        __trace_var(event, 1, sizeof(d), &d);
+    }
+    else
+#endif        
+    {
+        event |= TRC_64_FLAG;
+        __trace_var(event, 1, sizeof(va), &va);
     }
 }
 
@@ -151,7 +149,7 @@ void __trace_trap_two_addr(unsigned even
         } __attribute__((packed)) d;
         d.va1=va1;
         d.va2=va2;
-        __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
+        __trace_var(event, 1, sizeof(d), &d);
     }
     else
 #endif        
@@ -162,7 +160,7 @@ void __trace_trap_two_addr(unsigned even
         d.va1=va1;
         d.va2=va2;
         event |= TRC_64_FLAG;
-        __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
+        __trace_var(event, 1, sizeof(d), &d);
     }
 }
 
@@ -189,8 +187,7 @@ void __trace_ptwr_emulation(unsigned lon
         d.eip = eip;
         d.pte = npte;
 
-        __trace_var(TRC_PV_PTWR_EMULATION_PAE, 1,
-                    sizeof(d), (unsigned char *)&d);
+        __trace_var(TRC_PV_PTWR_EMULATION_PAE, 1, sizeof(d), &d);
     }
     else
 #endif        
@@ -208,6 +205,6 @@ void __trace_ptwr_emulation(unsigned lon
         event = ((CONFIG_PAGING_LEVELS == 3) ?
                  TRC_PV_PTWR_EMULATION_PAE : TRC_PV_PTWR_EMULATION);
         event |= TRC_64_FLAG;
-        __trace_var(event, 1/*tsc*/, sizeof(d), (unsigned char *)&d);
-    }
-}
+        __trace_var(event, 1/*tsc*/, sizeof(d), &d);
+    }
+}
diff -r 51cb21e3da8a -r da327d3fbc64 xen/common/memory.c
--- a/xen/common/memory.c       Mon Sep 20 20:19:01 2010 +0100
+++ b/xen/common/memory.c       Mon Sep 20 20:19:58 2010 +0100
@@ -234,7 +234,7 @@ static void decrease_reservation(struct 
             t.d = a->domain->domain_id;
             t.order = a->extent_order;
         
-            __trace_var(TRC_MEM_DECREASE_RESERVATION, 0, sizeof(t), (unsigned char *)&t);
+            __trace_var(TRC_MEM_DECREASE_RESERVATION, 0, sizeof(t), &t);
         }
 
         /* See if populate-on-demand wants to handle this */
diff -r 51cb21e3da8a -r da327d3fbc64 xen/common/schedule.c
--- a/xen/common/schedule.c     Mon Sep 20 20:19:01 2010 +0100
+++ b/xen/common/schedule.c     Mon Sep 20 20:19:58 2010 +0100
@@ -83,7 +83,7 @@ static inline void trace_runstate_change
     event |= ( v->runstate.state & 0x3 ) << 8;
     event |= ( new_state & 0x3 ) << 4;
 
-    __trace_var(event, 1/*tsc*/, sizeof(d), (unsigned char *)&d);
+    __trace_var(event, 1/*tsc*/, sizeof(d), &d);
 }
 
 static inline void trace_continue_running(struct vcpu *v)
@@ -96,8 +96,7 @@ static inline void trace_continue_runnin
     d.vcpu = v->vcpu_id;
     d.domain = v->domain->domain_id;
 
-    __trace_var(TRC_SCHED_CONTINUE_RUNNING, 1/*tsc*/, sizeof(d),
-                (unsigned char *)&d);
+    __trace_var(TRC_SCHED_CONTINUE_RUNNING, 1/*tsc*/, sizeof(d), &d);
 }
 
 static inline void vcpu_urgent_count_update(struct vcpu *v)
diff -r 51cb21e3da8a -r da327d3fbc64 xen/include/asm-x86/hvm/trace.h
--- a/xen/include/asm-x86/hvm/trace.h   Mon Sep 20 20:19:01 2010 +0100
+++ b/xen/include/asm-x86/hvm/trace.h   Mon Sep 20 20:19:58 2010 +0100
@@ -71,7 +71,7 @@
             _d.d[4]=(d5);                                               \
             _d.d[5]=(d6);                                               \
             __trace_var(TRC_HVM_ ## evt, cycles,                        \
-                        sizeof(u32)*count+1, (unsigned char *)&_d);     \
+                        sizeof(u32)*count+1, &_d);                      \
         }                                                               \
     } while(0)
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [xen-4.0-testing] Eliminate unnecessary casts from __trace_var() invocations, Xen patchbot-4.0-testing <=