To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [IA64] convert more privop_stat to perfc
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 09 Aug 2006 22:20:34 +0000
Delivery-date: Wed, 09 Aug 2006 15:25:10 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 7cde0d938ef49e2829d48fe5031d07d1234e0068
# Parent  e7394daf098db8123e3e0d705410dac7b134a81c
[IA64] convert more privop_stat to perfc

Convert most privop stats to perfc.

Signed-off-by: Tristan Gingold <tristan.gingold@xxxxxxxx>
---
 xen/arch/ia64/asm-offsets.c        |    2 
 xen/arch/ia64/xen/domain.c         |    1 
 xen/arch/ia64/xen/faults.c         |    6 -
 xen/arch/ia64/xen/hyperprivop.S    |  171 ++++++++++++++++---------------
 xen/arch/ia64/xen/privop.c         |   44 ++++----
 xen/arch/ia64/xen/privop_stat.c    |  203 -------------------------------------
 xen/arch/ia64/xen/vhpt.c           |    5 
 xen/common/page_alloc.c            |    2 
 xen/include/asm-ia64/perfc_defn.h  |   43 +++++--
 xen/include/asm-ia64/privop_stat.h |   26 ----
 xen/include/asm-ia64/vhpt.h        |    1 
 11 files changed, 153 insertions(+), 351 deletions(-)
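
A note for readers not familiar with Xen's perfc machinery: perfc_defn.h is an
X-macro header (hence its "legitimately included multiple times" comment); it
is expanded once to lay out the fields of struct perfcounter, whose offsets
asm-offsets.c exports to assembly, and presumably again to build the
description strings used when the counters are dumped.  Counters are then
bumped with perfc_incrc()/perfc_incra() instead of the ad-hoc arrays this
patch removes.  The stand-alone C program below is only a minimal sketch of
that pattern; the mini_* names, types and expansions are simplified
assumptions, not the real xen/include/xen/perfc.h.

/* Minimal, self-contained illustration of the X-macro pattern behind
 * perfc_defn.h -- NOT the real Xen implementation. */
#include <stdio.h>

/* Counter list, expanded more than once (like perfc_defn.h). */
#define MINI_PERFC_DEFN                                           \
        PERFCOUNTER(rfi,          "privop rfi")                   \
        PERFCOUNTER(cover,        "privop cover")                 \
        PERFCOUNTER_ARRAY(slow_reflect, "slow reflection", 0x80)

/* Expansion 1: the struct whose field offsets asm-offsets.c exports. */
struct mini_perfcounter {
#define PERFCOUNTER(var, desc)          unsigned int var;
#define PERFCOUNTER_ARRAY(var, desc, n) unsigned int var[n];
        MINI_PERFC_DEFN
#undef PERFCOUNTER
#undef PERFCOUNTER_ARRAY
};

static struct mini_perfcounter perfcounters;

/* Increment helpers in the spirit of perfc_incrc()/perfc_incra(). */
#define mini_perfc_incrc(x)     (perfcounters.x++)
#define mini_perfc_incra(x, i)  (perfcounters.x[(i)]++)

int main(void)
{
        mini_perfc_incrc(rfi);          /* was: privcnt.rfi++ */
        /* was: slow_reflect_count[vector >> 8]++ */
        mini_perfc_incra(slow_reflect, 0x3000 >> 8);
        printf("rfi=%u slow_reflect[0x30]=%u\n",
               perfcounters.rfi, perfcounters.slow_reflect[0x30]);
        return 0;
}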

diff -r e7394daf098d -r 7cde0d938ef4 xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c       Fri Aug 04 08:37:24 2006 -0600
+++ b/xen/arch/ia64/asm-offsets.c       Fri Aug 04 09:02:43 2006 -0600
@@ -215,5 +215,7 @@ void foo(void)
        BLANK();
        DEFINE(RECOVER_TO_PAGE_FAULT_PERFC_OFS, offsetof (struct perfcounter, recover_to_page_fault));
        DEFINE(RECOVER_TO_BREAK_FAULT_PERFC_OFS, offsetof (struct perfcounter, recover_to_break_fault));
+       DEFINE(FAST_HYPERPRIVOP_PERFC_OFS, offsetof (struct perfcounter, fast_hyperprivop));
+       DEFINE(FAST_REFLECT_PERFC_OFS, offsetof (struct perfcounter, fast_reflect));
 #endif
 }
diff -r e7394daf098d -r 7cde0d938ef4 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Fri Aug 04 08:37:24 2006 -0600
+++ b/xen/arch/ia64/xen/domain.c        Fri Aug 04 09:02:43 2006 -0600
@@ -136,7 +136,6 @@ void context_switch(struct vcpu *prev, s
     uint64_t pta;
 
     local_irq_save(spsr);
-    perfc_incrc(context_switch);
 
     __ia64_save_fpu(prev->arch._thread.fph);
     __ia64_load_fpu(next->arch._thread.fph);
diff -r e7394daf098d -r 7cde0d938ef4 xen/arch/ia64/xen/faults.c
--- a/xen/arch/ia64/xen/faults.c        Fri Aug 04 08:37:24 2006 -0600
+++ b/xen/arch/ia64/xen/faults.c        Fri Aug 04 09:02:43 2006 -0600
@@ -51,8 +51,6 @@ extern IA64FAULT ia64_hypercall(struct p
 
 extern void do_ssc(unsigned long ssc, struct pt_regs *regs);
 
-#define inc_slow_reflect_count(vec) slow_reflect_count[vec>>8]++;
-
 // should never panic domain... if it does, stack may have been overrun
 void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
 {
@@ -92,7 +90,7 @@ void reflect_interruption(unsigned long 
        v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v,interrupt_collection_enabled) = 0;
 
-       inc_slow_reflect_count(vector);
+       perfc_incra(slow_reflect, vector >> 8);
 }
 
 static unsigned long pending_false_positive = 0;
@@ -247,7 +245,7 @@ void ia64_do_page_fault (unsigned long a
                regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
                // NOTE: nested trap must NOT pass PSCB address
                //regs->r31 = (unsigned long) &PSCB(current);
-               inc_slow_reflect_count(fault);
+               perfc_incra(slow_reflect, fault >> 8);
                return;
        }
 
diff -r e7394daf098d -r 7cde0d938ef4 xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S   Fri Aug 04 08:37:24 2006 -0600
+++ b/xen/arch/ia64/xen/hyperprivop.S   Fri Aug 04 09:02:43 2006 -0600
@@ -22,34 +22,39 @@
 #define _PAGE_PL_2     (2<<7)
 
 #if 1   // change to 0 to turn off all fast paths
-#define FAST_HYPERPRIVOPS
-#define FAST_HYPERPRIVOP_CNT
-#define FAST_REFLECT_CNT
+# define FAST_HYPERPRIVOPS
+# ifdef PERF_COUNTERS
+#  define FAST_HYPERPRIVOP_CNT
+#  define FAST_HYPERPRIVOP_PERFC(N) \
+       (perfcounters + FAST_HYPERPRIVOP_PERFC_OFS + (4 * N))
+#  define FAST_REFLECT_CNT
+# endif
+       
 //#define FAST_TICK // mostly working (unat problems) but default off for now
 //#define FAST_TLB_MISS_REFLECT        // mostly working but default off for now
-#ifdef CONFIG_XEN_IA64_DOM0_VP
-#undef FAST_ITC        //XXX CONFIG_XEN_IA64_DOM0_VP
+# ifdef CONFIG_XEN_IA64_DOM0_VP
+#  undef FAST_ITC      //XXX CONFIG_XEN_IA64_DOM0_VP
                //    TODO fast_itc doesn't suport dom0 vp yet.
-#else
-//#define FAST_ITC     // to be reviewed
-#endif
-#define FAST_BREAK
-#ifndef CONFIG_XEN_IA64_DOM0_VP
-# define FAST_ACCESS_REFLECT
-#else
-# undef FAST_ACCESS_REFLECT //XXX CONFIG_XEN_IA64_DOM0_VP
+# else
+//#  define FAST_ITC   // to be reviewed
+# endif
+# define FAST_BREAK
+# ifndef CONFIG_XEN_IA64_DOM0_VP
+#  define FAST_ACCESS_REFLECT
+# else
+#  undef FAST_ACCESS_REFLECT //XXX CONFIG_XEN_IA64_DOM0_VP
                             //    TODO fast_access_reflect
                             //    doesn't support dom0 vp yet.
-#endif
-#define FAST_RFI
-#define FAST_SSM_I
-#define FAST_PTC_GA
-#undef RFI_TO_INTERRUPT // not working yet
+# endif
+# define FAST_RFI
+# define FAST_SSM_I
+# define FAST_PTC_GA
+# undef RFI_TO_INTERRUPT // not working yet
 #endif
 
 #ifdef CONFIG_SMP
-//#warning "FIXME: ptc.ga instruction requires spinlock for SMP"
-#undef FAST_PTC_GA
+ //#warning "FIXME: ptc.ga instruction requires spinlock for SMP"
+ #undef FAST_PTC_GA
 #endif
 
 // FIXME: turn off for now... but NaTs may crash Xen so re-enable soon!
@@ -237,10 +242,10 @@ ENTRY(hyper_ssm_i)
        cmp.ne p7,p0=r21,r0
 (p7)   br.sptk.many dispatch_break_fault ;;
 #ifdef FAST_HYPERPRIVOP_CNT
-       movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_SSM_I);;
-       ld8 r21=[r20];;
+       movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SSM_I);;
+       ld4 r21=[r20];;
        adds r21=1,r21;;
-       st8 [r20]=r21;;
+       st4 [r20]=r21;;
 #endif
        // set shared_mem iip to instruction after HYPER_SSM_I
        extr.u r20=r30,41,2 ;;
@@ -373,10 +378,10 @@ GLOBAL_ENTRY(fast_tick_reflect)
        mov rp=r29;;
        mov cr.itm=r26;;        // ensure next tick
 #ifdef FAST_REFLECT_CNT
-       movl r20=fast_reflect_count+((0x3000>>8)*8);;
-       ld8 r21=[r20];;
+       movl r20=perfcounters+FAST_REFLECT_PERFC_OFS+((0x3000>>8)*4);;
+       ld4 r21=[r20];;
        adds r21=1,r21;;
-       st8 [r20]=r21;;
+       st4 [r20]=r21;;
 #endif
        // vcpu_pend_timer(current)
        movl r18=THIS_CPU(current_psr_ic_addr)
@@ -611,12 +616,12 @@ END(fast_break_reflect)
 //     r31 == pr
 ENTRY(fast_reflect)
 #ifdef FAST_REFLECT_CNT
-       movl r22=fast_reflect_count;
-       shr r23=r20,5;;
+       movl r22=perfcounters+FAST_REFLECT_PERFC_OFS;
+       shr r23=r20,8-2;;
        add r22=r22,r23;;
-       ld8 r21=[r22];;
+       ld4 r21=[r22];;
        adds r21=1,r21;;
-       st8 [r22]=r21;;
+       st4 [r22]=r21;;
 #endif
        // save iip in shared_info (DON'T POINT TO NEXT INSTRUCTION!)
        adds r21=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
@@ -948,10 +953,10 @@ 1:        // check the guest VHPT
 (p7)   br.cond.spnt.few page_not_present;;
 
 #ifdef FAST_REFLECT_CNT
-       movl r21=fast_vhpt_translate_count;;
-       ld8 r22=[r21];;
+       movl r21=perfcounter+FAST_VHPT_TRANSLATE_PERFC_OFS;;
+       ld4 r22=[r21];;
        adds r22=1,r22;;
-       st8 [r21]=r22;;
+       st4 [r21]=r22;;
 #endif
 
 // prepare for fast_insert(PSCB(ifa),PSCB(itir),r16=pte)
@@ -979,9 +984,9 @@ ENTRY(recover_and_page_fault)
 ENTRY(recover_and_page_fault)
 #ifdef PERF_COUNTERS
        movl r21=perfcounters + RECOVER_TO_PAGE_FAULT_PERFC_OFS;;
-       ld8 r22=[r21];;
+       ld4 r22=[r21];;
        adds r22=1,r22;;
-       st8 [r21]=r22;;
+       st4 [r21]=r22;;
 #endif
        mov b0=r29;;
        br.cond.sptk.many page_fault;;
@@ -1083,10 +1088,10 @@ 1:
 
 1:     // OK now, let's do an rfi.
 #ifdef FAST_HYPERPRIVOP_CNT
-       movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_RFI);;
-       ld8 r23=[r20];;
+       movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_RFI);;
+       ld4 r23=[r20];;
        adds r23=1,r23;;
-       st8 [r20]=r23;;
+       st4 [r20]=r23;;
 #endif
 #ifdef RFI_TO_INTERRUPT
        // maybe do an immediate interrupt delivery?
@@ -1339,10 +1344,10 @@ END(rfi_with_interrupt)
 
 ENTRY(hyper_cover)
 #ifdef FAST_HYPERPRIVOP_CNT
-       movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_COVER);;
-       ld8 r21=[r20];;
+       movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_COVER);;
+       ld4 r21=[r20];;
        adds r21=1,r21;;
-       st8 [r20]=r21;;
+       st4 [r20]=r21;;
 #endif
        mov r24=cr.ipsr
        mov r25=cr.iip;;
@@ -1375,10 +1380,10 @@ END(hyper_cover)
 // return from metaphysical mode (meta=1) to virtual mode (meta=0)
 ENTRY(hyper_ssm_dt)
 #ifdef FAST_HYPERPRIVOP_CNT
-       movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_SSM_DT);;
-       ld8 r21=[r20];;
+       movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SSM_DT);;
+       ld4 r21=[r20];;
        adds r21=1,r21;;
-       st8 [r20]=r21;;
+       st4 [r20]=r21;;
 #endif
        mov r24=cr.ipsr
        mov r25=cr.iip;;
@@ -1412,10 +1417,10 @@ END(hyper_ssm_dt)
 // go to metaphysical mode (meta=1) from virtual mode (meta=0)
 ENTRY(hyper_rsm_dt)
 #ifdef FAST_HYPERPRIVOP_CNT
-       movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_RSM_DT);;
-       ld8 r21=[r20];;
+       movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_RSM_DT);;
+       ld4 r21=[r20];;
        adds r21=1,r21;;
-       st8 [r20]=r21;;
+       st4 [r20]=r21;;
 #endif
        mov r24=cr.ipsr
        mov r25=cr.iip;;
@@ -1449,10 +1454,10 @@ END(hyper_rsm_dt)
 
 ENTRY(hyper_get_tpr)
 #ifdef FAST_HYPERPRIVOP_CNT
-       movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_GET_TPR);;
-       ld8 r21=[r20];;
+       movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_GET_TPR);;
+       ld4 r21=[r20];;
        adds r21=1,r21;;
-       st8 [r20]=r21;;
+       st4 [r20]=r21;;
 #endif
        mov r24=cr.ipsr
        mov r25=cr.iip;;
@@ -1478,10 +1483,10 @@ END(hyper_get_tpr)
 // (or accidentally missing) delivering an interrupt
 ENTRY(hyper_set_tpr)
 #ifdef FAST_HYPERPRIVOP_CNT
-       movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_SET_TPR);;
-       ld8 r21=[r20];;
+       movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SET_TPR);;
+       ld4 r21=[r20];;
        adds r21=1,r21;;
-       st8 [r20]=r21;;
+       st4 [r20]=r21;;
 #endif
        mov r24=cr.ipsr
        mov r25=cr.iip;;
@@ -1506,10 +1511,10 @@ END(hyper_set_tpr)
 
 ENTRY(hyper_get_ivr)
 #ifdef FAST_HYPERPRIVOP_CNT
-       movl r22=fast_hyperpriv_cnt+(8*HYPERPRIVOP_GET_IVR);;
-       ld8 r21=[r22];;
+       movl r22=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_GET_IVR);;
+       ld4 r21=[r22];;
        adds r21=1,r21;;
-       st8 [r22]=r21;;
+       st4 [r22]=r21;;
 #endif
        mov r8=15;;
        // when we get to here r20=~=interrupts pending
@@ -1618,10 +1623,10 @@ ENTRY(hyper_eoi)
        cmp.ne p7,p0=r20,r0
 (p7)   br.spnt.many dispatch_break_fault ;;
 #ifdef FAST_HYPERPRIVOP_CNT
-       movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_EOI);;
-       ld8 r21=[r20];;
+       movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_EOI);;
+       ld4 r21=[r20];;
        adds r21=1,r21;;
-       st8 [r20]=r21;;
+       st4 [r20]=r21;;
 #endif
        movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
        ld8 r22=[r22];;
@@ -1682,10 +1687,10 @@ ENTRY(hyper_set_itm)
        cmp.ne p7,p0=r20,r0
 (p7)   br.spnt.many dispatch_break_fault ;;
 #ifdef FAST_HYPERPRIVOP_CNT
-       movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_SET_ITM);;
-       ld8 r21=[r20];;
+       movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SET_ITM);;
+       ld4 r21=[r20];;
        adds r21=1,r21;;
-       st8 [r20]=r21;;
+       st4 [r20]=r21;;
 #endif
        movl r20=THIS_CPU(cpu_info)+IA64_CPUINFO_ITM_NEXT_OFFSET;;
        ld8 r21=[r20];;
@@ -1723,10 +1728,10 @@ END(hyper_set_itm)
 
 ENTRY(hyper_get_rr)
 #ifdef FAST_HYPERPRIVOP_CNT
-       movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_GET_RR);;
-       ld8 r21=[r20];;
+       movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_GET_RR);;
+       ld4 r21=[r20];;
        adds r21=1,r21;;
-       st8 [r20]=r21;;
+       st4 [r20]=r21;;
 #endif
        extr.u r25=r8,61,3;;
        adds r20=XSI_RR0_OFS-XSI_PSR_IC_OFS,r18 ;;
@@ -1755,10 +1760,10 @@ ENTRY(hyper_set_rr)
        cmp.leu p7,p0=7,r25     // punt on setting rr7
 (p7)   br.spnt.many dispatch_break_fault ;;
 #ifdef FAST_HYPERPRIVOP_CNT
-       movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_SET_RR);;
-       ld8 r21=[r20];;
+       movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SET_RR);;
+       ld4 r21=[r20];;
        adds r21=1,r21;;
-       st8 [r20]=r21;;
+       st4 [r20]=r21;;
 #endif
        extr.u r26=r9,8,24      // r26 = r9.rid
        movl r20=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
@@ -1813,10 +1818,10 @@ ENTRY(hyper_set_kr)
        cmp.ne p7,p0=r0,r25     // if kr# > 7, go slow way
 (p7)   br.spnt.many dispatch_break_fault ;;
 #ifdef FAST_HYPERPRIVOP_CNT
-       movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_SET_KR);;
-       ld8 r21=[r20];;
+       movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SET_KR);;
+       ld4 r21=[r20];;
        adds r21=1,r21;;
-       st8 [r20]=r21;;
+       st4 [r20]=r21;;
 #endif
        adds r21=XSI_KR0_OFS-XSI_PSR_IC_OFS,r18 ;;
        shl r20=r8,3;;
@@ -1871,10 +1876,10 @@ END(hyper_set_kr)
 //     r31 == pr
 ENTRY(hyper_thash)
 #ifdef FAST_HYPERPRIVOP_CNT
-       movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_THASH);;
-       ld8 r21=[r20];;
+       movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_THASH);;
+       ld4 r21=[r20];;
        adds r21=1,r21;;
-       st8 [r20]=r21;;
+       st4 [r20]=r21;;
 #endif
        shr.u r20 = r8, 61
        addl r25 = 1, r0
@@ -1940,10 +1945,10 @@ ENTRY(hyper_ptc_ga)
 #endif
        // FIXME: validate not flushing Xen addresses
 #ifdef FAST_HYPERPRIVOP_CNT
-       movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_PTC_GA);;
-       ld8 r21=[r20];;
+       movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_PTC_GA);;
+       ld4 r21=[r20];;
        adds r21=1,r21;;
-       st8 [r20]=r21;;
+       st4 [r20]=r21;;
 #endif
        mov r28=r8
        extr.u r19=r9,2,6               // addr_range=1<<((r9&0xfc)>>2)
@@ -2011,9 +2016,9 @@ ENTRY(recover_and_dispatch_break_fault)
 ENTRY(recover_and_dispatch_break_fault)
 #ifdef PERF_COUNTERS
        movl r21=perfcounters + RECOVER_TO_BREAK_FAULT_PERFC_OFS;;
-       ld8 r22=[r21];;
+       ld4 r22=[r21];;
        adds r22=1,r22;;
-       st8 [r21]=r22;;
+       st4 [r21]=r22;;
 #endif
        mov b0=r29 ;;
        br.sptk.many dispatch_break_fault;;
@@ -2054,11 +2059,11 @@ hyper_itc_d:
 (p7)   br.spnt.many dispatch_break_fault ;;
 #ifdef FAST_HYPERPRIVOP_CNT
        cmp.eq p6,p7=HYPERPRIVOP_ITC_D,r17;;
-(p6)   movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_ITC_D);;
-(p7)   movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_ITC_I);;
-       ld8 r21=[r20];;
+(p6)   movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_ITC_D);;
+(p7)   movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_ITC_I);;
+       ld4 r21=[r20];;
        adds r21=1,r21;;
-       st8 [r20]=r21;;
+       st4 [r20]=r21;;
 #endif
 (p6)   mov r17=2;;
 (p7)   mov r17=3;;
diff -r e7394daf098d -r 7cde0d938ef4 xen/arch/ia64/xen/privop.c
--- a/xen/arch/ia64/xen/privop.c        Fri Aug 04 08:37:24 2006 -0600
+++ b/xen/arch/ia64/xen/privop.c        Fri Aug 04 09:02:43 2006 -0600
@@ -275,7 +275,7 @@ static IA64FAULT priv_mov_to_cr(VCPU *vc
 static IA64FAULT priv_mov_to_cr(VCPU *vcpu, INST64 inst)
 {
        UINT64 val = vcpu_get_gr(vcpu, inst.M32.r2);
-       privcnt.to_cr_cnt[inst.M32.cr3]++;
+       perfc_incra(mov_to_cr, inst.M32.cr3);
        switch (inst.M32.cr3) {
            case 0: return vcpu_set_dcr(vcpu,val);
            case 1: return vcpu_set_itm(vcpu,val);
@@ -417,7 +417,7 @@ static IA64FAULT priv_mov_from_cr(VCPU *
        UINT64 val;
        IA64FAULT fault;
 
-       privcnt.from_cr_cnt[inst.M33.cr3]++;
+       perfc_incra(mov_from_cr, inst.M33.cr3);
        switch (inst.M33.cr3) {
            case 0: return cr_get(dcr);
            case 1: return cr_get(itm);
@@ -563,15 +563,15 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
 #endif
                        if (inst.M29.x3 != 0) break;
                        if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
-                               privcnt.mov_to_ar_imm++;
+                               perfc_incrc(mov_to_ar_imm);
                                return priv_mov_to_ar_imm(vcpu,inst);
                        }
                        if (inst.M44.x4 == 6) {
-                               privcnt.ssm++;
+                               perfc_incrc(ssm);
                                return priv_ssm(vcpu,inst);
                        }
                        if (inst.M44.x4 == 7) {
-                               privcnt.rsm++;
+                               perfc_incrc(rsm);
                                return priv_rsm(vcpu,inst);
                        }
                        break;
@@ -580,8 +580,9 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
                x6 = inst.M29.x6;
                if (x6 == 0x2a) {
                        if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8)
-                               privcnt.mov_from_ar++; // privified mov from kr
-                       else privcnt.mov_to_ar_reg++;
+                               perfc_incrc(mov_from_ar); // privified mov from kr
+                       else
+                               perfc_incrc(mov_to_ar_reg);
                        return priv_mov_to_ar_reg(vcpu,inst);
                }
                if (inst.M29.x3 != 0) break;
@@ -593,31 +594,33 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
                        }
                }
                if (privify_en && x6 == 52 && inst.M28.r3 > 63)
-                       privcnt.fc++;
+                       perfc_incrc(fc);
                else if (privify_en && x6 == 16 && inst.M43.r3 > 63)
-                       privcnt.cpuid++;
-               else privcnt.Mpriv_cnt[x6]++;
+                       perfc_incrc(cpuid);
+               else
+                       perfc_incra(misc_privop, x6);
                return (*pfunc)(vcpu,inst);
                break;
            case B:
                if (inst.generic.major != 0) break;
                if (inst.B8.x6 == 0x08) {
                        IA64FAULT fault;
-                       privcnt.rfi++;
+                       perfc_incrc(rfi);
                        fault = priv_rfi(vcpu,inst);
                        if (fault == IA64_NO_FAULT) fault = IA64_RFI_IN_PROGRESS;
                        return fault;
                }
                if (inst.B8.x6 == 0x0c) {
-                       privcnt.bsw0++;
+                       perfc_incrc(bsw0);
                        return priv_bsw0(vcpu,inst);
                }
                if (inst.B8.x6 == 0x0d) {
-                       privcnt.bsw1++;
+                       perfc_incrc(bsw1);
                        return priv_bsw1(vcpu,inst);
                }
-               if (inst.B8.x6 == 0x0) { // break instr for privified cover
-                       privcnt.cover++;
+               if (inst.B8.x6 == 0x0) {
+                       // break instr for privified cover
+                       perfc_incrc(cover);
                        return priv_cover(vcpu,inst);
                }
                break;
@@ -625,19 +628,20 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
                if (inst.generic.major != 0) break;
 #if 0
                if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
-                       privcnt.cover++;
+                       perfc_incrc(cover);
                        return priv_cover(vcpu,inst);
                }
 #endif
                if (inst.I26.x3 != 0) break;  // I26.x3 == I27.x3
                if (inst.I26.x6 == 0x2a) {
                        if (privify_en && inst.I26.r2 > 63 && inst.I26.ar3 < 8)
-                               privcnt.mov_from_ar++; // privified mov from kr
-                       else privcnt.mov_to_ar_reg++;
+                               perfc_incrc(mov_from_ar); // privified mov from kr
+                       else 
+                               perfc_incrc(mov_to_ar_reg);
                        return priv_mov_to_ar_reg(vcpu,inst);
                }
                if (inst.I27.x6 == 0x0a) {
-                       privcnt.mov_to_ar_imm++;
+                       perfc_incrc(mov_to_ar_imm);
                        return priv_mov_to_ar_imm(vcpu,inst);
                }
                break;
@@ -705,7 +709,7 @@ ia64_hyperprivop(unsigned long iim, REGS
                             iim, regs->cr_iip);
                return 1;
        }
-       slow_hyperpriv_cnt[iim]++;
+       perfc_incra(slow_hyperprivop, iim);
        switch(iim) {
            case HYPERPRIVOP_RFI:
                (void)vcpu_rfi(v);
diff -r e7394daf098d -r 7cde0d938ef4 xen/arch/ia64/xen/privop_stat.c
--- a/xen/arch/ia64/xen/privop_stat.c   Fri Aug 04 08:37:24 2006 -0600
+++ b/xen/arch/ia64/xen/privop_stat.c   Fri Aug 04 09:02:43 2006 -0600
@@ -2,14 +2,6 @@
 #include <asm/vhpt.h>
 #include <xen/lib.h>
 #include <asm/uaccess.h>
-
-unsigned long slow_hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
-unsigned long fast_hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
-
-unsigned long slow_reflect_count[0x80] = { 0 };
-unsigned long fast_reflect_count[0x80] = { 0 };
-
-struct privop_counters privcnt;
 
 #ifdef PRIVOP_ADDR_COUNT
 #define PRIVOP_COUNT_NINSTS 2
@@ -85,6 +77,7 @@ Privileged operation instrumentation rou
 Privileged operation instrumentation routines
 **************************************************************************/
 
+#if 0
 static const char * const Mpriv_str[64] = {
        "mov_to_rr", "mov_to_dbr", "mov_to_ibr", "mov_to_pkr",
        "mov_to_pmc", "mov_to_pmd", "<0x06>", "<0x07>",
@@ -120,185 +113,12 @@ static const char * const cr_str[128] = 
        RS,RS,RS,RS,RS,RS,RS,RS
 };
 
-// FIXME: should use snprintf to ensure no buffer overflow
-static int dump_privop_counts(char *buf)
-{
-       int i, j;
-       unsigned long sum = 0;
-       char *s = buf;
-
-       // this is ugly and should probably produce sorted output
-       // but it will have to do for now
-       sum += privcnt.mov_to_ar_imm; sum += privcnt.mov_to_ar_reg;
-       sum += privcnt.ssm; sum += privcnt.rsm;
-       sum += privcnt.rfi; sum += privcnt.bsw0;
-       sum += privcnt.bsw1; sum += privcnt.cover;
-       for (i=0; i < 64; i++)
-               sum += privcnt.Mpriv_cnt[i];
-       s += sprintf(s,"Privop statistics: (Total privops: %ld)\n",sum);
-       if (privcnt.mov_to_ar_imm)
-               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.mov_to_ar_imm,
-                       "mov_to_ar_imm", (privcnt.mov_to_ar_imm*100L)/sum);
-       if (privcnt.mov_to_ar_reg)
-               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.mov_to_ar_reg,
-                       "mov_to_ar_reg", (privcnt.mov_to_ar_reg*100L)/sum);
-       if (privcnt.mov_from_ar)
-               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.mov_from_ar,
-                       "privified-mov_from_ar", (privcnt.mov_from_ar*100L)/sum);
-       if (privcnt.ssm)
-               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.ssm,
-                       "ssm", (privcnt.ssm*100L)/sum);
-       if (privcnt.rsm)
-               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.rsm,
-                       "rsm", (privcnt.rsm*100L)/sum);
-       if (privcnt.rfi)
-               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.rfi,
-                       "rfi", (privcnt.rfi*100L)/sum);
-       if (privcnt.bsw0)
-               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.bsw0,
-                       "bsw0", (privcnt.bsw0*100L)/sum);
-       if (privcnt.bsw1)
-               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.bsw1,
-                       "bsw1", (privcnt.bsw1*100L)/sum);
-       if (privcnt.cover)
-               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.cover,
-                       "cover", (privcnt.cover*100L)/sum);
-       if (privcnt.fc)
-               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.fc,
-                       "privified-fc", (privcnt.fc*100L)/sum);
-       if (privcnt.cpuid)
-               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.cpuid,
-                       "privified-getcpuid", (privcnt.cpuid*100L)/sum);
-       for (i=0; i < 64; i++) if (privcnt.Mpriv_cnt[i]) {
-               if (!Mpriv_str[i]) s += sprintf(s,"PRIVSTRING NULL!!\n");
-               else s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.Mpriv_cnt[i],
-                       Mpriv_str[i], (privcnt.Mpriv_cnt[i]*100L)/sum);
-               if (i == 0x24) { // mov from CR
-                       s += sprintf(s,"            [");
-                       for (j=0; j < 128; j++) if (privcnt.from_cr_cnt[j]) {
-                               if (!cr_str[j])
-                                       s += sprintf(s,"PRIVSTRING NULL!!\n");
-                               else
-                                       s += sprintf(s,"%s(%ld),",cr_str[j],
-                                                    privcnt.from_cr_cnt[j]);
-                       }
-                       s += sprintf(s,"]\n");
-               }
-               else if (i == 0x2c) { // mov to CR
-                       s += sprintf(s,"            [");
-                       for (j=0; j < 128; j++) if (privcnt.to_cr_cnt[j]) {
-                               if (!cr_str[j])
-                                       s += sprintf(s,"PRIVSTRING NULL!!\n");
-                               else
-                                       s += sprintf(s,"%s(%ld),",cr_str[j],
-                                                    privcnt.to_cr_cnt[j]);
-                       }
-                       s += sprintf(s,"]\n");
-               }
-       }
-       return s - buf;
-}
-
-static int zero_privop_counts(char *buf)
-{
-       int i, j;
-       char *s = buf;
-
-       // this is ugly and should probably produce sorted output
-       // but it will have to do for now
-       privcnt.mov_to_ar_imm = 0;
-       privcnt.mov_to_ar_reg = 0;
-       privcnt.mov_from_ar = 0;
-       privcnt.ssm = 0; privcnt.rsm = 0;
-       privcnt.rfi = 0; privcnt.bsw0 = 0;
-       privcnt.bsw1 = 0; privcnt.cover = 0;
-       privcnt.fc = 0; privcnt.cpuid = 0;
-       for (i=0; i < 64; i++)
-               privcnt.Mpriv_cnt[i] = 0;
-       for (j=0; j < 128; j++)
-               privcnt.from_cr_cnt[j] = 0;
-       for (j=0; j < 128; j++)
-               privcnt.to_cr_cnt[j] = 0;
-       s += sprintf(s,"All privop statistics zeroed\n");
-       return s - buf;
-}
-
 static const char * const hyperpriv_str[HYPERPRIVOP_MAX+1] = {
        0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i", "ssm.i",
        "=ivr", "=tpr", "tpr=", "eoi", "itm=", "thash", "ptc.ga", "itr.d",
        "=rr", "rr=", "kr=", "fc", "=cpuid", "=pmd", "=ar.eflg", "ar.eflg="
 };
-
-
-static int dump_hyperprivop_counts(char *buf)
-{
-       int i;
-       char *s = buf;
-       unsigned long total = 0;
-       for (i = 1; i <= HYPERPRIVOP_MAX; i++)
-               total += slow_hyperpriv_cnt[i];
-       s += sprintf(s,"Slow hyperprivops (total %ld):\n",total);
-       for (i = 1; i <= HYPERPRIVOP_MAX; i++)
-               if (slow_hyperpriv_cnt[i])
-                       s += sprintf(s,"%10ld %s\n",
-                               slow_hyperpriv_cnt[i], hyperpriv_str[i]);
-       total = 0;
-       for (i = 1; i <= HYPERPRIVOP_MAX; i++)
-               total += fast_hyperpriv_cnt[i];
-       s += sprintf(s,"Fast hyperprivops (total %ld):\n",total);
-       for (i = 1; i <= HYPERPRIVOP_MAX; i++)
-               if (fast_hyperpriv_cnt[i])
-                       s += sprintf(s,"%10ld %s\n",
-                               fast_hyperpriv_cnt[i], hyperpriv_str[i]);
-       return s - buf;
-}
-
-static void zero_hyperprivop_counts(void)
-{
-       int i;
-       for (i = 0; i <= HYPERPRIVOP_MAX; i++)
-               slow_hyperpriv_cnt[i] = 0;
-       for (i = 0; i <= HYPERPRIVOP_MAX; i++)
-               fast_hyperpriv_cnt[i] = 0;
-}
-
-static void zero_reflect_counts(void)
-{
-       int i;
-       for (i=0; i < 0x80; i++)
-               slow_reflect_count[i] = 0;
-       for (i=0; i < 0x80; i++)
-               fast_reflect_count[i] = 0;
-}
-
-static int dump_reflect_counts(char *buf)
-{
-       int i,j,cnt;
-       char *s = buf;
-
-       s += sprintf(s,"Slow reflections by vector:\n");
-       for (i = 0, j = 0; i < 0x80; i++) {
-               if ( (cnt = slow_reflect_count[i]) != 0 ) {
-                       s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
-                       if ((j++ & 3) == 3)
-                               s += sprintf(s,"\n");
-               }
-       }
-       if (j & 3)
-               s += sprintf(s,"\n");
-       s += sprintf(s,"Fast reflections by vector:\n");
-       for (i = 0, j = 0; i < 0x80; i++) {
-               if ( (cnt = fast_reflect_count[i]) != 0 ) {
-                       s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
-                       if ((j++ & 3) == 3)
-                               s += sprintf(s,"\n");
-               }
-       }
-       if (j & 3)
-               s += sprintf(s,"\n");
-       return s - buf;
-}
-
+#endif
 
 #define TMPBUFLEN 8*1024
 int dump_privop_counts_to_user(char __user *ubuf, int len)
@@ -309,9 +129,7 @@ int dump_privop_counts_to_user(char __us
        if (len < TMPBUFLEN)
                return -1;
 
-       n = dump_privop_counts(buf);
-       n += dump_hyperprivop_counts(buf + n);
-       n += dump_reflect_counts(buf + n);
+       n = 0;
 #ifdef PRIVOP_ADDR_COUNT
        n += dump_privop_addrs(buf + n);
 #endif
@@ -323,21 +141,8 @@ int dump_privop_counts_to_user(char __us
 
 int zero_privop_counts_to_user(char __user *ubuf, int len)
 {
-       char buf[TMPBUFLEN];
-       int n;
-
-       if (len < TMPBUFLEN)
-               return -1;
-
-       n = zero_privop_counts(buf);
-
-       zero_hyperprivop_counts();
 #ifdef PRIVOP_ADDR_COUNT
        zero_privop_addrs();
 #endif
-       zero_vhpt_stats();
-       zero_reflect_counts();
-       if (__copy_to_user(ubuf,buf,n))
-               return -1;
-       return n;
+       return 0;
 }
diff -r e7394daf098d -r 7cde0d938ef4 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c  Fri Aug 04 08:37:24 2006 -0600
+++ b/xen/arch/ia64/xen/vhpt.c  Fri Aug 04 09:02:43 2006 -0600
@@ -261,11 +261,6 @@ void flush_tlb_mask(cpumask_t mask)
             (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
 }
 
-void zero_vhpt_stats(void)
-{
-       return;
-}
-
 int dump_vhpt_stats(char *buf)
 {
        int i, cpu;
diff -r e7394daf098d -r 7cde0d938ef4 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Fri Aug 04 08:37:24 2006 -0600
+++ b/xen/common/page_alloc.c   Fri Aug 04 09:02:43 2006 -0600
@@ -24,7 +24,6 @@
 #include <xen/init.h>
 #include <xen/types.h>
 #include <xen/lib.h>
-#include <xen/perfc.h>
 #include <xen/sched.h>
 #include <xen/spinlock.h>
 #include <xen/mm.h>
@@ -33,6 +32,7 @@
 #include <xen/shadow.h>
 #include <xen/domain_page.h>
 #include <xen/keyhandler.h>
+#include <xen/perfc.h>
 #include <asm/page.h>
 
 /*
diff -r e7394daf098d -r 7cde0d938ef4 xen/include/asm-ia64/perfc_defn.h
--- a/xen/include/asm-ia64/perfc_defn.h Fri Aug 04 08:37:24 2006 -0600
+++ b/xen/include/asm-ia64/perfc_defn.h Fri Aug 04 09:02:43 2006 -0600
@@ -1,21 +1,42 @@
 /* This file is legitimately included multiple times. */
 
-PERFCOUNTER_CPU(dtlb_translate,                "dtlb hit")
+PERFCOUNTER_CPU(dtlb_translate,       "dtlb hit")
 
-PERFCOUNTER_CPU(tr_translate,          "TR hit")
+PERFCOUNTER_CPU(tr_translate,         "TR hit")
 
-PERFCOUNTER_CPU(vhpt_translate,                "virtual vhpt translation")
-PERFCOUNTER_CPU(fast_vhpt_translate,   "virtual vhpt fast translation")
+PERFCOUNTER_CPU(vhpt_translate,       "virtual vhpt translation")
+PERFCOUNTER_CPU(fast_vhpt_translate,  "virtual vhpt fast translation")
 
-PERFCOUNTER(recover_to_page_fault,     "recoveries to page fault")
-PERFCOUNTER(recover_to_break_fault,    "recoveries to break fault")
+PERFCOUNTER(recover_to_page_fault,    "recoveries to page fault")
+PERFCOUNTER(recover_to_break_fault,   "recoveries to break fault")
 
-PERFCOUNTER_CPU(phys_translate,                "metaphysical translation")
+PERFCOUNTER_CPU(phys_translate,       "metaphysical translation")
 
-PERFCOUNTER_CPU(idle_when_pending,     "vcpu idle at event")
+PERFCOUNTER_CPU(idle_when_pending,    "vcpu idle at event")
 
-PERFCOUNTER_CPU(pal_halt_light,                "calls to pal_halt_light")
+PERFCOUNTER_CPU(pal_halt_light,       "calls to pal_halt_light")
 
-PERFCOUNTER_CPU(context_switch,                "context switch")
+PERFCOUNTER_CPU(lazy_cover,           "lazy cover")
 
-PERFCOUNTER_CPU(lazy_cover,            "lazy cover")
+PERFCOUNTER_CPU(mov_to_ar_imm,        "privop mov_to_ar_imm")
+PERFCOUNTER_CPU(mov_to_ar_reg,        "privop mov_to_ar_reg")
+PERFCOUNTER_CPU(mov_from_ar,          "privop privified-mov_from_ar")
+PERFCOUNTER_CPU(ssm,                  "privop ssm")
+PERFCOUNTER_CPU(rsm,                  "privop rsm")
+PERFCOUNTER_CPU(rfi,                  "privop rfi")
+PERFCOUNTER_CPU(bsw0,                 "privop bsw0")
+PERFCOUNTER_CPU(bsw1,                 "privop bsw1")
+PERFCOUNTER_CPU(cover,                "privop cover")
+PERFCOUNTER_CPU(fc,                   "privop privified-fc")
+PERFCOUNTER_CPU(cpuid,                "privop privified-cpuid")
+
+PERFCOUNTER_ARRAY(mov_to_cr,          "privop mov to cr", 128)
+PERFCOUNTER_ARRAY(mov_from_cr,        "privop mov from cr", 128)
+
+PERFCOUNTER_ARRAY(misc_privop,        "privop misc", 64)
+
+PERFCOUNTER_ARRAY(slow_hyperprivop,   "slow hyperprivops", HYPERPRIVOP_MAX + 1)
+PERFCOUNTER_ARRAY(fast_hyperprivop,   "fast hyperprivops", HYPERPRIVOP_MAX + 1)
+
+PERFCOUNTER_ARRAY(slow_reflect,       "slow reflection", 0x80)
+PERFCOUNTER_ARRAY(fast_reflect,       "fast reflection", 0x80)
diff -r e7394daf098d -r 7cde0d938ef4 xen/include/asm-ia64/privop_stat.h
--- a/xen/include/asm-ia64/privop_stat.h        Fri Aug 04 08:37:24 2006 -0600
+++ b/xen/include/asm-ia64/privop_stat.h        Fri Aug 04 09:02:43 2006 -0600
@@ -6,32 +6,6 @@ extern int zero_privop_counts_to_user(ch
 extern int zero_privop_counts_to_user(char *, int);
 
 #define PRIVOP_ADDR_COUNT
-
-extern unsigned long slow_hyperpriv_cnt[HYPERPRIVOP_MAX+1];
-extern unsigned long fast_hyperpriv_cnt[HYPERPRIVOP_MAX+1];
-
-extern unsigned long slow_reflect_count[0x80];
-extern unsigned long fast_reflect_count[0x80];
-
-struct privop_counters {
-       unsigned long mov_to_ar_imm;
-       unsigned long mov_to_ar_reg;
-       unsigned long mov_from_ar;
-       unsigned long ssm;
-       unsigned long rsm;
-       unsigned long rfi;
-       unsigned long bsw0;
-       unsigned long bsw1;
-       unsigned long cover;
-       unsigned long fc;
-       unsigned long cpuid;
-       unsigned long Mpriv_cnt[64];
-
-       unsigned long to_cr_cnt[128]; /* Number of mov to cr privop.  */
-       unsigned long from_cr_cnt[128]; /* Number of mov from cr privop.  */
-};
-
-extern struct privop_counters privcnt;
 
 #ifdef PRIVOP_ADDR_COUNT
 
diff -r e7394daf098d -r 7cde0d938ef4 xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h       Fri Aug 04 08:37:24 2006 -0600
+++ b/xen/include/asm-ia64/vhpt.h       Fri Aug 04 09:02:43 2006 -0600
@@ -36,7 +36,6 @@ struct vhpt_lf_entry {
 #define INVALID_TI_TAG 0x8000000000000000L
 
 extern void vhpt_init (void);
-extern void zero_vhpt_stats(void);
 extern int dump_vhpt_stats(char *buf);
 extern void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
                                 unsigned long logps);
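
For anyone comparing the hyperprivop.S hunks above: the old stand-alone arrays
(fast_hyperpriv_cnt[], fast_reflect_count[]) were unsigned long, hence the
8-byte stride and ld8/st8; the perfc fields appear to be 32-bit, so the fast
paths now address them as perfcounters + <field offset> + 4*index, using the
offsets exported from asm-offsets.c, and read/modify/write with ld4/st4.  The
C fragment below is a hypothetical restatement of that addressing only; the
mini_* layout, the array sizes and the HYPERPRIVOP_RFI value are assumptions
for illustration, not code from the patch.

/* Hypothetical C restatement of the fast-path counter addressing:
 * address = &perfcounters + FIELD_OFS + 4 * index, then a 32-bit
 * load/add/store (the movl/ld4/adds/st4 sequences above).  Not Xen code. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mini_perfcounter {
        uint32_t recover_to_break_fault;
        uint32_t fast_hyperprivop[24];  /* HYPERPRIVOP_MAX + 1; size assumed */
        uint32_t fast_reflect[0x80];
};

/* What the DEFINE() lines added to asm-offsets.c boil down to. */
#define FAST_HYPERPRIVOP_PERFC_OFS offsetof(struct mini_perfcounter, fast_hyperprivop)
#define FAST_REFLECT_PERFC_OFS     offsetof(struct mini_perfcounter, fast_reflect)

static struct mini_perfcounter perfcounters;

static void bump32(size_t field_ofs, unsigned int idx)
{
        uint32_t *ctr = (uint32_t *)((char *)&perfcounters + field_ofs + 4 * idx);
        *ctr += 1;                      /* ld4 ; adds 1 ; st4 */
}

int main(void)
{
        bump32(FAST_HYPERPRIVOP_PERFC_OFS, 1);       /* HYPERPRIVOP_RFI, assumed == 1 */
        bump32(FAST_REFLECT_PERFC_OFS, 0x3000 >> 8); /* timer tick vector 0x3000 */
        printf("fast rfi=%u, fast reflect[0x30]=%u\n",
               (unsigned int)perfcounters.fast_hyperprivop[1],
               (unsigned int)perfcounters.fast_reflect[0x30]);
        return 0;
}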

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
