xen-ia64-devel

Re: [Xen-devel] [PATCH 4/5 TAKE 2] xenoprof: make linux xenoprof code arch generic

Updated following Renato's comments.

Changes:
- __exit cleanup
- removed perfmon comments
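
For context, the net effect of this patch is that the common driver in
drivers/xen/xenoprof/xenoprofile.c now only calls xenoprofile_init() /
xenoprofile_exit() and a small set of xenoprof_arch_* hooks, so a new port
only has to supply those. The following is an illustrative sketch (not part
of this patch) of the per-arch surface a port would provide, mirroring the
i386 shape added below:

/*
 * Illustrative sketch only: what a port's <asm/xenoprof.h> plus its
 * oprofile glue would roughly provide, modelled on the i386 version
 * in this patch.
 */
struct xenoprof_arch_shared_buffer {
        /* per-arch mapping state, if any; empty on i386 */
};

struct xenoprof_init;
struct xenoprof_get_buffer;
struct xenoprof_passive;
struct xenoprof_shared_buffer;

void xenoprof_arch_init_counter(struct xenoprof_init *init);
void xenoprof_arch_counter(void);
void xenoprof_arch_start(void);
void xenoprof_arch_stop(void);
int  xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer *get_buffer,
                                     struct xenoprof_shared_buffer *sbuf);
void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer *sbuf);
int  xenoprof_arch_set_passive(struct xenoprof_passive *pdomain,
                               struct xenoprof_shared_buffer *sbuf);

/* The arch oprofile glue then just forwards to the common driver
 * (needs <linux/init.h>, <linux/oprofile.h> and <xen/xenoprof.h>): */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
        return xenoprofile_init(ops);
}

void oprofile_arch_exit(void)
{
        xenoprofile_exit();
}

On i386 the arch state is empty and the hooks boil down to the hypercalls
already present in the old code; an architecture that needs a different
buffer-mapping scheme keeps those details inside struct
xenoprof_arch_shared_buffer and the map/unmap hooks.
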

# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1163486062 -32400
# Node ID 9bf71c7abb2de71ac233c24d821978ce2e6cc961
# Parent  e9e5f10f89615f88d433a670a69a2ba4af60c791
Make the xenoprof code on the Linux side arch-generic, with some bug fixes.
PATCHNAME: make_xenoprof_of_linux_side_arch_generic

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>

diff -r e9e5f10f8961 -r 9bf71c7abb2d linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c
--- a/linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c        Tue Nov 14 15:34:21 2006 +0900
+++ b/linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c        Tue Nov 14 15:34:22 2006 +0900
@@ -15,12 +15,130 @@
  *                    VA Linux Systems Japan K.K.
  */
 
+#include <linux/init.h>
 #include <linux/oprofile.h>
+#include <linux/sched.h>
+#include <asm/pgtable.h>
 
+#include <xen/driver_util.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/xenoprof.h>
 #include <xen/xenoprof.h>
 #include "op_counter.h"
 
-unsigned int num_events = 0;
+static unsigned int num_events = 0;
+
+void __init xenoprof_arch_init_counter(struct xenoprof_init *init)
+{
+       num_events = init->num_events;
+       /* just in case - make sure we do not overflow event list 
+          (i.e. counter_config list) */
+       if (num_events > OP_MAX_COUNTER) {
+               num_events = OP_MAX_COUNTER;
+               init->num_events = num_events;
+       }
+}
+
+void xenoprof_arch_counter(void)
+{
+       int i;
+       struct xenoprof_counter counter;
+
+       for (i=0; i<num_events; i++) {
+               counter.ind       = i;
+               counter.count     = (uint64_t)counter_config[i].count;
+               counter.enabled   = (uint32_t)counter_config[i].enabled;
+               counter.event     = (uint32_t)counter_config[i].event;
+               counter.kernel    = (uint32_t)counter_config[i].kernel;
+               counter.user      = (uint32_t)counter_config[i].user;
+               counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
+               HYPERVISOR_xenoprof_op(XENOPROF_counter, 
+                                      &counter);
+       }
+}
+
+void xenoprof_arch_start(void) 
+{
+       /* nothing */
+}
+
+void xenoprof_arch_stop(void)
+{
+       /* nothing */
+}
+
+void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer * sbuf)
+{
+       if (sbuf->buffer) {
+               vunmap(sbuf->buffer);
+               sbuf->buffer = NULL;
+       }
+}
+
+int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer * get_buffer,
+                                   struct xenoprof_shared_buffer * sbuf)
+{
+       int npages, ret;
+       struct vm_struct *area;
+
+       sbuf->buffer = NULL;
+       if ( (ret = HYPERVISOR_xenoprof_op(XENOPROF_get_buffer, get_buffer)) )
+               return ret;
+
+       npages = (get_buffer->bufsize * get_buffer->nbuf - 1) / PAGE_SIZE + 1;
+
+       area = alloc_vm_area(npages * PAGE_SIZE);
+       if (area == NULL)
+               return -ENOMEM;
+
+       if ( (ret = direct_kernel_remap_pfn_range(
+                     (unsigned long)area->addr,
+                     get_buffer->buf_maddr >> PAGE_SHIFT,
+                     npages * PAGE_SIZE, __pgprot(_KERNPG_TABLE),
+                     DOMID_SELF)) ) {
+               vunmap(area->addr);
+               return ret;
+       }
+
+       sbuf->buffer = area->addr;
+       return ret;
+}
+
+int xenoprof_arch_set_passive(struct xenoprof_passive * pdomain,
+                             struct xenoprof_shared_buffer * sbuf)
+{
+       int ret;
+       int npages;
+       struct vm_struct *area;
+       pgprot_t prot = __pgprot(_KERNPG_TABLE);
+
+       sbuf->buffer = NULL;
+       ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive, pdomain);
+       if (ret)
+               goto out;
+
+       npages = (pdomain->bufsize * pdomain->nbuf - 1) / PAGE_SIZE + 1;
+
+       area = alloc_vm_area(npages * PAGE_SIZE);
+       if (area == NULL) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ret = direct_kernel_remap_pfn_range(
+               (unsigned long)area->addr,
+               pdomain->buf_maddr >> PAGE_SHIFT,
+               npages * PAGE_SIZE, prot, DOMID_SELF);
+       if (ret) {
+               vunmap(area->addr);
+               goto out;
+       }
+       sbuf->buffer = area->addr;
+
+out:
+       return ret;
+}
+
 struct op_counter_config counter_config[OP_MAX_COUNTER];
 
 int xenoprof_create_files(struct super_block * sb, struct dentry * root)
@@ -49,3 +167,13 @@ int xenoprof_create_files(struct super_b
 
        return 0;
 }
+
+int __init oprofile_arch_init(struct oprofile_operations * ops)
+{
+       return xenoprofile_init(ops);
+}
+
+void oprofile_arch_exit(void)
+{
+       xenoprofile_exit();
+}
diff -r e9e5f10f8961 -r 9bf71c7abb2d linux-2.6-xen-sparse/drivers/xen/xenoprof/xenoprofile.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenoprof/xenoprofile.c   Tue Nov 14 15:34:21 2006 +0900
+++ b/linux-2.6-xen-sparse/drivers/xen/xenoprof/xenoprofile.c   Tue Nov 14 15:34:22 2006 +0900
@@ -1,5 +1,5 @@
 /**
- * @file xenoprof.c
+ * @file xenoprofile.c
  *
  * @remark Copyright 2002 OProfile authors
  * @remark Read the file COPYING
@@ -23,14 +23,9 @@
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/vmalloc.h>
-#include <asm/nmi.h>
-#include <asm/msr.h>
-#include <asm/apic.h>
 #include <asm/pgtable.h>
 #include <xen/evtchn.h>
 #include <xen/xenoprof.h>
-#include "../../../arch/i386/oprofile/op_counter.h"
-
 #include <xen/driver_util.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/xenoprof.h>
@@ -39,18 +34,23 @@
 
 #define MAX_XENOPROF_SAMPLES 16
 
-static int xenoprof_start(void);
-static void xenoprof_stop(void);
-
-static int xenoprof_enabled = 0;
-extern unsigned int num_events;
-static int is_primary = 0;
-static int active_defined;
-
 /* sample buffers shared with Xen */
 xenoprof_buf_t * xenoprof_buf[MAX_VIRT_CPUS];
 /* Shared buffer area */
-char * shared_buffer = NULL;
+struct xenoprof_shared_buffer shared_buffer;
+
+/* Passive sample buffers shared with Xen */
+xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
+/* Passive shared buffer area */
+struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];
+
+static int xenoprof_start(void);
+static void xenoprof_stop(void);
+
+static int xenoprof_enabled = 0;
+int xenoprof_is_primary = 0;
+static int active_defined;
+
 /* Number of buffers in shared area (one per VCPU) */
 int nbuf;
 /* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
@@ -58,11 +58,6 @@ int ovf_irq[NR_CPUS];
 /* cpu model type string - copied from Xen memory space on XENOPROF_init command */
 char cpu_type[XENOPROF_CPU_TYPE_SIZE];
 
-/* Passive sample buffers shared with Xen */
-xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
-/* Passive shared buffer area */
-char *p_shared_buffer[MAX_OPROF_DOMAINS];
-
 #ifdef CONFIG_PM
 
 static int xenoprof_suspend(struct sys_device * dev, pm_message_t state)
@@ -103,7 +98,7 @@ static int __init init_driverfs(void)
 }
 
 
-static void __exit exit_driverfs(void)
+static void exit_driverfs(void)
 {
        sysdev_unregister(&device_oprofile);
        sysdev_class_unregister(&oprofile_sysclass);
@@ -193,7 +188,7 @@ xenoprof_ovf_interrupt(int irq, void * d
 
        xenoprof_add_pc(buf, 0);
 
-       if (is_primary && !test_and_set_bit(0, &flag)) {
+       if (xenoprof_is_primary && !test_and_set_bit(0, &flag)) {
                xenoprof_handle_passive();
                smp_mb__before_clear_bit();
                clear_bit(0, &flag);
@@ -207,7 +202,7 @@ static void unbind_virq(void)
 {
        int i;
 
-       for_each_cpu(i) {
+       for_each_online_cpu(i) {
                if (ovf_irq[i] >= 0) {
                        unbind_from_irqhandler(ovf_irq[i], NULL);
                        ovf_irq[i] = -1;
@@ -220,7 +215,7 @@ static int bind_virq(void)
 {
        int i, result;
 
-       for_each_cpu(i) {
+       for_each_online_cpu(i) {
                result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
                                                 i,
                                                 xenoprof_ovf_interrupt,
@@ -240,40 +235,33 @@ static int bind_virq(void)
 }
 
 
+static void unmap_passive_list(void)
+{
+       int i;
+       for (i = 0; i < pdomains; i++)
+               xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
+       pdomains = 0;
+}
+
+
 static int map_xenoprof_buffer(int max_samples)
 {
        struct xenoprof_get_buffer get_buffer;
        struct xenoprof_buf *buf;
-       int npages, ret, i;
-       struct vm_struct *area;
-
-       if ( shared_buffer )
+       int ret, i;
+
+       if ( shared_buffer.buffer )
                return 0;
 
        get_buffer.max_samples = max_samples;
-
-       if ( (ret = HYPERVISOR_xenoprof_op(XENOPROF_get_buffer, &get_buffer)) )
+       ret = xenoprof_arch_map_shared_buffer(&get_buffer, &shared_buffer);
+       if (ret)
                return ret;
-
        nbuf = get_buffer.nbuf;
-       npages = (get_buffer.bufsize * nbuf - 1) / PAGE_SIZE + 1;
-
-       area = alloc_vm_area(npages * PAGE_SIZE);
-       if (area == NULL)
-               return -ENOMEM;
-
-       if ( (ret = direct_kernel_remap_pfn_range(
-                     (unsigned long)area->addr,
-                     get_buffer.buf_maddr >> PAGE_SHIFT,
-                     npages * PAGE_SIZE, __pgprot(_KERNPG_TABLE), DOMID_SELF)) ) {
-               vunmap(area->addr);
-               return ret;
-       }
-
-       shared_buffer = area->addr;
+
        for (i=0; i< nbuf; i++) {
                buf = (struct xenoprof_buf*) 
-                       &shared_buffer[i * get_buffer.bufsize];
+                       &shared_buffer.buffer[i * get_buffer.bufsize];
                BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
                xenoprof_buf[buf->vcpu_id] = buf;
        }
@@ -285,7 +273,6 @@ static int xenoprof_setup(void)
 static int xenoprof_setup(void)
 {
        int ret;
-       int i;
 
        if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )
                return ret;
@@ -293,9 +280,7 @@ static int xenoprof_setup(void)
        if ( (ret = bind_virq()) )
                return ret;
 
-       if (is_primary) {
-               struct xenoprof_counter counter;
-
+       if (xenoprof_is_primary) {
                /* Define dom0 as an active domain if not done yet */
                if (!active_defined) {
                        domid_t domid;
@@ -312,17 +297,7 @@ static int xenoprof_setup(void)
                ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
                if (ret)
                        goto err;
-               for (i=0; i<num_events; i++) {
-                       counter.ind       = i;
-                       counter.count     = (uint64_t)counter_config[i].count;
-                       counter.enabled   = (uint32_t)counter_config[i].enabled;
-                       counter.event     = (uint32_t)counter_config[i].event;
-                       counter.kernel    = (uint32_t)counter_config[i].kernel;
-                       counter.user      = (uint32_t)counter_config[i].user;
-                       counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
-                       HYPERVISOR_xenoprof_op(XENOPROF_counter, 
-                                              &counter);
-               }
+               xenoprof_arch_counter();
                ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
 
                if (ret)
@@ -347,13 +322,16 @@ static void xenoprof_shutdown(void)
 
        HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL);
 
-       if (is_primary) {
+       if (xenoprof_is_primary) {
                HYPERVISOR_xenoprof_op(XENOPROF_release_counters, NULL);
                active_defined = 0;
        }
 
        unbind_virq();
 
+       xenoprof_arch_unmap_shared_buffer(&shared_buffer);
+       if (xenoprof_is_primary)
+               unmap_passive_list();
 }
 
 
@@ -361,17 +339,19 @@ static int xenoprof_start(void)
 {
        int ret = 0;
 
-       if (is_primary)
+       if (xenoprof_is_primary)
                ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
-
+       if (!ret)
+               xenoprof_arch_start();
        return ret;
 }
 
 
 static void xenoprof_stop(void)
 {
-       if (is_primary)
+       if (xenoprof_is_primary)
                HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL);
+       xenoprof_arch_stop();
 }
 
 
@@ -383,7 +363,7 @@ static int xenoprof_set_active(int * act
        int set_dom0 = 0;
        domid_t domid;
 
-       if (!is_primary)
+       if (!xenoprof_is_primary)
                return 0;
 
        if (adomains > MAX_OPROF_DOMAINS)
@@ -423,12 +403,9 @@ static int xenoprof_set_passive(int * p_
 {
        int ret;
        int i, j;
-       int npages;
        struct xenoprof_buf *buf;
-       struct vm_struct *area;
-       pgprot_t prot = __pgprot(_KERNPG_TABLE);
-
-       if (!is_primary)
+
+       if (!xenoprof_is_primary)
                return 0;
 
        if (pdoms > MAX_OPROF_DOMAINS)
@@ -437,57 +414,37 @@ static int xenoprof_set_passive(int * p_
        ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
        if (ret)
                return ret;
+       unmap_passive_list();
 
        for (i = 0; i < pdoms; i++) {
                passive_domains[i].domain_id = p_domains[i];
                passive_domains[i].max_samples = 2048;
-               ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive,
-                                            &passive_domains[i]);
+               ret = xenoprof_arch_set_passive(&passive_domains[i],
+                                               &p_shared_buffer[i]);
                if (ret)
                        goto out;
-
-               npages = (passive_domains[i].bufsize * passive_domains[i].nbuf - 1) / PAGE_SIZE + 1;
-
-               area = alloc_vm_area(npages * PAGE_SIZE);
-               if (area == NULL) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-
-               ret = direct_kernel_remap_pfn_range(
-                       (unsigned long)area->addr,
-                       passive_domains[i].buf_maddr >> PAGE_SHIFT,
-                       npages * PAGE_SIZE, prot, DOMID_SELF);
-               if (ret) {
-                       vunmap(area->addr);
-                       goto out;
-               }
-
-               p_shared_buffer[i] = area->addr;
-
                for (j = 0; j < passive_domains[i].nbuf; j++) {
                        buf = (struct xenoprof_buf *)
-                               &p_shared_buffer[i][j * passive_domains[i].bufsize];
+                               &p_shared_buffer[i].buffer[j * passive_domains[i].bufsize];
                        BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
                        p_xenoprof_buf[i][buf->vcpu_id] = buf;
                }
-
        }
 
        pdomains = pdoms;
        return 0;
 
 out:
-       for (j = 0; j < i; j++) {
-               vunmap(p_shared_buffer[j]);
-               p_shared_buffer[j] = NULL;
-       }
+       for (j = 0; j < i; j++)
+               xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[j]);
 
        return ret;
 }
 
 struct oprofile_operations xenoprof_ops = {
+#ifdef HAVE_XENOPROF_CREATE_FILES
        .create_files   = xenoprof_create_files,
+#endif
        .set_active     = xenoprof_set_active,
        .set_passive    = xenoprof_set_passive,
        .setup          = xenoprof_setup,
@@ -500,21 +457,15 @@ struct oprofile_operations xenoprof_ops 
 /* in order to get driverfs right */
 static int using_xenoprof;
 
-int __init oprofile_arch_init(struct oprofile_operations * ops)
+int __init xenoprofile_init(struct oprofile_operations * ops)
 {
        struct xenoprof_init init;
        int ret, i;
 
        ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
-
        if (!ret) {
-               num_events = init.num_events;
-               is_primary = init.is_primary;
-
-               /* just in case - make sure we do not overflow event list 
-                  (i.e. counter_config list) */
-               if (num_events > OP_MAX_COUNTER)
-                       num_events = OP_MAX_COUNTER;
+               xenoprof_arch_init_counter(&init);
+               xenoprof_is_primary = init.is_primary;
 
                /*  cpu_type is detected by Xen */
                cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
@@ -530,30 +481,20 @@ int __init oprofile_arch_init(struct opr
 
                active_defined = 0;
        }
-       printk(KERN_INFO "oprofile_arch_init: ret %d, events %d, "
-              "is_primary %d\n", ret, num_events, is_primary);
+       printk(KERN_INFO "%s: ret %d, events %d, xenoprof_is_primary %d\n",
+              __func__, ret, init.num_events, xenoprof_is_primary);
        return ret;
 }
 
 
-void __exit oprofile_arch_exit(void)
-{
-       int i;
-
+void xenoprofile_exit(void)
+{
        if (using_xenoprof)
                exit_driverfs();
 
-       if (shared_buffer) {
-               vunmap(shared_buffer);
-               shared_buffer = NULL;
-       }
-       if (is_primary) {
-               for (i = 0; i < pdomains; i++)
-                       if (p_shared_buffer[i]) {
-                               vunmap(p_shared_buffer[i]);
-                               p_shared_buffer[i] = NULL;
-                       }
+       xenoprof_arch_unmap_shared_buffer(&shared_buffer);
+       if (xenoprof_is_primary) {
+               unmap_passive_list();
                HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL);
         }
-
-}
+}
diff -r e9e5f10f8961 -r 9bf71c7abb2d linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/xenoprof.h
--- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/xenoprof.h     Tue Nov 14 15:34:21 2006 +0900
+++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/xenoprof.h     Tue Nov 14 15:34:22 2006 +0900
@@ -21,11 +21,28 @@
  */
 #ifndef __ASM_XENOPROF_H__
 #define __ASM_XENOPROF_H__
-#ifdef CONFIG_OPROFILE 
+#ifdef CONFIG_OPROFILE
 
 struct super_block;
 struct dentry;
 int xenoprof_create_files(struct super_block * sb, struct dentry * root);
+#define HAVE_XENOPROF_CREATE_FILES
+
+struct xenoprof_init;
+void xenoprof_arch_init_counter(struct xenoprof_init *init);
+void xenoprof_arch_counter(void);
+void xenoprof_arch_start(void);
+void xenoprof_arch_stop(void);
+
+struct xenoprof_arch_shared_buffer {
+       /* nothing */
+};
+struct xenoprof_shared_buffer;
+void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf);
+struct xenoprof_get_buffer;
+int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer, struct xenoprof_shared_buffer* sbuf);
+struct xenoprof_passive;
+int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain, struct xenoprof_shared_buffer* sbuf);
 
 #endif /* CONFIG_OPROFILE */
 #endif /* __ASM_XENOPROF_H__ */
diff -r e9e5f10f8961 -r 9bf71c7abb2d linux-2.6-xen-sparse/include/xen/xenoprof.h
--- a/linux-2.6-xen-sparse/include/xen/xenoprof.h       Tue Nov 14 15:34:21 2006 +0900
+++ b/linux-2.6-xen-sparse/include/xen/xenoprof.h       Tue Nov 14 15:34:22 2006 +0900
@@ -22,9 +22,24 @@
 
 #ifndef __XEN_XENOPROF_H__
 #define __XEN_XENOPROF_H__
+
 #ifdef CONFIG_OPROFILE
-
 #include <asm/xenoprof.h>
 
+struct oprofile_operations;
+int xenoprofile_init(struct oprofile_operations * ops);
+void xenoprofile_exit(void);
+
+extern int xenoprof_is_primary;
+#define is_xenoprof_primary()  (xenoprof_is_primary)
+struct xenoprof_shared_buffer {
+       char                                    *buffer;
+       struct xenoprof_arch_shared_buffer      arch;
+};
+#else
+#define xenoprofile_init(ops)  do { } while (0)
+#define xenoprofile_exit()     do { } while (0)
+#define is_xenoprof_primary()  (0)
 #endif /* CONFIG_OPROFILE */
+
 #endif /* __XEN_XENOPROF_H__ */



-- 
yamahata

Attachment: 12454_9bf71c7abb2d_make_xenoprof_of_linux_side_arch_generic.patch
Description: Text document

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel