# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID b0ee5d9b9ebda162ae478c8da2a78c0711bc5c85
# Parent 123ff1c707285479f76af8d19f457a5d49fe2aff
Clean up the xenoprofile hypercall interface.
Signed-off-by: Jose Renato Santos <jsantos@xxxxxxxxxx>
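
The xenoprof hypercall now takes a single typed argument pointer (or NULL)
per sub-command instead of two opaque unsigned longs, and counter
configuration is passed one counter at a time through the new
XENOPROF_counter command. A rough sketch of the resulting dom0-side calling
sequence (illustrative only; it mirrors xenoprof_setup() below, error
checking is omitted, and the numeric counter values are placeholders, not
taken from this patch):

    domid_t domid = 0;                 /* dom0 must be on the active list */
    struct xenoprof_counter counter = {
        .ind       = 0,                /* counter index (placeholder) */
        .count     = 1000000,          /* sample interval (placeholder) */
        .enabled   = 1,
        .event     = 0x3c,             /* event code (placeholder) */
        .kernel    = 1,
        .user      = 1,
        .unit_mask = 0,
    };

    /* each sub-command takes one pointer argument, or NULL */
    HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
    HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
    HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
    HYPERVISOR_xenoprof_op(XENOPROF_counter, &counter);
    HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
    HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
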
diff -r 123ff1c70728 -r b0ee5d9b9ebd linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c
--- a/linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c Wed Apr 19 18:39:36 2006 +0100
+++ b/linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c Wed Apr 19 18:43:39 2006 +0100
@@ -35,8 +35,9 @@ void * vm_map_xen_pages(unsigned long ma
void * vm_map_xen_pages(unsigned long maddr, int vm_size, pgprot_t prot);
static int xenoprof_enabled = 0;
-static int num_events = 0;
+static unsigned int num_events = 0;
static int is_primary = 0;
+static int active_defined;
/* sample buffers shared with Xen */
xenoprof_buf_t * xenoprof_buf[MAX_VIRT_CPUS];
@@ -106,7 +107,7 @@ xenoprof_ovf_interrupt(int irq, void * d
xenoprof_ovf_interrupt(int irq, void * dev_id, struct pt_regs * regs)
{
int head, tail, size;
- xenoprof_buf_t * buf;
+ struct xenoprof_buf * buf;
int cpu;
cpu = smp_processor_id();
@@ -196,28 +197,49 @@ static int xenoprof_setup(void)
static int xenoprof_setup(void)
{
int ret;
+ int i;
ret = bind_virq();
if (ret)
return ret;
if (is_primary) {
- ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters,
- (unsigned long)NULL,
- (unsigned long)NULL);
+ struct xenoprof_counter counter;
+
+ /* Define dom0 as an active domain if not done yet */
+ if (!active_defined) {
+ domid_t domid;
+ ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
+ if (ret)
+ goto err;
+ domid = 0;
+ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
+ if (ret)
+ goto err;
+ active_defined = 1;
+ }
+
+ ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
if (ret)
goto err;
-
- ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events,
- (unsigned long)&counter_config,
- (unsigned long)num_events);
+ for (i=0; i<num_events; i++) {
+ counter.ind = i;
+ counter.count = (uint64_t)counter_config[i].count;
+ counter.enabled = (uint32_t)counter_config[i].enabled;
+ counter.event = (uint32_t)counter_config[i].event;
+ counter.kernel = (uint32_t)counter_config[i].kernel;
+ counter.user = (uint32_t)counter_config[i].user;
+ counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
+ HYPERVISOR_xenoprof_op(XENOPROF_counter,
+ &counter);
+ }
+ ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
+
if (ret)
goto err;
}
- ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq,
- (unsigned long)NULL,
- (unsigned long)NULL);
+ ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
if (ret)
goto err;
@@ -233,17 +255,15 @@ static void xenoprof_shutdown(void)
{
xenoprof_enabled = 0;
- HYPERVISOR_xenoprof_op(XENOPROF_disable_virq,
- (unsigned long)NULL,
- (unsigned long)NULL);
+ HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL);
if (is_primary) {
- HYPERVISOR_xenoprof_op(XENOPROF_release_counters,
- (unsigned long)NULL,
- (unsigned long)NULL);
+ HYPERVISOR_xenoprof_op(XENOPROF_release_counters, NULL);
+ active_defined = 0;
}
unbind_virq();
+
}
@@ -252,9 +272,8 @@ static int xenoprof_start(void)
int ret = 0;
if (is_primary)
- ret = HYPERVISOR_xenoprof_op(XENOPROF_start,
- (unsigned long)NULL,
- (unsigned long)NULL);
+ ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
+
return ret;
}
@@ -262,20 +281,43 @@ static void xenoprof_stop(void)
static void xenoprof_stop(void)
{
if (is_primary)
- HYPERVISOR_xenoprof_op(XENOPROF_stop,
- (unsigned long)NULL,
- (unsigned long)NULL);
+ HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL);
}
static int xenoprof_set_active(int * active_domains,
- unsigned int adomains)
+ unsigned int adomains)
{
int ret = 0;
- if (is_primary)
- ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active,
- (unsigned long)active_domains,
- (unsigned long)adomains);
+ int i;
+ int set_dom0 = 0;
+ domid_t domid;
+
+ if (!is_primary)
+ return 0;
+
+ if (adomains > MAX_OPROF_DOMAINS)
+ return -E2BIG;
+
+ ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
+ if (ret)
+ return ret;
+
+ for (i=0; i<adomains; i++) {
+ domid = active_domains[i];
+ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
+ if (ret)
+ return (ret);
+ if (active_domains[i] == 0)
+ set_dom0 = 1;
+ }
+ /* dom0 must always be active but may not be in the list */
+ if (!set_dom0) {
+ domid = 0;
+ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
+ }
+
+ active_defined = 1;
return ret;
}
@@ -325,44 +367,48 @@ static int using_xenoprof;
int __init oprofile_arch_init(struct oprofile_operations * ops)
{
- xenoprof_init_result_t result;
- xenoprof_buf_t * buf;
- int max_samples = 16;
+ struct xenoprof_init init;
+ struct xenoprof_buf * buf;
int vm_size;
int npages;
+ int ret;
int i;
- int ret = HYPERVISOR_xenoprof_op(XENOPROF_init,
- (unsigned long)max_samples,
- (unsigned long)&result);
+ init.max_samples = 16;
+ ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
if (!ret) {
pgprot_t prot = __pgprot(_KERNPG_TABLE);
- num_events = result.num_events;
- is_primary = result.is_primary;
- nbuf = result.nbuf;
-
- npages = (result.bufsize * nbuf - 1) / PAGE_SIZE + 1;
+ num_events = init.num_events;
+ is_primary = init.is_primary;
+ nbuf = init.nbuf;
+
+ /* just in case - make sure we do not overflow event list
+    (i.e. counter_config list) */
+ if (num_events > OP_MAX_COUNTER)
+ num_events = OP_MAX_COUNTER;
+
+ npages = (init.bufsize * nbuf - 1) / PAGE_SIZE + 1;
vm_size = npages * PAGE_SIZE;
- shared_buffer = (char *) vm_map_xen_pages(result.buf_maddr,
- vm_size, prot);
+ shared_buffer = (char *)vm_map_xen_pages(init.buf_maddr,
+ vm_size, prot);
if (!shared_buffer) {
ret = -ENOMEM;
goto out;
}
for (i=0; i< nbuf; i++) {
- buf = (xenoprof_buf_t*)
- &shared_buffer[i * result.bufsize];
+ buf = (struct xenoprof_buf*)
+ &shared_buffer[i * init.bufsize];
BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
xenoprof_buf[buf->vcpu_id] = buf;
}
/* cpu_type is detected by Xen */
cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
- strncpy(cpu_type, result.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
+ strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
xenoprof_ops.cpu_type = cpu_type;
init_driverfs();
@@ -371,6 +417,8 @@ int __init oprofile_arch_init(struct opr
for (i=0; i<NR_CPUS; i++)
ovf_irq[i] = -1;
+
+ active_defined = 0;
}
out:
printk(KERN_INFO "oprofile_arch_init: ret %d, events %d, "
@@ -389,7 +437,5 @@ void __exit oprofile_arch_exit(void)
shared_buffer = NULL;
}
if (is_primary)
- HYPERVISOR_xenoprof_op(XENOPROF_shutdown,
- (unsigned long)NULL,
- (unsigned long)NULL);
-}
+ HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL);
+}
diff -r 123ff1c70728 -r b0ee5d9b9ebd linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypercall.h
--- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypercall.h Wed Apr 19 18:39:36 2006 +0100
+++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypercall.h Wed Apr 19 18:43:39 2006 +0100
@@ -338,9 +338,9 @@ HYPERVISOR_callback_op(
static inline int
HYPERVISOR_xenoprof_op(
- int op, unsigned long arg1, unsigned long arg2)
-{
- return _hypercall3(int, xenoprof_op, op, arg1, arg2);
+ int op, void *arg)
+{
+ return _hypercall2(int, xenoprof_op, op, arg);
}
diff -r 123ff1c70728 -r b0ee5d9b9ebd xen/arch/x86/oprofile/xenoprof.c
--- a/xen/arch/x86/oprofile/xenoprof.c Wed Apr 19 18:39:36 2006 +0100
+++ b/xen/arch/x86/oprofile/xenoprof.c Wed Apr 19 18:43:39 2006 +0100
@@ -4,6 +4,7 @@
* (email: xenoprof@xxxxxxxxxxxxx)
*/
+#include <xen/guest_access.h>
#include <xen/sched.h>
#include <public/xenoprof.h>
@@ -12,7 +13,7 @@
/* Limit amount of pages used for shared buffer (per domain) */
#define MAX_OPROF_SHARED_PAGES 32
-int active_domains[MAX_OPROF_DOMAINS];
+domid_t active_domains[MAX_OPROF_DOMAINS];
int active_ready[MAX_OPROF_DOMAINS];
unsigned int adomains;
unsigned int activated;
@@ -84,7 +85,8 @@ static void xenoprof_reset_buf(struct do
int active_index(struct domain *d)
{
- int i, id = d->domain_id;
+ int i;
+ domid_t id = d->domain_id;
for ( i = 0; i < adomains; i++ )
if ( active_domains[i] == id )
@@ -137,13 +139,11 @@ int reset_active(struct domain *d)
return 0;
}
-int set_active_domains(int num)
-{
- int primary;
+int reset_active_list(void)
+{
int i;
struct domain *d;
- /* Reset any existing active domains from previous runs. */
for ( i = 0; i < adomains; i++ )
{
if ( active_ready[i] )
@@ -157,24 +157,20 @@ int set_active_domains(int num)
}
}
- adomains = num;
-
- /* Add primary profiler to list of active domains if not there yet */
- primary = active_index(primary_profiler);
- if ( primary == -1 )
- {
- /* Return if there is no space left on list. */
- if ( num >= MAX_OPROF_DOMAINS )
- return -E2BIG;
- active_domains[num] = primary_profiler->domain_id;
- num++;
- }
-
- adomains = num;
+ adomains = 0;
activated = 0;
- for ( i = 0; i < adomains; i++ )
- active_ready[i] = 0;
+ return 0;
+}
+
+int add_active_list (domid_t domid)
+{
+ if ( adomains >= MAX_OPROF_DOMAINS )
+ return -E2BIG;
+
+ active_domains[adomains] = domid;
+ active_ready[adomains] = 0;
+ adomains++;
return 0;
}
@@ -353,26 +349,31 @@ void free_xenoprof_pages(struct domain *
d->xenoprof = NULL;
}
-int xenoprof_init(int max_samples, xenoprof_init_result_t *init_result)
-{
- xenoprof_init_result_t result;
+int xenoprof_op_init(GUEST_HANDLE(void) arg)
+{
+ struct xenoprof_init xenoprof_init;
int is_primary, num_events;
struct domain *d = current->domain;
int ret;
- ret = nmi_init(&num_events, &is_primary, result.cpu_type);
+ if ( copy_from_guest(&xenoprof_init, arg, 1) )
+ return -EFAULT;
+
+ ret = nmi_init(&num_events,
+ &is_primary,
+ xenoprof_init.cpu_type);
+ if ( ret < 0 )
+ goto err;
+
if ( is_primary )
primary_profiler = current->domain;
-
- if ( ret < 0 )
- goto err;
/*
* We allocate xenoprof struct and buffers only at first time xenoprof_init
* is called. Memory is then kept until domain is destroyed.
*/
if ( (d->xenoprof == NULL) &&
- ((ret = alloc_xenoprof_struct(d, max_samples)) < 0) )
+ ((ret = alloc_xenoprof_struct(d, xenoprof_init.max_samples)) < 0) )
goto err;
xenoprof_reset_buf(d);
@@ -381,13 +382,13 @@ int xenoprof_init(int max_samples, xenop
d->xenoprof->domain_ready = 0;
d->xenoprof->is_primary = is_primary;
- result.is_primary = is_primary;
- result.num_events = num_events;
- result.nbuf = d->xenoprof->nbuf;
- result.bufsize = d->xenoprof->bufsize;
- result.buf_maddr = __pa(d->xenoprof->rawbuf);
-
- if ( copy_to_user((void *)init_result, (void *)&result, sizeof(result)) )
+ xenoprof_init.is_primary = is_primary;
+ xenoprof_init.num_events = num_events;
+ xenoprof_init.nbuf = d->xenoprof->nbuf;
+ xenoprof_init.bufsize = d->xenoprof->bufsize;
+ xenoprof_init.buf_maddr = __pa(d->xenoprof->rawbuf);
+
+ if ( copy_to_guest(arg, &xenoprof_init, 1) )
{
ret = -EFAULT;
goto err;
@@ -409,7 +410,7 @@ int xenoprof_init(int max_samples, xenop
|| (op == XENOPROF_release_counters) \
|| (op == XENOPROF_shutdown))
-int do_xenoprof_op(int op, unsigned long arg1, unsigned long arg2)
+int do_xenoprof_op(int op, GUEST_HANDLE(void) arg)
{
int ret = 0;
@@ -423,20 +424,24 @@ int do_xenoprof_op(int op, unsigned long
switch ( op )
{
case XENOPROF_init:
- ret = xenoprof_init((int)arg1, (xenoprof_init_result_t *)arg2);
- break;
-
+ ret = xenoprof_op_init(arg);
+ break;
+
+ case XENOPROF_reset_active_list:
+ {
+ ret = reset_active_list();
+ break;
+ }
case XENOPROF_set_active:
+ {
+ domid_t domid;
if ( xenoprof_state != XENOPROF_IDLE )
return -EPERM;
- if ( arg2 > MAX_OPROF_DOMAINS )
- return -E2BIG;
- if ( copy_from_user((void *)&active_domains,
- (void *)arg1, arg2*sizeof(int)) )
+ if ( copy_from_guest(&domid, arg, 1) )
return -EFAULT;
- ret = set_active_domains(arg2);
- break;
-
+ ret = add_active_list(domid);
+ break;
+ }
case XENOPROF_reserve_counters:
if ( xenoprof_state != XENOPROF_IDLE )
return -EPERM;
@@ -445,15 +450,34 @@ int do_xenoprof_op(int op, unsigned long
xenoprof_state = XENOPROF_COUNTERS_RESERVED;
break;
+ case XENOPROF_counter:
+ {
+ struct xenoprof_counter counter;
+ if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
+ return -EPERM;
+ if ( adomains == 0 )
+ return -EPERM;
+
+ if ( copy_from_guest(&counter, arg, 1) )
+ return -EFAULT;
+
+ if ( counter.ind > OP_MAX_COUNTER )
+ return -E2BIG;
+
+ counter_config[counter.ind].count = (unsigned long) counter.count;
+ counter_config[counter.ind].enabled = (unsigned long) counter.enabled;
+ counter_config[counter.ind].event = (unsigned long) counter.event;
+ counter_config[counter.ind].kernel = (unsigned long) counter.kernel;
+ counter_config[counter.ind].user = (unsigned long) counter.user;
+ counter_config[counter.ind].unit_mask = (unsigned long) counter.unit_mask;
+
+ ret = 0;
+ break;
+ }
+
case XENOPROF_setup_events:
if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
return -EPERM;
- if ( adomains == 0 )
- set_active_domains(0);
-
- if ( copy_from_user((void *)&counter_config, (void *)arg1,
- arg2 * sizeof(struct op_counter_config)) )
- return -EFAULT;
ret = nmi_setup_events();
if ( !ret )
xenoprof_state = XENOPROF_READY;
@@ -526,3 +550,13 @@ int do_xenoprof_op(int op, unsigned long
return ret;
}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r 123ff1c70728 -r b0ee5d9b9ebd xen/arch/x86/x86_32/entry.S
--- a/xen/arch/x86/x86_32/entry.S Wed Apr 19 18:39:36 2006 +0100
+++ b/xen/arch/x86/x86_32/entry.S Wed Apr 19 18:43:39 2006 +0100
@@ -682,7 +682,7 @@ ENTRY(hypercall_args_table)
.byte 2 /* do_nmi_op */
.byte 2 /* do_arch_sched_op */
.byte 2 /* do_callback_op */ /* 30 */
- .byte 3 /* do_xenoprof_op */
+ .byte 2 /* do_xenoprof_op */
.rept NR_hypercalls-(.-hypercall_args_table)
.byte 0 /* do_ni_hypercall */
.endr
diff -r 123ff1c70728 -r b0ee5d9b9ebd xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S Wed Apr 19 18:39:36 2006 +0100
+++ b/xen/arch/x86/x86_64/entry.S Wed Apr 19 18:43:39 2006 +0100
@@ -590,7 +590,7 @@ ENTRY(hypercall_args_table)
.byte 2 /* do_nmi_op */
.byte 2 /* do_arch_sched_op */
.byte 2 /* do_callback_op */ /* 30 */
- .byte 3 /* do_xenoprof_op */
+ .byte 2 /* do_xenoprof_op */
.rept NR_hypercalls-(.-hypercall_args_table)
.byte 0 /* do_ni_hypercall */
.endr
diff -r 123ff1c70728 -r b0ee5d9b9ebd xen/include/public/xenoprof.h
--- a/xen/include/public/xenoprof.h Wed Apr 19 18:39:36 2006 +0100
+++ b/xen/include/public/xenoprof.h Wed Apr 19 18:43:39 2006 +0100
@@ -12,18 +12,22 @@
#define __XEN_PUBLIC_XENOPROF_H__
/*
- * Commands to HYPERVISOR_pmc_op().
+ * Commands to HYPERVISOR_xenoprof_op().
*/
-#define XENOPROF_init 0
-#define XENOPROF_set_active 1
-#define XENOPROF_reserve_counters 3
-#define XENOPROF_setup_events 4
-#define XENOPROF_enable_virq 5
-#define XENOPROF_start 6
-#define XENOPROF_stop 7
-#define XENOPROF_disable_virq 8
-#define XENOPROF_release_counters 9
-#define XENOPROF_shutdown 10
+#define XENOPROF_init 0
+#define XENOPROF_reset_active_list 1
+#define XENOPROF_reset_passive_list 2
+#define XENOPROF_set_active 3
+#define XENOPROF_set_passive 4
+#define XENOPROF_reserve_counters 5
+#define XENOPROF_counter 6
+#define XENOPROF_setup_events 7
+#define XENOPROF_enable_virq 8
+#define XENOPROF_start 9
+#define XENOPROF_stop 10
+#define XENOPROF_disable_virq 11
+#define XENOPROF_release_counters 12
+#define XENOPROF_shutdown 13
#define MAX_OPROF_EVENTS 32
#define MAX_OPROF_DOMAINS 25
@@ -50,25 +54,29 @@ typedef struct xenoprof_buf {
} xenoprof_buf_t;
DEFINE_GUEST_HANDLE(xenoprof_buf_t);
-typedef struct xenoprof_init_result {
+typedef struct xenoprof_init {
+ int32_t max_samples;
int32_t num_events;
int32_t is_primary;
int32_t nbuf;
int32_t bufsize;
uint64_t buf_maddr;
char cpu_type[XENOPROF_CPU_TYPE_SIZE];
-} xenoprof_init_result_t;
-DEFINE_GUEST_HANDLE(xenoprof_init_result_t);
+} xenoprof_init_t;
+DEFINE_GUEST_HANDLE(xenoprof_init_t);
-typedef struct xenoprof_counter_config {
- unsigned long count;
- unsigned long enabled;
- unsigned long event;
- unsigned long kernel;
- unsigned long user;
- unsigned long unit_mask;
-} xenoprof_counter_config_t;
-DEFINE_GUEST_HANDLE(xenoprof_counter_config_t);
+typedef struct xenoprof_counter {
+ uint32_t ind;
+ uint64_t count;
+ uint32_t enabled;
+ uint32_t event;
+ uint32_t hypervisor;
+ uint32_t kernel;
+ uint32_t user;
+ uint64_t unit_mask;
+} xenoprof_counter_t;
+DEFINE_GUEST_HANDLE(xenoprof_counter_t);
+
#endif /* __XEN_PUBLIC_XENOPROF_H__ */