Please provide me with feedback on this patch. Once some version of this goes in,
I can make xc_ptrace and gdbserver MP-aware.
Thanks.
-Kip
# This is a BitKeeper generated diff -Nru style patch.
#
# ChangeSet
# 2005/05/10 23:14:15-07:00 kmacy@xxxxxxxxxxxxxxxxxxxx
# Split out context fetching into separate DOM0 op
# make GETDOMAININFO a little more sensible with respect to MP
# make coredump dump all cpu contexts
# Signed-off-by: Kip Macy <kmacy@xxxxxxxxxx>
#
# xen/include/public/dom0_ops.h
# 2005/05/10 23:14:12-07:00 kmacy@xxxxxxxxxxxxxxxxxxxx +12 -2
# add getvcpucontext operation
#
# xen/common/dom0_ops.c
# 2005/05/10 23:14:12-07:00 kmacy@xxxxxxxxxxxxxxxxxxxx +77 -19
# aggregate flags across eds
# return all processors in use
#   separate context fetching into its own DOM0 op
#
# tools/python/xen/lowlevel/xc/xc.c
# 2005/05/10 23:14:12-07:00 kmacy@xxxxxxxxxxxxxxxxxxxx +6 -2
# hack alert - cpu specifies lowest numbered processor in use
#
# tools/libxc/xc_vmx_build.c
# 2005/05/10 23:14:12-07:00 kmacy@xxxxxxxxxxxxxxxxxxxx +7 -2
# update for GETDOMAININFO change
#
# tools/libxc/xc_ptrace.c
# 2005/05/10 23:14:12-07:00 kmacy@xxxxxxxxxxxxxxxxxxxx +3 -8
# update for GETDOMAININFO change
#
# tools/libxc/xc_private.c
# 2005/05/10 23:14:12-07:00 kmacy@xxxxxxxxxxxxxxxxxxxx +6 -9
# update for GETDOMAININFO change
#
# tools/libxc/xc_plan9_build.c
# 2005/05/10 23:14:12-07:00 kmacy@xxxxxxxxxxxxxxxxxxxx +7 -3
# update for GETDOMAININFO change
#
# tools/libxc/xc_linux_save.c
# 2005/05/10 23:14:12-07:00 kmacy@xxxxxxxxxxxxxxxxxxxx +18 -8
# update for GETDOMAININFO change
#
# tools/libxc/xc_linux_restore.c
# 2005/05/10 23:14:12-07:00 kmacy@xxxxxxxxxxxxxxxxxxxx +0 -2
# update for GETDOMAININFO change
#
# tools/libxc/xc_linux_build.c
# 2005/05/10 23:14:12-07:00 kmacy@xxxxxxxxxxxxxxxxxxxx +7 -2
# update for GETDOMAININFO change
#
# tools/libxc/xc_domain.c
# 2005/05/10 23:14:12-07:00 kmacy@xxxxxxxxxxxxxxxxxxxx +16 -22
# update GETDOMAININFO usage
#
# tools/libxc/xc_core.c
# 2005/05/10 23:14:12-07:00 kmacy@xxxxxxxxxxxxxxxxxxxx +17 -7
# dump contexts for all cpus
#
# tools/libxc/xc.h
# 2005/05/10 23:14:12-07:00 kmacy@xxxxxxxxxxxxxxxxxxxx +8 -6
# replace xc_domain_getfullinfo with xc_domain_get_vcpu_context
#
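Before the patch itself, here is a minimal sketch (not part of the patch) of how a
tool would walk every vcpu of a domain with the split-out call, following the
xc_domain_getinfo()/xc_domain_get_vcpu_context() pattern the xc_core.c hunk below
adopts. The dump_all_vcpus() name and the printf are purely illustrative, and
error handling is abbreviated.

/* Illustrative only: fetch the context of every active vcpu.  Assumes
 * xc_domain_getinfo() reports n_vcpus and returns the number of
 * domains found, as in the xc_domain.c change below. */
#include <stdio.h>
#include "xc.h"

static int dump_all_vcpus(int xc_handle, u32 domid)
{
    xc_dominfo_t info;
    vcpu_guest_context_t ctxt;
    int i;

    /* Ask for exactly one domain, starting at domid. */
    if ( (xc_domain_getinfo(xc_handle, domid, 1, &info) != 1) ||
         (info.domid != domid) )
        return -1;

    for ( i = 0; i < info.n_vcpus; i++ )
    {
        /* Per-vcpu state now comes from the new DOM0_GETVCPUCONTEXT op;
         * the library mlocks ctxt internally before the hypercall. */
        if ( xc_domain_get_vcpu_context(xc_handle, domid, i, &ctxt) )
            return -1;
        printf("vcpu %d: pt_base=%08lx\n", i, ctxt.pt_base);
    }

    return 0;
}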
diff -Nru a/tools/libxc/xc.h b/tools/libxc/xc.h
--- a/tools/libxc/xc.h 2005-05-09 23:19:24 -07:00
+++ b/tools/libxc/xc.h 2005-05-09 23:19:24 -07:00
@@ -110,7 +110,9 @@
typedef struct {
u32 domid;
- unsigned int cpu;
+ u32 processors;
+ u32 flags;
+ u16 n_vcpus;
unsigned int dying:1, crashed:1, shutdown:1,
paused:1, blocked:1, running:1;
unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
@@ -195,11 +197,11 @@
* domain
* @return 0 on success, -1 on failure
*/
-int xc_domain_getfullinfo(int xc_handle,
- u32 domid,
- u32 vcpu,
- xc_domaininfo_t *info,
- vcpu_guest_context_t *ctxt);
+int xc_domain_get_vcpu_context(int xc_handle,
+ u32 domid,
+ u32 vcpu,
+ vcpu_guest_context_t *ctxt);
+
int xc_domain_setcpuweight(int xc_handle,
u32 domid,
float weight);
diff -Nru a/tools/libxc/xc_core.c b/tools/libxc/xc_core.c
--- a/tools/libxc/xc_core.c 2005-05-09 23:19:24 -07:00
+++ b/tools/libxc/xc_core.c 2005-05-09 23:19:24 -07:00
@@ -7,6 +7,7 @@
/* number of pages to write at a time */
#define DUMP_INCREMENT 4 * 1024
#define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
+
static int
copy_from_domain_page(int xc_handle,
u32 domid,
@@ -28,13 +29,14 @@
u32 domid,
const char *corename)
{
- vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
unsigned long nr_pages;
unsigned long *page_array;
- xc_domaininfo_t st_info, *info = &st_info;
+ xc_dominfo_t info;
int i, dump_fd;
char *dump_mem, *dump_mem_start = NULL;
struct xc_core_header header;
+ vcpu_guest_context_t ctxt[MAX_VIRT_CPUS];
+
if ((dump_fd = open(corename, O_CREAT|O_RDWR, S_IWUSR|S_IRUSR)) < 0) {
PERROR("Could not open corefile %s: %s", corename,
strerror(errno));
@@ -46,14 +48,22 @@
goto error_out;
}
- if (xc_domain_getfullinfo(xc_handle, domid, 0/* XXX hardcode */, info, ctxt)) {
- PERROR("Could not get full info for domain");
+ if (xc_domain_getinfo(xc_handle, domid, 1, &info)) {
+ PERROR("Could not get info for domain");
goto error_out;
}
+
+ for (i = 0; i < info.n_vcpus; i++) {
+ if (xc_domain_get_vcpu_context(xc_handle, domid, i, &ctxt[i])) {
+ PERROR("Could not get all vcpu contexts for domain");
+ goto error_out;
+ }
+ }
+
+ nr_pages = info.nr_pages;
- nr_pages = info->tot_pages;
header.xch_magic = 0xF00FEBED;
- header.xch_nr_vcpus = 1; /* no interface to query at the moment */
+ header.xch_nr_vcpus = info.n_vcpus;
header.xch_nr_pages = nr_pages;
header.xch_ctxt_offset = sizeof(struct xc_core_header);
header.xch_index_offset = sizeof(struct xc_core_header) +
@@ -62,7 +72,7 @@
sizeof(vcpu_guest_context_t) + nr_pages * sizeof(unsigned long));
write(dump_fd, &header, sizeof(struct xc_core_header));
- write(dump_fd, ctxt, sizeof(st_ctxt));
+ write(dump_fd, &ctxt, sizeof(ctxt[0]) * info.n_vcpus);
if ((page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL) {
printf("Could not allocate memory\n");
diff -Nru a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c 2005-05-09 23:19:24 -07:00
+++ b/tools/libxc/xc_domain.c 2005-05-09 23:19:24 -07:00
@@ -109,14 +109,12 @@
{
op.cmd = DOM0_GETDOMAININFO;
op.u.getdomaininfo.domain = (domid_t)next_domid;
- op.u.getdomaininfo.exec_domain = 0; // FIX ME?!?
- op.u.getdomaininfo.ctxt = NULL; /* no exec context info, thanks. */
if ( (rc = do_dom0_op(xc_handle, &op)) < 0 )
break;
- info->domid = (u16)op.u.getdomaininfo.domain;
-
- info->cpu =
- (op.u.getdomaininfo.flags>>DOMFLAGS_CPUSHIFT) & DOMFLAGS_CPUMASK;
+ info->domid = (u16)op.u.getdomaininfo.domain;
+ info->processors = op.u.getdomaininfo.processors;
+ info->n_vcpus = op.u.getdomaininfo.n_active_vcpus;
+ info->flags = op.u.getdomaininfo.flags;
info->dying = !!(op.u.getdomaininfo.flags & DOMFLAGS_DYING);
info->crashed = !!(op.u.getdomaininfo.flags & DOMFLAGS_CRASHED);
@@ -134,28 +132,27 @@
info->shared_info_frame = op.u.getdomaininfo.shared_info_frame;
info->cpu_time = op.u.getdomaininfo.cpu_time;
- next_domid = (u16)op.u.getdomaininfo.domain + 1;
- info++;
+ next_domid = (u16)op.u.getdomaininfo.domain + 1;
+ info++;
}
- if(!nr_doms) return rc;
+ if( !nr_doms ) return rc;
return nr_doms;
}
-int xc_domain_getfullinfo(int xc_handle,
- u32 domid,
- u32 vcpu,
- xc_domaininfo_t *info,
- vcpu_guest_context_t *ctxt)
+int xc_domain_get_vcpu_context(int xc_handle,
+ u32 domid,
+ u32 vcpu,
+ vcpu_guest_context_t *ctxt)
{
int rc, errno_saved;
dom0_op_t op;
- op.cmd = DOM0_GETDOMAININFO;
- op.u.getdomaininfo.domain = (domid_t)domid;
- op.u.getdomaininfo.exec_domain = (u16)vcpu;
- op.u.getdomaininfo.ctxt = ctxt;
+ op.cmd = DOM0_GETVCPUCONTEXT;
+ op.u.getvcpucontext.domain = (domid_t)domid;
+ op.u.getvcpucontext.exec_domain = (u16)vcpu;
+ op.u.getvcpucontext.ctxt = ctxt;
if ( (ctxt != NULL) &&
((rc = mlock(ctxt, sizeof(*ctxt))) != 0) )
@@ -170,10 +167,7 @@
errno = errno_saved;
}
- if ( info != NULL )
- memcpy(info, &op.u.getdomaininfo, sizeof(*info));
-
- if ( ((u16)op.u.getdomaininfo.domain != domid) && (rc > 0) )
+ if ( rc > 0 )
return -ESRCH;
else
return rc;
diff -Nru a/tools/libxc/xc_linux_build.c b/tools/libxc/xc_linux_build.c
--- a/tools/libxc/xc_linux_build.c 2005-05-09 23:19:24 -07:00
+++ b/tools/libxc/xc_linux_build.c 2005-05-09 23:19:24 -07:00
@@ -356,14 +356,19 @@
op.cmd = DOM0_GETDOMAININFO;
op.u.getdomaininfo.domain = (domid_t)domid;
- op.u.getdomaininfo.exec_domain = 0;
- op.u.getdomaininfo.ctxt = ctxt;
if ( (do_dom0_op(xc_handle, &op) < 0) ||
((u16)op.u.getdomaininfo.domain != domid) )
{
PERROR("Could not get info on domain");
goto error_out;
}
+
+ if ( xc_domain_get_vcpu_context(xc_handle, domid, 0, ctxt) )
+ {
+ PERROR("Could not get vcpu context");
+ goto error_out;
+ }
+
if ( !(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED) ||
(ctxt->pt_base != 0) )
{
diff -Nru a/tools/libxc/xc_linux_restore.c b/tools/libxc/xc_linux_restore.c
--- a/tools/libxc/xc_linux_restore.c 2005-05-09 23:19:24 -07:00
+++ b/tools/libxc/xc_linux_restore.c 2005-05-09 23:19:24 -07:00
@@ -181,8 +181,6 @@
/* Get the domain's shared-info frame. */
op.cmd = DOM0_GETDOMAININFO;
op.u.getdomaininfo.domain = (domid_t)dom;
- op.u.getdomaininfo.exec_domain = 0;
- op.u.getdomaininfo.ctxt = NULL;
if ( do_dom0_op(xc_handle, &op) < 0 )
{
xcio_error(ioctxt, "Could not get information on new domain");
diff -Nru a/tools/libxc/xc_linux_save.c b/tools/libxc/xc_linux_save.c
--- a/tools/libxc/xc_linux_save.c 2005-05-09 23:19:24 -07:00
+++ b/tools/libxc/xc_linux_save.c 2005-05-09 23:19:24 -07:00
@@ -324,7 +324,7 @@
int suspend_and_state(int xc_handle, XcIOContext *ioctxt,
- xc_domaininfo_t *info,
+ xc_dominfo_t *info,
vcpu_guest_context_t *ctxt)
{
int i=0;
@@ -333,13 +333,18 @@
retry:
- if ( xc_domain_getfullinfo(xc_handle, ioctxt->domain, /* FIXME */ 0,
- info, ctxt) )
+ if ( xc_domain_getinfo(xc_handle, ioctxt->domain, 1, info) )
{
xcio_error(ioctxt, "Could not get full domain info");
return -1;
}
+ if ( xc_domain_get_vcpu_context(xc_handle, ioctxt->domain, 0 /* XXX */,
+ ctxt) )
+ {
+ xcio_error(ioctxt, "Could not get vcpu context");
+ }
+
if ( (info->flags &
(DOMFLAGS_SHUTDOWN | (SHUTDOWN_suspend<<DOMFLAGS_SHUTDOWNSHIFT))) ==
(DOMFLAGS_SHUTDOWN | (SHUTDOWN_suspend<<DOMFLAGS_SHUTDOWNSHIFT)) )
@@ -374,7 +379,7 @@
int xc_linux_save(int xc_handle, XcIOContext *ioctxt)
{
- xc_domaininfo_t info;
+ xc_dominfo_t info;
int rc = 1, i, j, k, last_iter, iter = 0;
unsigned long mfn;
@@ -444,13 +449,18 @@
xcio_perror(ioctxt, "Unable to mlock ctxt");
return 1;
}
-
- if ( xc_domain_getfullinfo( xc_handle, domid, /* FIXME */ 0,
- &info, &ctxt) )
+
+ if ( xc_domain_getinfo(xc_handle, domid, 1, &info) )
{
xcio_error(ioctxt, "Could not get full domain info");
goto out;
}
+ if ( xc_domain_get_vcpu_context( xc_handle, domid, /* FIXME */ 0,
+ &ctxt) )
+ {
+ xcio_error(ioctxt, "Could not get vcpu context");
+ goto out;
+ }
shared_info_frame = info.shared_info_frame;
/* A cheesy test to see whether the domain contains valid state. */
@@ -459,7 +469,7 @@
goto out;
}
- nr_pfns = info.max_pages;
+ nr_pfns = info.nr_pages;
/* cheesy sanity check */
if ( nr_pfns > 1024*1024 ){
diff -Nru a/tools/libxc/xc_plan9_build.c b/tools/libxc/xc_plan9_build.c
--- a/tools/libxc/xc_plan9_build.c 2005-05-09 23:19:24 -07:00
+++ b/tools/libxc/xc_plan9_build.c 2005-05-09 23:19:24 -07:00
@@ -440,17 +440,21 @@
op.cmd = DOM0_GETDOMAININFO;
op.u.getdomaininfo.domain = (domid_t) domid;
- op.u.getdomaininfo.exec_domain = 0;
- op.u.getdomaininfo.ctxt = ctxt;
if ((do_dom0_op(xc_handle, &op) < 0) ||
((u32) op.u.getdomaininfo.domain != domid)) {
PERROR("Could not get info on domain");
goto error_out;
}
DPRINTF(("xc_get_tot_pages returns %ld pages\n", tot_pages));
+
+ if ( xc_domain_get_vcpu_context(xc_handle, domid, 0, ctxt) )
+ {
+ PERROR("Could not get vcpu context");
+ goto error_out;
+ }
if (!(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED)
- || (op.u.getdomaininfo.ctxt->pt_base != 0)) {
+ || (ctxt->pt_base != 0)) {
ERROR("Domain is already constructed");
goto error_out;
}
diff -Nru a/tools/libxc/xc_private.c b/tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c 2005-05-09 23:19:24 -07:00
+++ b/tools/libxc/xc_private.c 2005-05-09 23:19:24 -07:00
@@ -173,17 +173,16 @@
{
dom0_op_t op;
- op.cmd = DOM0_GETDOMAININFO;
- op.u.getdomaininfo.domain = (domid_t)domid;
- op.u.getdomaininfo.exec_domain = (u16)vcpu;
- op.u.getdomaininfo.ctxt = NULL;
- if ( (do_dom0_op(xc_handle, &op) < 0) ||
- ((u16)op.u.getdomaininfo.domain != domid) )
+ op.cmd = DOM0_GETVCPUCONTEXT;
+ op.u.getvcpucontext.domain = (domid_t)domid;
+ op.u.getvcpucontext.exec_domain = (u16)vcpu;
+ op.u.getvcpucontext.ctxt = NULL;
+ if ( (do_dom0_op(xc_handle, &op) < 0) )
{
PERROR("Could not get info on domain");
return -1;
}
- return op.u.getdomaininfo.cpu_time;
+ return op.u.getvcpucontext.cpu_time;
}
@@ -258,8 +257,6 @@
dom0_op_t op;
op.cmd = DOM0_GETDOMAININFO;
op.u.getdomaininfo.domain = (domid_t)domid;
- op.u.getdomaininfo.exec_domain = 0;
- op.u.getdomaininfo.ctxt = NULL;
return (do_dom0_op(xc_handle, &op) < 0) ?
-1 : op.u.getdomaininfo.tot_pages;
}
diff -Nru a/tools/libxc/xc_ptrace.c b/tools/libxc/xc_ptrace.c
--- a/tools/libxc/xc_ptrace.c 2005-05-09 23:19:24 -07:00
+++ b/tools/libxc/xc_ptrace.c 2005-05-09 23:19:24 -07:00
@@ -71,7 +71,7 @@
#define FETCH_REGS(cpu) \
if (!regs_valid[cpu]) \
{ \
- int retval = xc_domain_getfullinfo(xc_handle, domid, cpu, NULL, &ctxt[cpu]); \
+ int retval = xc_domain_get_vcpu_context(xc_handle, domid, cpu, &ctxt[cpu]); \
if (retval) \
goto error_out; \
cr3[cpu] = ctxt[cpu].pt_base; /* physical address */ \
@@ -221,7 +221,6 @@
{
dom0_op_t op;
int retval;
- vcpu_guest_context_t ctxt;
struct timespec ts;
ts.tv_sec = 0;
ts.tv_nsec = 10*1000*1000;
@@ -234,12 +233,10 @@
}
op.cmd = DOM0_GETDOMAININFO;
op.u.getdomaininfo.domain = domain;
- op.u.getdomaininfo.exec_domain = 0;
- op.u.getdomaininfo.ctxt = &ctxt;
retry:
retval = do_dom0_op(xc_handle, &op);
- if (retval) {
+ if (retval || op.u.getdomaininfo.domain != domain) {
printf("getdomaininfo failed\n");
goto done;
}
@@ -325,10 +322,8 @@
case PTRACE_ATTACH:
op.cmd = DOM0_GETDOMAININFO;
op.u.getdomaininfo.domain = domid;
- op.u.getdomaininfo.exec_domain = 0;
- op.u.getdomaininfo.ctxt = NULL;
retval = do_dom0_op(xc_handle, &op);
- if (retval) {
+ if (retval || op.u.getdomaininfo.domain != domid) {
perror("dom0 op failed");
goto error_out;
}
diff -Nru a/tools/libxc/xc_vmx_build.c b/tools/libxc/xc_vmx_build.c
--- a/tools/libxc/xc_vmx_build.c 2005-05-09 23:19:24 -07:00
+++ b/tools/libxc/xc_vmx_build.c 2005-05-09 23:19:24 -07:00
@@ -543,14 +543,19 @@
op.cmd = DOM0_GETDOMAININFO;
op.u.getdomaininfo.domain = (domid_t)domid;
- op.u.getdomaininfo.exec_domain = 0;
- op.u.getdomaininfo.ctxt = ctxt;
if ( (do_dom0_op(xc_handle, &op) < 0) ||
((u16)op.u.getdomaininfo.domain != domid) )
{
PERROR("Could not get info on domain");
goto error_out;
}
+
+ if ( xc_domain_get_vcpu_context(xc_handle, domid, 0, ctxt) )
+ {
+ PERROR("Could not get vcpu context");
+ goto error_out;
+ }
+
if ( !(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED) ||
(ctxt->pt_base != 0) )
{
diff -Nru a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c 2005-05-09 23:19:24 -07:00
+++ b/tools/python/xen/lowlevel/xc/xc.c 2005-05-09 23:19:24 -07:00
@@ -176,7 +176,7 @@
{
XcObject *xc = (XcObject *)self;
PyObject *list;
-
+ u32 cpu;
u32 first_dom = 0;
int max_doms = 1024, nr_doms, i;
xc_dominfo_t *info;
@@ -193,14 +193,18 @@
nr_doms = xc_domain_getinfo(xc->xc_handle, first_dom, max_doms, info);
list = PyList_New(nr_doms);
+
+
for ( i = 0 ; i < nr_doms; i++ )
{
+ /* XXX return the lowest numbered cpu in use */
+ cpu = ffs(info[i].processors);
PyList_SetItem(
list, i,
Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
",s:l,s:L,s:l,s:i}",
"dom", info[i].domid,
- "cpu", info[i].cpu,
+ "cpu", cpu,
"dying", info[i].dying,
"crashed", info[i].crashed,
"shutdown", info[i].shutdown,
diff -Nru a/xen/common/dom0_ops.c b/xen/common/dom0_ops.c
--- a/xen/common/dom0_ops.c 2005-05-09 23:19:24 -07:00
+++ b/xen/common/dom0_ops.c 2005-05-09 23:19:24 -07:00
@@ -278,9 +278,16 @@
case DOM0_GETDOMAININFO:
{
- struct vcpu_guest_context *c;
- struct domain *d;
- struct exec_domain *ed;
+ struct domain *d;
+ struct exec_domain *ed;
+ u64 cpu_time = 0;
+ int vcpu_count = 0;
+ u32 processors = 0;
+ int flags = DOMFLAGS_PAUSED | DOMFLAGS_BLOCKED;
+
+#if MAX_VIRT_CPUS > 32
+#error "update processors field in GETDOMAININFO"
+#endif
read_lock(&domlist_lock);
@@ -300,35 +307,86 @@
read_unlock(&domlist_lock);
op->u.getdomaininfo.domain = d->id;
-
- if ( (op->u.getdomaininfo.exec_domain >= MAX_VIRT_CPUS) ||
- !d->exec_domain[op->u.getdomaininfo.exec_domain] )
- {
- ret = -EINVAL;
- break;
- }
- ed = d->exec_domain[op->u.getdomaininfo.exec_domain];
+ /*
+ * - domain is marked as paused or blocked only if all its vcpus
+ * are paused or blocked
+ * - domain is marked as running if any of its vcpus is running
+ */
+
+ for_each_exec_domain(d, ed)
+ {
+ if (!((flags & DOMFLAGS_PAUSED) && test_bit(EDF_CTRLPAUSE, &ed->flags)))
+ flags &= ~DOMFLAGS_PAUSED;
+ if (!((flags & DOMFLAGS_BLOCKED) && test_bit(EDF_BLOCKED, &ed->flags)))
+ flags &= ~DOMFLAGS_BLOCKED;
+ flags |= (test_bit(EDF_RUNNING, &ed->flags) ? DOMFLAGS_RUNNING : 0);
+
+ set_bit(ed->processor, &processors);
+ if ( ed->cpu_time > cpu_time )
+ cpu_time += ed->cpu_time;
+ vcpu_count++;
+ }
+ op->u.getdomaininfo.n_active_vcpus = vcpu_count;
+ op->u.getdomaininfo.cpu_time = cpu_time;
op->u.getdomaininfo.flags =
(test_bit( DF_DYING, &d->flags) ? DOMFLAGS_DYING : 0) |
(test_bit( DF_CRASHED, &d->flags) ? DOMFLAGS_CRASHED : 0) |
(test_bit( DF_SHUTDOWN, &d->flags) ? DOMFLAGS_SHUTDOWN : 0) |
- (test_bit(EDF_CTRLPAUSE, &ed->flags) ? DOMFLAGS_PAUSED : 0) |
- (test_bit(EDF_BLOCKED, &ed->flags) ? DOMFLAGS_BLOCKED : 0) |
- (test_bit(EDF_RUNNING, &ed->flags) ? DOMFLAGS_RUNNING : 0);
-
- op->u.getdomaininfo.flags |= ed->processor << DOMFLAGS_CPUSHIFT;
+ flags;
+
op->u.getdomaininfo.flags |=
d->shutdown_code << DOMFLAGS_SHUTDOWNSHIFT;
+ op->u.getdomaininfo.processors = processors;
op->u.getdomaininfo.tot_pages = d->tot_pages;
op->u.getdomaininfo.max_pages = d->max_pages;
- op->u.getdomaininfo.cpu_time = ed->cpu_time;
op->u.getdomaininfo.shared_info_frame =
__pa(d->shared_info) >> PAGE_SHIFT;
- if ( op->u.getdomaininfo.ctxt != NULL )
+
+ if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
+ ret = -EINVAL;
+
+ put_domain(d);
+ }
+ break;
+ case DOM0_GETVCPUCONTEXT:
+ {
+ struct vcpu_guest_context *c;
+ struct domain *d;
+ struct exec_domain *ed;
+ int active_index = 0;
+ int exec_domain_index;
+
+ exec_domain_index = op->u.getvcpucontext.exec_domain;
+ d = find_domain_by_id(op->u.getvcpucontext.domain);
+
+ if ( d == NULL )
+ {
+ ret = -ESRCH;
+ break;
+ }
+
+ if ( (exec_domain_index >= MAX_VIRT_CPUS) )
+ {
+ ret = -EINVAL;
+ break;
+ }
+
+ for_each_exec_domain(d, ed)
+ {
+ if ( exec_domain_index == active_index )
+ {
+ op->u.getvcpucontext.exec_domain = ed->id;
+ break;
+ }
+ active_index++;
+ }
+ op->u.getvcpucontext.cpu_time = ed->cpu_time;
+
+ if ( op->u.getvcpucontext.ctxt != NULL )
{
if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
{
@@ -345,7 +403,7 @@
if ( ed != current )
exec_domain_unpause(ed);
- if ( copy_to_user(op->u.getdomaininfo.ctxt, c, sizeof(*c)) )
+ if ( copy_to_user(op->u.getvcpucontext.ctxt, c, sizeof(*c)) )
ret = -EINVAL;
xfree(c);
diff -Nru a/xen/include/public/dom0_ops.h b/xen/include/public/dom0_ops.h
--- a/xen/include/public/dom0_ops.h 2005-05-09 23:19:24 -07:00
+++ b/xen/include/public/dom0_ops.h 2005-05-09 23:19:24 -07:00
@@ -70,7 +70,7 @@
typedef struct {
/* IN variables. */
domid_t domain; /* NB. IN/OUT variable. */
- u16 exec_domain;
+ u16 n_active_vcpus; /* # of vcpus currently active */
/* OUT variables. */
#define DOMFLAGS_DYING (1<<0) /* Domain is scheduled to die. */
#define DOMFLAGS_CRASHED (1<<1) /* Crashed domain; frozen for postmortem. */
@@ -83,7 +83,7 @@
#define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code. */
#define DOMFLAGS_SHUTDOWNSHIFT 16
u32 flags;
- vcpu_guest_context_t *ctxt; /* NB. IN/OUT variable. */
+ u32 processors;
memory_t tot_pages;
memory_t max_pages;
memory_t shared_info_frame; /* MFN of shared_info struct */
@@ -342,6 +342,15 @@
u16 allow_access; /* allow or deny access to range? */
} dom0_ioport_permission_t;
+#define DOM0_GETVCPUCONTEXT 37
+typedef struct {
+ domid_t domain; /* domain to be affected */
+ u16 exec_domain; /* NB. IN: nth active cpu / OUT: actual cpu # */
+ vcpu_guest_context_t *ctxt; /* NB. IN/OUT variable. */
+ u64 cpu_time;
+} dom0_getvcpucontext_t;
+
+
typedef struct {
u32 cmd;
u32 interface_version; /* DOM0_INTERFACE_VERSION */
@@ -373,6 +382,7 @@
dom0_perfccontrol_t perfccontrol;
dom0_microcode_t microcode;
dom0_ioport_permission_t ioport_permission;
+ dom0_getvcpucontext_t getvcpucontext;
} u;
} dom0_op_t;
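One subtlety for reviewers that the dom0_ops.h comment above only hints at:
getvcpucontext.exec_domain is an IN/OUT field, so the caller passes the index of
the nth *active* vcpu and gets back the actual vcpu id Xen resolved it to. A
hedged sketch of how that looks from inside libxc, mirroring the xc_private.c
change (the function name is hypothetical and error handling is abbreviated):

/* Illustrative only: query the nth active vcpu's id and cpu time via
 * DOM0_GETVCPUCONTEXT, passing ctxt == NULL so no guest context is
 * copied back. */
#include <stdio.h>
#include "xc_private.h"

static int print_nth_vcpu_time(int xc_handle, u32 domid, unsigned int n)
{
    dom0_op_t op;

    op.cmd = DOM0_GETVCPUCONTEXT;
    op.u.getvcpucontext.domain      = (domid_t)domid;
    op.u.getvcpucontext.exec_domain = (u16)n;   /* IN: nth active vcpu */
    op.u.getvcpucontext.ctxt        = NULL;     /* no context wanted   */

    if ( do_dom0_op(xc_handle, &op) < 0 )
        return -1;

    /* OUT: exec_domain now holds the actual vcpu id. */
    printf("vcpu index %u -> id %u, cpu_time %llu\n",
           n, (unsigned)op.u.getvcpucontext.exec_domain,
           (unsigned long long)op.u.getvcpucontext.cpu_time);

    return 0;
}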