
[Xen-devel] [PATCH 3/3] Some "make check" tests for xen/common



Signed-off-by: Tony Breeds <tony@xxxxxxxxxxxxxxxxxx>

----- Forwarded message from Rusty Russell <rusty@xxxxxxxxxxxxxxx> -----

To: Tony Breeds <tony@xxxxxxxxxxxxxxxxxx>
From: Rusty Russell <rusty@xxxxxxxxxxxxxxx>
Subject: [PATCH 3/3] Some "make check" tests for xen/common
Date: Fri, 09 Dec 2005 16:47:43 +1100

Some of these tests could probably be improved, but this is a first cut.
Not all the files are tested yet, just enough to know we're on the right
track.
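
Each test compiles one xen/common source file into a standalone user-space
program, with the hypervisor environment replaced by the fake-support.h
helpers the tests include.  As a rough sketch only (this skeleton is not
part of the patch, and example.c / do_example_op() are made-up stand-ins),
a test ends up shaped like this:

    /* Illustrative skeleton of a "make check" test.  example.c and
     * do_example_op() are hypothetical names, not files in this patch. */
    #include <assert.h>
    #include <errno.h>
    #include <stdlib.h>

    #include "fake-support.h"   /* test_cond(), parse_test_args(), fake_* knobs */

    #include "../example.c"     /* pull the code under test into this program */

    int main(int argc, char *argv[])
    {
            parse_test_args(argc, argv);

            /* An unprivileged caller should be turned away. */
            fake_IS_PRIV_out = 0;
            test_cond(do_example_op(fake_to_user(NULL)) == -EPERM);

            return 0;
    }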

Signed-off-by: Rusty Russell <rusty@xxxxxxxxxxxxxxx>

diff -urN --exclude=.hg --exclude='*~' --exclude='*.aux' xen-unstable.hg-mainline/xen/common/test/test_ac_timer.c xen-unstable.hg-check/xen/common/test/test_ac_timer.c
--- xen-unstable.hg-mainline/xen/common/test/test_ac_timer.c    1970-01-01 10:00:00.000000000 +1000
+++ xen-unstable.hg-check/xen/common/test/test_ac_timer.c       2005-12-09 16:33:19.000000000 +1100
@@ -0,0 +1,241 @@
+/*  Tests for timers.
+    Copyright (C) 2005 Rusty Russell IBM Corporation
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
+ */
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+#include "fake-support.h"
+
+#define AC_TIMER_SOFTIRQ                  0
+typedef void (*softirq_handler)(void);
+static softirq_handler _test_handler;
+static inline void open_softirq(int nr, softirq_handler handler)
+{
+       test_cond(nr == AC_TIMER_SOFTIRQ);
+       _test_handler = handler;
+}
+
+#include "../../include/xen/ac_timer.h"
+#include "../ac_timer.c"
+
+#define TEST_NUM_TIMERS 10
+
+struct test_timer
+{
+       struct ac_timer timer;
+       unsigned int timer_count;
+       unsigned long expiry;
+};
+static struct test_timer _test_timers[TEST_NUM_TIMERS];
+
+static void _test_timer(void *data)
+{
+       struct test_timer *t = data;
+
+       t->timer_count++;
+
+       /* Previous timer should have gone off before us. */
+       if (t != _test_timers)
+               test_cond(t[-1].timer_count == t[0].timer_count);
+
+       /* Should be about the right time. */
+       test_cond(t->expiry < NOW() + TIMER_SLOP);
+}
+
+static int _test_reprogram = 1;
+static int _test_reprogram_timeout = 1;
+int reprogram_ac_timer(s_time_t timeout)
+{
+       _test_reprogram_timeout = timeout;
+       return _test_reprogram;
+}
+
+static s_time_t _test_now = 0;
+s_time_t NOW(void)
+{
+       return _test_now;
+}
+
+static int _test_selfadd = 1;
+static void _test_timer_selfadd(void *data)
+{
+       if (_test_selfadd)
+               set_ac_timer(data, _test_now + TIMER_SLOP*2);
+}
+
+static int _test_smp_processor_id = 0;
+int smp_processor_id(void)
+{
+       return _test_smp_processor_id;
+}
+
+void printk(const char *fmt, ...)
+{
+       va_list arglist;
+
+       va_start(arglist, fmt);
+       vfprintf(stderr, fmt, arglist);
+       va_end(arglist);
+}
+
+int main(int argc, char *argv[])
+{
+       unsigned int i;
+       struct ac_timer timer;
+
+       parse_test_args(argc, argv);
+       register_suppression("../ac_timer.c", 128);
+
+       /* Initialize first. */
+       ac_timer_init();
+
+       /* Self-adding timer should work. */
+       _test_now = 0;
+       init_ac_timer(&timer, _test_timer_selfadd, &timer, 0);
+       fake_expect_xmalloc = 1;
+       set_ac_timer(&timer, 1);
+       _test_now = 1;
+       _test_handler();
+       test_cond(active_ac_timer(&timer));
+       /* Timer should still be running. */
+       test_cond(_test_reprogram_timeout != 0);
+       _test_selfadd = 0;
+       _test_now = LONG_MAX;
+       _test_handler();
+       test_cond(!active_ac_timer(&timer));
+       /* Timer should be stopped. */
+       test_cond(_test_reprogram_timeout == 0);
+
+       /* Adding timer on other CPU should work. */
+       _test_now = 0;
+       init_ac_timer(&timer, _test_timer_selfadd, &timer, 1);
+       fake_expect_xmalloc = 1;
+       set_ac_timer(&timer, 1);
+       test_cond(active_ac_timer(&timer));
+       /* Wrong CPU, will do nothing. */
+       _test_handler();
+       test_cond(active_ac_timer(&timer));
+       _test_smp_processor_id = 1;
+       _test_handler();
+       test_cond(!active_ac_timer(&timer));
+       _test_smp_processor_id = 0;
+
+       /* Create them in expiry order, with +/- 1. */
+       for (i = 0; i < TEST_NUM_TIMERS; i++) {
+               _test_timers[i].timer_count = 0;
+               init_ac_timer(&_test_timers[i].timer, _test_timer,
+                             &_test_timers[i], 0);
+               _test_timers[i].expiry
+                       = LONG_MAX/(TEST_NUM_TIMERS/3+1)*((i+3)/3) + i%3;
+       }
+
+       /* Add them all. */
+       for (i = 0; i < TEST_NUM_TIMERS; i++)
+               set_ac_timer(&_test_timers[i].timer, _test_timers[i].expiry);
+       /* They can be re-added (noop) */
+       for (i = 0; i < TEST_NUM_TIMERS; i++)
+               set_ac_timer(&_test_timers[i].timer, _test_timers[i].expiry);
+
+       /* Delete in random order then re-add. */
+       for (i = 0; i < TEST_NUM_TIMERS/2; i++)
+               rem_ac_timer(&_test_timers[i*2].timer);
+       for (i = 0; i < TEST_NUM_TIMERS/2; i++)
+               rem_ac_timer(&_test_timers[i*2+1].timer);
+       for (i = 0; i < TEST_NUM_TIMERS; i++)
+               test_cond(!active_ac_timer(&_test_timers[i].timer));
+
+       for (i = 0; i < TEST_NUM_TIMERS; i++)
+               set_ac_timer(&_test_timers[i].timer, _test_timers[i].expiry);
+
+       for (i = 0; i < TEST_NUM_TIMERS; i++)
+               test_cond(active_ac_timer(&_test_timers[i].timer));
+
+       /* Expire them in order. */
+       _test_now = 0;
+       for (i = 1; i <= TEST_NUM_TIMERS/3; i++) {
+               _test_now = LONG_MAX/(TEST_NUM_TIMERS/3+1)*i;
+               _test_handler();
+       }
+       _test_now = LONG_MAX;
+       _test_handler();
+
+       /* They must have all gone off */
+       for (i = 0; i < TEST_NUM_TIMERS; i++) {
+               test_cond(!active_ac_timer(&_test_timers[i].timer));
+               test_cond(_test_timers[i].timer_count == 1);
+       }
+
+       /* Add them in backwards order. */
+       for (i = 0; i < TEST_NUM_TIMERS; i++)
+               set_ac_timer(&_test_timers[TEST_NUM_TIMERS-1-i].timer,
+                            _test_timers[TEST_NUM_TIMERS-1-i].expiry);
+
+       for (i = 0; i < TEST_NUM_TIMERS; i++)
+               test_cond(active_ac_timer(&_test_timers[i].timer));
+
+       /* Expire them in order. */
+       _test_now = 0;
+       for (i = 1; i <= TEST_NUM_TIMERS/3; i++) {
+               _test_now = LONG_MAX/(TEST_NUM_TIMERS/3+1)*i;
+               _test_handler();
+       }
+       _test_now = LONG_MAX;
+       _test_handler();
+
+       /* They must have all gone off */
+       for (i = 0; i < TEST_NUM_TIMERS; i++) {
+               test_cond(!active_ac_timer(&_test_timers[i].timer));
+               test_cond(_test_timers[i].timer_count == 2);
+       }
+
+       /* Add them in "random" order. */
+       test_cond(TEST_NUM_TIMERS%2 == 0);
+       for (i = 0; i < TEST_NUM_TIMERS/2; i++)
+               set_ac_timer(&_test_timers[i*2].timer,
+                            _test_timers[i*2].expiry);
+
+       for (i = 0; i < TEST_NUM_TIMERS/2; i++)
+               set_ac_timer(&_test_timers[i*2+1].timer,
+                            _test_timers[i*2+1].expiry);
+
+       for (i = 0; i < TEST_NUM_TIMERS; i++)
+               test_cond(active_ac_timer(&_test_timers[i].timer));
+
+       /* Expire them in order. */
+       _test_now = 0;
+       for (i = 1; i <= TEST_NUM_TIMERS/3; i++) {
+               _test_now = LONG_MAX/(TEST_NUM_TIMERS/3+1)*i;
+               _test_handler();
+       }
+       _test_now = LONG_MAX;
+       _test_handler();
+
+       /* They must have all gone off */
+       for (i = 0; i < TEST_NUM_TIMERS; i++) {
+               test_cond(!active_ac_timer(&_test_timers[i].timer));
+               test_cond(_test_timers[i].timer_count == 3);
+       }
+
+       return 0;
+}
diff -urN --exclude=.hg --exclude='*~' --exclude='*.aux' xen-unstable.hg-mainline/xen/common/test/test_acm_ops.c xen-unstable.hg-check/xen/common/test/test_acm_ops.c
--- xen-unstable.hg-mainline/xen/common/test/test_acm_ops.c     1970-01-01 10:00:00.000000000 +1000
+++ xen-unstable.hg-check/xen/common/test/test_acm_ops.c        2005-12-09 16:33:19.000000000 +1100
@@ -0,0 +1,192 @@
+/*  Tests for acm_ops
+    Copyright (C) 2005 Tony Breeds IBM Corporation
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
+ */
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <stdlib.h>
+
+#include "fake-support.h"
+
+#include "../../include/public/acm.h"
+#include "../../include/public/acm_ops.h"
+
+#define _TEST_ERROR -1
+
+/* BEGIN: Move to fake-include.h / fake.c */
+struct acm_ssid_domain {
+    ssidref_t ssidref;
+};
+/*   END: Move to fake-include.h / fake.c */
+
+static int _test_acm_error = 0;
+static int _test_find_error = 0;
+static struct domain *_test_domain;
+
+static int acm_set_policy(void *buf, u32 buf_size, int isuserbuffer)
+{
+       return !_test_acm_error?ACM_OK:_TEST_ERROR;
+}
+static int acm_get_policy(void *buf, u32 buf_size)
+{
+       return !_test_acm_error?ACM_OK:_TEST_ERROR;
+}
+static int acm_dump_statistics(void *buf, u16 buf_size)
+{
+       return !_test_acm_error?ACM_OK:_TEST_ERROR;
+}
+static int acm_get_ssid(ssidref_t ssidref, u8 *buf, u16 buf_size)
+{
+       return !_test_acm_error?ACM_OK:_TEST_ERROR;
+}
+static int acm_get_decision(ssidref_t ssidref1, ssidref_t ssidref2, 
+                            enum acm_hook_type hook)
+{
+       return !_test_acm_error?ACM_OK:_TEST_ERROR;
+}
+
+/* FIXME: Use fake.c's version. */
+struct domain *find_domain_by_id(domid_t dom)
+{
+       return !_test_find_error?_test_domain:NULL;
+}
+
+/* Kill printks */
+#define printkd(fmt, args...)
+#define printk(fmt, args...)   
+
+/* Force the non-trivial case */
+#define ACM_SECURITY
+
+/* Defined in "../acm_ops.c" */
+long do_acm_op(struct acm_op * u_acm_op);
+enum acm_operation;
+int acm_authorize_acm_ops(struct domain *d, enum acm_operation pops);
+
+/* Avoid ref counting */
+#define put_domain(d)  do { } while (0)
+
+#include "../acm_ops.c"
+
+int main(int argc, char *argv[])
+{
+       enum acm_operation acm_op;
+       struct acm_op *user_acm_op;
+
+       parse_test_args(argc, argv);
+
+       current = calloc(sizeof(struct vcpu), 1);
+       current->domain = calloc(sizeof(struct domain), 1);
+       user_acm_op = calloc(sizeof(struct acm_op), 1);
+
+       _test_domain = calloc(sizeof(struct domain), 1);
+       _test_domain->ssid = calloc(sizeof(struct acm_ssid_domain), 1);
+
+       /* Test acm_authorize_acm_ops */
+       fake_IS_PRIV_out = 0;
+       test_cond(acm_authorize_acm_ops(current->domain, acm_op) == 
+              -EPERM);
+
+       fake_IS_PRIV_out = 1;
+       test_cond(!acm_authorize_acm_ops(current->domain, acm_op));
+
+       /* Test do_acm_op */
+       test_cond(do_acm_op(fake_to_user(user_acm_op)) == -EACCES);
+       user_acm_op->interface_version = ACM_INTERFACE_VERSION;
+
+       /* Arbitrary invalid command. */
+       user_acm_op->cmd = -1;
+       test_cond(do_acm_op(fake_to_user(user_acm_op)) == -ESRCH);
+
+       user_acm_op->cmd = ACM_SETPOLICY;
+       test_cond(!do_acm_op(fake_to_user(user_acm_op)));
+       user_acm_op->cmd = ACM_GETPOLICY;
+       test_cond(!do_acm_op(fake_to_user(user_acm_op)));
+       user_acm_op->cmd = ACM_DUMPSTATS;
+       test_cond(!do_acm_op(fake_to_user(user_acm_op)));
+
+       _test_acm_error=1;
+       user_acm_op->cmd = ACM_SETPOLICY;
+       test_cond(do_acm_op(fake_to_user(user_acm_op)) == -EPERM);
+       user_acm_op->cmd = ACM_GETPOLICY;
+       test_cond(do_acm_op(fake_to_user(user_acm_op)) == -EPERM);
+       user_acm_op->cmd = ACM_DUMPSTATS;
+       test_cond(do_acm_op(fake_to_user(user_acm_op)) == -EPERM);
+       _test_acm_error=0;
+
+       user_acm_op->cmd = ACM_GETSSID;
+       user_acm_op->u.getssid.get_ssid_by = UNSET;
+       test_cond(do_acm_op(fake_to_user(user_acm_op)) == -ESRCH);
+
+       user_acm_op->u.getssid.get_ssid_by = SSIDREF;
+       test_cond(!do_acm_op(fake_to_user(user_acm_op)));
+
+       user_acm_op->u.getssid.get_ssid_by = DOMAINID;
+       test_cond(!do_acm_op(fake_to_user(user_acm_op)));
+       _test_find_error = 1;
+       test_cond(do_acm_op(fake_to_user(user_acm_op)) == -ESRCH);
+       _test_find_error = 0;
+       free(_test_domain->ssid);
+       _test_domain->ssid = NULL;
+       test_cond(do_acm_op(fake_to_user(user_acm_op)) == -ESRCH);
+
+       user_acm_op->u.getssid.get_ssid_by = SSIDREF;
+       _test_acm_error = 1;
+       test_cond(do_acm_op(fake_to_user(user_acm_op)) == _TEST_ERROR);
+       _test_acm_error = 0;
+
+       _test_domain->ssid = calloc(sizeof(struct acm_ssid_domain), 1);
+
+       user_acm_op->cmd = ACM_GETDECISION;
+       user_acm_op->u.getdecision.get_decision_by1 = UNSET;
+       test_cond(do_acm_op(fake_to_user(user_acm_op)) == -ESRCH);
+
+       user_acm_op->u.getdecision.get_decision_by1 = SSIDREF;
+       user_acm_op->u.getdecision.get_decision_by2 = UNSET;
+       test_cond(do_acm_op(fake_to_user(user_acm_op)) == -ESRCH);
+
+       user_acm_op->u.getdecision.get_decision_by2 = SSIDREF;
+       test_cond(!do_acm_op(fake_to_user(user_acm_op)));
+
+       user_acm_op->u.getdecision.get_decision_by1 = DOMAINID;
+       user_acm_op->u.getdecision.get_decision_by2 = DOMAINID;
+       test_cond(!do_acm_op(fake_to_user(user_acm_op)));
+
+       _test_find_error = 1;
+       test_cond(do_acm_op(fake_to_user(user_acm_op)) == -ESRCH);
+
+       _test_find_error = 0;
+       free(_test_domain->ssid);
+       _test_domain->ssid = NULL;
+       test_cond(do_acm_op(fake_to_user(user_acm_op)) == -ESRCH);
+
+       user_acm_op->u.getdecision.get_decision_by1 = SSIDREF;
+       user_acm_op->u.getdecision.get_decision_by2 = SSIDREF;
+       _test_acm_error = 1;
+       test_cond(do_acm_op(fake_to_user(user_acm_op)) == -ESRCH);
+       _test_acm_error = 0;
+
+       free(_test_domain);
+       free(user_acm_op);
+       free(current->domain);
+       free(current);
+
+       return 0;
+}
diff -urN --exclude=.hg --exclude='*~' --exclude='*.aux' xen-unstable.hg-mainline/xen/common/test/test_bitmap.c xen-unstable.hg-check/xen/common/test/test_bitmap.c
--- xen-unstable.hg-mainline/xen/common/test/test_bitmap.c      1970-01-01 10:00:00.000000000 +1000
+++ xen-unstable.hg-check/xen/common/test/test_bitmap.c 2005-12-09 16:33:19.000000000 +1100
@@ -0,0 +1,344 @@
+/*  Tests for bitmap ops.
+    Copyright (C) 2005 Rusty Russell IBM Corporation
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
+ */
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <stdlib.h>
+
+#include "fake-support.h"
+
+#include "../../include/xen/bitmap.h"
+#include "../bitmap.c"
+
+static void _test_set_bit(unsigned long *bitmap, int bit)
+{
+       bitmap[bit/BITS_PER_LONG] |= (1UL << (bit%BITS_PER_LONG));
+}
+
+static void _test_clear_bit(unsigned long *bitmap, int bit)
+{
+       bitmap[bit/BITS_PER_LONG] &= ~(1UL << (bit%BITS_PER_LONG));
+}
+
+static void _test_bitmap_clone(unsigned long *dst, unsigned long *src, 
+                               int length)
+{
+       test_cond(length>0);
+       memcpy(dst, src, length);
+}
+
+static int _test_hweight(unsigned long *bitmap, int bits)
+{
+       int i;
+       int count = 0;
+
+       for(i=0; i<bits; i++) {
+               if (bitmap[i/BITS_PER_LONG] & (1UL << (i%BITS_PER_LONG))) {
+                       count++;
+               }
+       }
+
+       return count;
+}
+
+int main(int argc, char *argv[])
+{
+       unsigned long *bitmap1 = calloc(sizeof(long), 2);
+       unsigned long *bitmap2 = calloc(sizeof(long), 2);
+       long i;
+       /* Used in testing the *region() functions. Effectively 
+        * lg(BITS_PER_LONG) */
+       int order = 0; 
+
+       parse_test_args(argc, argv);
+
+       /* Test __bitmap_empty. */
+       test_cond(__bitmap_empty(bitmap1, BITS_PER_LONG*2));
+       for (i = BITS_PER_LONG*2-1; i >= 0; i--) {
+               _test_set_bit(bitmap1, i);
+               test_cond(__bitmap_empty(bitmap1, i));
+               test_cond(!__bitmap_empty(bitmap1, i+1));
+       }
+       /* Test __bitmap_full. */
+       bitmap1[0] = ULONG_MAX;
+       bitmap1[1] = ULONG_MAX;
+
+       test_cond(__bitmap_full(bitmap1, BITS_PER_LONG*2));
+       for (i = BITS_PER_LONG*2-1; i >= 0; i--) {
+               _test_clear_bit(bitmap1, i);
+               test_cond(__bitmap_full(bitmap1, i));
+               test_cond(!__bitmap_full(bitmap1, i+1));
+       }
+
+       /* Test __bitmap_equal. */
+       bitmap1[0] = 0xaa; /* 0+2+0+8+ 0+32+ 0+128 */
+       bitmap1[1] = 0UL;
+       bitmap2[0] = 0x55; /* 1+0+4+0+16+ 0+64+  0 */
+       bitmap2[1] = 0UL;
+
+       /* Set up alternating bit-pattern bitmaps. */
+       for(i=0; i< 2; i++) {
+               int j;
+
+               for(j=0; j< sizeof(long); j++) {
+                       bitmap1[i] = (bitmap1[i] << CHAR_BIT) | 0xaa;
+                       bitmap2[i] = (bitmap2[i] << CHAR_BIT) | 0x55;
+               }
+       }
+
+       for(i = 0; i < BITS_PER_LONG*2; i++) {
+               test_cond(!__bitmap_equal(bitmap1, bitmap2, i+1));
+               _test_set_bit(bitmap1, i);
+               _test_set_bit(bitmap2, i);
+               test_cond(__bitmap_equal(bitmap1, bitmap2, i+1));
+       }
+
+       /* Test __bitmap_complement. */
+       bitmap1[0] = ULONG_MAX;
+       bitmap1[1] = ULONG_MAX;
+
+       __bitmap_complement(bitmap2, bitmap1, BITS_PER_LONG*2);
+       test_cond(__bitmap_empty(bitmap2,  BITS_PER_LONG*2));
+       for (i = BITS_PER_LONG*2-1; i >= 0; i--) {
+               _test_clear_bit(bitmap1, i);
+
+               __bitmap_complement(bitmap2, bitmap1, i);
+               test_cond(__bitmap_empty(bitmap2, i));
+
+               __bitmap_complement(bitmap1, bitmap2, i);
+               test_cond(__bitmap_full(bitmap1, i));
+       }
+
+       /* Test __bitmap_shift_right. */
+       bitmap1[0] = 0UL;
+       bitmap1[1] = 0UL;
+
+       _test_set_bit(bitmap1, BITS_PER_LONG*2-1);
+       for (i = BITS_PER_LONG*2-1; i > 0; i--) {
+               unsigned long *shifttest = calloc(sizeof(long), 2);
+
+               __bitmap_shift_right(bitmap2, bitmap1, 1, BITS_PER_LONG*2);
+               test_cond(__bitmap_empty(bitmap2, i-1));
+               test_cond(!__bitmap_empty(bitmap2, i));
+
+               _test_bitmap_clone(bitmap1, bitmap2, sizeof(long)*2);
+
+               /* Shift bitmap2 by the number of iterations of this loop
+                * executed so far.  The result should be the same as bitmap1.
+                */
+               bitmap2[0] = 0;
+               bitmap2[1] = 0;
+               _test_set_bit(bitmap2, BITS_PER_LONG*2-1);
+               __bitmap_shift_right(shifttest, bitmap2, (BITS_PER_LONG*2)-i, 
+                                    BITS_PER_LONG*2);
+
+               test_cond(__bitmap_equal(shifttest, bitmap1, BITS_PER_LONG*2));
+               free(shifttest);
+       }
+
+       /* Test __bitmap_shift_left. */
+       bitmap1[0] = 0UL;
+       bitmap1[1] = 0UL;
+
+       _test_set_bit(bitmap1, 0);
+       for (i = 1; i < BITS_PER_LONG*2; i++) {
+               unsigned long *shifttest = calloc(sizeof(long), 2);
+
+               __bitmap_shift_left(bitmap2, bitmap1, 1, BITS_PER_LONG*2);
+               test_cond(__bitmap_empty(bitmap2, i));
+               test_cond(!__bitmap_empty(bitmap2, i+1));
+
+               _test_bitmap_clone(bitmap1, bitmap2, sizeof(long)*2);
+
+               bitmap2[0] = 0;
+               bitmap2[1] = 0;
+               _test_set_bit(bitmap2, 0);
+               __bitmap_shift_left(shifttest, bitmap2, i, BITS_PER_LONG*2);
+               test_cond(__bitmap_equal(shifttest, bitmap1, BITS_PER_LONG*2));
+               free(shifttest);
+       }
+
+       /* Test __bitmap_and. */
+       bitmap1[0] = ULONG_MAX;
+       bitmap1[1] = ULONG_MAX;
+       bitmap2[0] = 0UL;
+       bitmap2[1] = 0UL;
+
+       for(i = 0; i < BITS_PER_LONG*2; i++) {
+               unsigned long *andtest = calloc(sizeof(long), 2);
+
+               _test_set_bit(bitmap2, i);
+               __bitmap_and(andtest, bitmap1, bitmap2, i+1);
+               test_cond(__bitmap_equal(andtest, bitmap2, i));
+
+               free(andtest);
+       }
+
+       /* Test __bitmap_or. */
+       bitmap1[0] = ULONG_MAX;
+       bitmap1[1] = ULONG_MAX;
+
+       for(i = 0; i < BITS_PER_LONG*2; i++) {
+               unsigned long *ortest = calloc(sizeof(long), 2);
+
+               bitmap2[0] = 0UL;
+               bitmap2[1] = 0UL;
+               _test_set_bit(bitmap2, i);
+
+               __bitmap_or(ortest, bitmap2, bitmap2, i+1);
+               test_cond(__bitmap_equal(ortest, bitmap2, i+1));
+               __bitmap_or(ortest, bitmap2, bitmap1, i+1);
+               test_cond(__bitmap_equal(ortest, bitmap1, i+1));
+
+               free(ortest);
+       }
+
+       /* Test __bitmap_xor. */
+       bitmap1[0] = ULONG_MAX;
+       bitmap1[1] = ULONG_MAX;
+
+       for(i = 0; i < BITS_PER_LONG*2; i++) {
+               unsigned long *xortest = calloc(sizeof(long), 2);
+               unsigned long *complement = calloc(sizeof(long), 2);
+
+               bitmap2[0] = 0UL;
+               bitmap2[1] = 0UL;
+               _test_set_bit(bitmap2, i);
+               __bitmap_complement(complement, bitmap2, i+1);
+               __bitmap_xor(xortest, bitmap1, bitmap2, i+1);
+
+               test_cond(__bitmap_equal(xortest, complement, i+1));
+
+               free(complement);
+               free(xortest);
+       }
+
+       /* Test __bitmap_andnot. */
+       bitmap1[0] = ULONG_MAX;
+       bitmap1[1] = ULONG_MAX;
+       bitmap2[0] = 0UL;
+       bitmap2[1] = 0UL;
+
+       for(i = 0; i < BITS_PER_LONG*2; i++) {
+               unsigned long *andnottest = calloc(sizeof(long), 2);
+
+               __bitmap_andnot(andnottest, bitmap1, bitmap2, i+1);
+               test_cond(__bitmap_full(andnottest, i+1));
+
+               free(andnottest);
+       }
+
+       /* Test __bitmap_intersects. */
+       bitmap1[0] = 0UL;
+       bitmap1[1] = 0UL;
+       bitmap2[0] = 0UL;
+       bitmap2[1] = 0UL;
+
+       test_cond(!__bitmap_intersects(bitmap1, bitmap2, BITS_PER_LONG*2));
+       for(i = 0; i < BITS_PER_LONG*2; i++) {
+               _test_set_bit(bitmap1, i);
+               __bitmap_complement(bitmap2, bitmap1, i+1);
+               test_cond(!__bitmap_intersects(bitmap1, bitmap2, i+1));
+       }
+
+       /* Test __bitmap_subset. */
+       bitmap1[0] = 0UL;
+       bitmap1[1] = 0UL;
+       bitmap2[0] = 0UL;
+       bitmap2[1] = 0UL;
+
+       test_cond(__bitmap_subset(bitmap1, bitmap2, BITS_PER_LONG*2));
+       for(i = 0; i < BITS_PER_LONG*2; i++) {
+               _test_set_bit(bitmap1, i);
+               __bitmap_complement(bitmap2, bitmap1, i+1);
+               test_cond(!__bitmap_subset(bitmap1, bitmap2, i+1));
+
+       }
+
+       /* Test __bitmap_weight. */
+       bitmap1[0] = 0UL;
+       bitmap1[1] = 0UL;
+       for(i = 0; i < BITS_PER_LONG*2; i++) {
+               _test_set_bit(bitmap1, i);
+
+               test_cond(_test_hweight(bitmap1, BITS_PER_LONG*2) == i+1);
+               test_cond(__bitmap_weight(bitmap1, BITS_PER_LONG*2) ==
+                      _test_hweight(bitmap1, BITS_PER_LONG*2));
+       }
+
+       /* Test bitmap_find_free_region. */
+       i = BITS_PER_LONG;
+       while (i>1) {
+               i >>=1;
+               order++;
+       }
+
+       bitmap1[0] = 0UL;
+       bitmap1[1] = 0UL;
+       bitmap2[0] = ULONG_MAX;
+       bitmap2[1] = ULONG_MAX;
+
+       for(i=0; i<=order; i++) {
+               unsigned long *freetest = calloc(sizeof(long), 2);
+               int pos;
+
+               pos = bitmap_find_free_region(bitmap1, BITS_PER_LONG*2, i);
+               test_cond(pos >= 0);
+
+               __bitmap_shift_right(freetest, bitmap1, pos ,BITS_PER_LONG*2);
+               test_cond(__bitmap_full(freetest, i));
+
+               /* bitmap2 is busy, so this must fail. */
+               pos = bitmap_find_free_region(bitmap2, BITS_PER_LONG*2, i);
+               test_cond(pos < 0);
+
+               free(freetest);
+       }
+       /* Check that ((1<<order) > BITS_PER_LONG) results in -EINVAL. */
+       test_cond(bitmap_find_free_region(bitmap1, BITS_PER_LONG*2, order+1) < 0);
+
+       /* Test bitmap_release_region and bitmap_allocate_region. */
+       bitmap1[0] = 0UL;
+       bitmap1[1] = 0UL;
+       bitmap2[0] = ULONG_MAX;
+       bitmap2[1] = ULONG_MAX;
+
+       for(i=0; i<=order; i++) {
+               int j;
+               for(j=0; j < BITS_PER_LONG*2; j++) {
+                       test_cond(!bitmap_allocate_region(bitmap1, j, i));
+                       test_cond(!__bitmap_empty(bitmap1, BITS_PER_LONG*2));
+
+                       bitmap_release_region(bitmap1, j, i);
+                       test_cond(__bitmap_empty(bitmap1, BITS_PER_LONG*2));
+
+                       /* bitmap2 is busy, so this must fail. */
+                       test_cond(bitmap_allocate_region(bitmap2, j, i) < 0);
+               }
+       }
+       
+
+       bitmap1[0] = 0UL;
+       
+       free(bitmap1);
+       free(bitmap2);
+       return 0;
+}
diff -urN --exclude=.hg --exclude='*~' --exclude='*.aux' xen-unstable.hg-mainline/xen/common/test/test_dom0_ops.c xen-unstable.hg-check/xen/common/test/test_dom0_ops.c
--- xen-unstable.hg-mainline/xen/common/test/test_dom0_ops.c    1970-01-01 10:00:00.000000000 +1000
+++ xen-unstable.hg-check/xen/common/test/test_dom0_ops.c       2005-12-09 16:33:19.000000000 +1100
@@ -0,0 +1,644 @@
+/*  Tests for dom0 ops.
+    Copyright (C) 2005 Rusty Russell IBM Corporation
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
+ */
+#define _GNU_SOURCE
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <stdbool.h>
+
+#include <fake-support.h>
+
+
+#include "../../include/public/dom0_ops.h"
+
+static int _test_acm_pre_dom0_op = 0;
+static int acm_pre_dom0_op(dom0_op_t *op, void **ssid) 
+{
+       return _test_acm_pre_dom0_op;
+}
+
+static inline void acm_post_dom0_op(dom0_op_t *op, void *ssid) 
+{ return; }
+
+static inline void acm_fail_dom0_op(dom0_op_t *op, void *ssid) 
+{ return; }
+
+static int _test_set_info_guest_out;
+static int set_info_guest(struct domain *d,
+                         dom0_setdomaininfo_t *setdomaininfo)
+{
+       return _test_set_info_guest_out;
+}
+
+static int _test_count_domain_pause_by_systemcontroller;
+void domain_pause_by_systemcontroller(struct domain *d);
+void domain_pause_by_systemcontroller(struct domain *d)
+{
+       _test_count_domain_pause_by_systemcontroller++;
+}
+
+static int _test_count_domain_unpause_by_systemcontroller;
+void domain_unpause_by_systemcontroller(struct domain *d);
+void domain_unpause_by_systemcontroller(struct domain *d)
+{
+       _test_count_domain_unpause_by_systemcontroller++;
+}
+
+static struct domain *_test_domain_kill;
+void domain_kill(struct domain *d);
+void domain_kill(struct domain *d)
+{
+       _test_domain_kill = d;
+}
+       
+#define for_each_vcpu(_d,_v)                    \
+ for ( (_v) = (_d)->vcpu[0];                    \
+       (_v) != NULL;                            \
+       (_v) = (_v)->next_in_list )
+
+ /* Has the FPU been initialised? */
+#define _VCPUF_fpu_initialised 0
+#define VCPUF_fpu_initialised  (1UL<<_VCPUF_fpu_initialised)
+ /* Has the FPU been used since it was last saved? */
+#define _VCPUF_fpu_dirtied     1
+#define VCPUF_fpu_dirtied      (1UL<<_VCPUF_fpu_dirtied)
+ /* Domain is blocked waiting for an event. */
+#define _VCPUF_blocked         2
+#define VCPUF_blocked          (1UL<<_VCPUF_blocked)
+ /* Currently running on a CPU? */
+#define _VCPUF_running         3
+#define VCPUF_running          (1UL<<_VCPUF_running)
+ /* Disables auto-migration between CPUs. */
+#define _VCPUF_cpu_pinned      4
+#define VCPUF_cpu_pinned       (1UL<<_VCPUF_cpu_pinned)
+ /* Domain migrated between CPUs. */
+#define _VCPUF_cpu_migrated    5
+#define VCPUF_cpu_migrated     (1UL<<_VCPUF_cpu_migrated)
+ /* Initialization completed. */
+#define _VCPUF_initialised     6
+#define VCPUF_initialised      (1UL<<_VCPUF_initialised)
+ /* VCPU is not-runnable */
+#define _VCPUF_down            7
+#define VCPUF_down             (1UL<<_VCPUF_down)
+
+/*
+ * Per-domain flags (domain_flags).
+ */
+ /* Is this one of the per-CPU idle domains? */
+#define _DOMF_idle_domain      0
+#define DOMF_idle_domain       (1UL<<_DOMF_idle_domain)
+ /* Is this domain privileged? */
+#define _DOMF_privileged       1
+#define DOMF_privileged        (1UL<<_DOMF_privileged)
+ /* May this domain do IO to physical devices? */
+#define _DOMF_physdev_access   2
+#define DOMF_physdev_access    (1UL<<_DOMF_physdev_access)
+ /* Guest shut itself down for some reason. */
+#define _DOMF_shutdown         3
+#define DOMF_shutdown          (1UL<<_DOMF_shutdown)
+ /* Guest is in process of shutting itself down (becomes DOMF_shutdown). */
+#define _DOMF_shuttingdown     4
+#define DOMF_shuttingdown      (1UL<<_DOMF_shuttingdown)
+ /* Death rattle. */
+#define _DOMF_dying            5
+#define DOMF_dying             (1UL<<_DOMF_dying)
+ /* Domain is paused by controller software. */
+#define _DOMF_ctrl_pause       6
+#define DOMF_ctrl_pause        (1UL<<_DOMF_ctrl_pause)
+
+#define ACM_DEFAULT_SSID  0x0
+
+typedef uint32_t ssidref_t;
+struct acm_ssid_domain {
+    ssidref_t ssidref;   /* combined security reference */
+};
+
+#define __pa(x) ((unsigned long)(x) + 1001)
+#define __va(x) ((unsigned long)(x) - 1001)
+#define PAGE_SHIFT 10
+#define for_each_domain(_d)                     \
+ for ( (_d) = current->domain;                  \
+       (_d) != NULL;                            \
+       (_d) = NULL )
+#define smp_num_siblings 1
+#define num_online_cpus() 1
+rwlock_t domlist_lock = RW_LOCK_UNLOCKED;
+
+static struct vcpu *_test_alloc_vcpu_out;
+static struct vcpu *alloc_vcpu(struct domain *d, unsigned int vcpu_id,
+                              unsigned int cpu_id)
+{
+       return _test_alloc_vcpu_out;
+}
+
+static struct vcpu *_test_new_vcpu(void)
+{
+       struct vcpu *vcpu = malloc(sizeof(struct vcpu));
+
+       vcpu->processor = 0;
+       vcpu->next_in_list = NULL;
+       return vcpu;
+}
+
+static struct domain *_test_new_domain(void)
+{
+       struct domain *d = malloc(sizeof(struct domain));
+       atomic_set(&d->refcnt, 0);
+       memset(d->vcpu, 0, sizeof(d->vcpu));
+       d->vcpu[0] = _test_new_vcpu();
+       return d;
+}
+
+static struct domain *_test_do_createdomain_out;
+static domid_t _test_do_createdomain_domid;
+static unsigned int _test_do_createdomain_cpu;
+static struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
+{
+       fake_must_have_spinlock("&dom0_lock");
+       _test_do_createdomain_domid = dom_id;
+       _test_do_createdomain_cpu = cpu;
+       if (_test_do_createdomain_out)
+               _test_do_createdomain_out->domain_id = dom_id;
+       return _test_do_createdomain_out;
+}
+
+static long test_arch_do_dom0_op_out;
+static long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
+{
+       fake_must_have_spinlock("&dom0_lock");
+       return test_arch_do_dom0_op_out;
+}
+
+static unsigned int find_first_set_bit(unsigned long word)
+{
+       return ffsl(word);
+}
+
+static struct vcpu *test_vcpu_pause_in_v;
+static void vcpu_pause(struct vcpu *v)
+{
+       fake_must_have_spinlock("&dom0_lock");
+       test_vcpu_pause_in_v = v;
+}
+
+static struct vcpu *test_arch_getdomaininfo_ctxt_in_vcpu;
+static void arch_getdomaininfo_ctxt(struct vcpu *vcpu,
+                                   struct vcpu_guest_context *ctx)
+{
+       fake_must_have_spinlock("&dom0_lock");
+       test_arch_getdomaininfo_ctxt_in_vcpu = vcpu;
+}
+
+static int test_vcpu_migrate_cpu_in_newcpu;
+static void vcpu_migrate_cpu(struct vcpu *v, int newcpu)
+{
+       fake_must_have_spinlock("&dom0_lock");
+       assert(v == test_vcpu_pause_in_v);
+       test_vcpu_migrate_cpu_in_newcpu = newcpu;
+}
+
+static void vcpu_unpause(struct vcpu *v)
+{
+       fake_must_have_spinlock("&dom0_lock");
+       assert(v == test_vcpu_pause_in_v);
+}
+
+static long sched_ctl(struct sched_ctl_cmd *cmd)
+{
+       fake_must_have_spinlock("&dom0_lock");
+       memset(cmd, 0, sizeof(*cmd));
+       return 102;
+}
+
+static long sched_adjdom(struct sched_adjdom_cmd *cmd)
+{
+       fake_must_have_spinlock("&dom0_lock");
+       memset(cmd, 0, sizeof(*cmd));
+       return 103;
+}
+
+static unsigned long test_do_settime_in_secs;
+static unsigned long test_do_settime_in_nsecs;
+static unsigned long test_do_settime_in_system_time_base;
+static void do_settime(unsigned long secs, unsigned long nsecs,
+                      u64 system_time_base)
+{
+       fake_must_have_spinlock("&dom0_lock");
+       test_do_settime_in_secs = secs;
+       test_do_settime_in_nsecs = nsecs;
+       test_do_settime_in_system_time_base = system_time_base;
+}
+
+static int test_tb_control_out;
+static int tb_control(dom0_tbufcontrol_t *tbc)
+{
+       fake_must_have_spinlock("&dom0_lock");
+       return test_tb_control_out;
+}
+
+static long test_read_console_ring_out;
+static int test_read_console_ring_in_clear;
+static long read_console_ring(char **pstr, u32 *pcount, int clear)
+{
+       fake_must_have_spinlock("&dom0_lock");
+       (*pstr)++;
+       **pstr = 'A';
+       *pcount = 1;
+       test_read_console_ring_in_clear = clear;
+       return test_read_console_ring_out;
+}
+
+static int test_sched_id_out;
+static int sched_id(void)
+{
+       fake_must_have_spinlock("&dom0_lock");
+       return test_sched_id_out;
+}
+
+/* Prototype to suppress warning (usually only called from asm) */
+long do_dom0_op(dom0_op_t *u_dom0_op);
+
+#include "../../include/public/dom0_ops.h"
+#include "../dom0_ops.c"
+
+/* Use dynamic allocation so valgrind can find problems. */
+static dom0_op_t *_test_op(int cmd)
+{
+       dom0_op_t *op = malloc(sizeof(*op));
+       op->interface_version = DOM0_INTERFACE_VERSION;
+       op->cmd = cmd;
+       return op;
+}
+
+int main(int argc, char *argv[])
+{
+       struct domain *me;
+       dom0_op_t *op;
+
+       parse_test_args(argc, argv);
+
+       me = _test_new_domain();
+       current = me->vcpu[0];
+       current->domain = me;
+       me->domain_flags = 0;
+       atomic_inc(&me->refcnt);
+
+       /* Unpriv'd domains should be immediately shown the door. */
+       fake_IS_PRIV_out = 0;
+       test_cond(do_dom0_op(fake_to_user(NULL)) == -EPERM);
+       test_cond(fake_lock_count == 0);
+
+       /* Wrong version should be failed without looking at command. */
+       fake_IS_PRIV_out = 1;
+       op = malloc(sizeof(*op));
+       op->interface_version = DOM0_INTERFACE_VERSION - 1;
+       test_cond(do_dom0_op(fake_to_user(op)) == -EACCES);
+       test_cond(fake_lock_count == 0);
+
+       /* ACM hook should result in immediate refusal, too. */
+       op->interface_version = DOM0_INTERFACE_VERSION;
+       _test_acm_pre_dom0_op = 1;
+       test_cond(do_dom0_op(fake_to_user(op)) == -EPERM);
+       test_cond(fake_lock_count == 0);
+       _test_acm_pre_dom0_op = 0;
+
+       op = _test_op(DOM0_SETDOMAININFO);
+       op->u.setdomaininfo.domain = 100;
+       fake_find_domain_out = NULL;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ESRCH);
+       test_cond(fake_lock_count == 0);
+       fake_find_domain_out = current->domain;
+       _test_set_info_guest_out = -1000;
+       test_cond(do_dom0_op(fake_to_user(op)) == -1000);
+       test_cond(fake_lock_count == 0);
+
+       op = _test_op(DOM0_PAUSEDOMAIN);
+       op->u.pausedomain.domain = 101;
+       fake_find_domain_out = NULL;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ESRCH);
+       test_cond(fake_lock_count == 0);
+       fake_find_domain_out = current->domain;
+       test_cond(do_dom0_op(fake_to_user(op)) == -EINVAL);
+       test_cond(fake_lock_count == 0);
+       fake_find_domain_out = _test_new_domain();
+       test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       test_cond(_test_count_domain_pause_by_systemcontroller == 1);
+       test_cond(fake_lock_count == 0);
+
+       op = _test_op(DOM0_UNPAUSEDOMAIN);
+       op->u.unpausedomain.domain = 101;
+       fake_find_domain_out = NULL;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ESRCH);
+       test_cond(fake_lock_count == 0);
+       fake_find_domain_out = current->domain;
+       test_cond(do_dom0_op(fake_to_user(op)) == -EINVAL);
+       test_cond(fake_lock_count == 0);
+       fake_find_domain_out = _test_new_domain();
+       fake_find_domain_out->vcpu[0]->vcpu_flags = 0;
+       test_cond(do_dom0_op(fake_to_user(op)) == -EINVAL);
+       test_cond(fake_lock_count == 0);
+       fake_find_domain_out = _test_new_domain();
+       fake_find_domain_out->vcpu[0]->vcpu_flags = (1 << _VCPUF_initialised);
+       test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       test_cond(fake_lock_count == 0);
+
+       op = _test_op(DOM0_CREATEDOMAIN);
+       op->u.createdomain.domain = 101;
+       fake_find_domain_out = current->domain;
+       test_cond(do_dom0_op(fake_to_user(op)) == -EINVAL);
+       fake_find_domain_out = NULL;
+       _test_do_createdomain_out = _test_new_domain();
+       /* Not fair to test failing very first allocation. */
+       op->u.createdomain.domain = 0;
+       test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       test_cond(op->u.createdomain.domain == 1);
+       test_cond(_test_do_createdomain_domid == 1);
+       fake_find_domain_out = current->domain;
+       op->u.createdomain.domain = 0;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ENOMEM);
+       fake_find_domain_out = current->domain;
+       fake_find_domain_out = NULL;
+       _test_do_createdomain_out = NULL;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ENOMEM);
+       _test_do_createdomain_out = _test_new_domain();
+       /* It copies back to us, so we must set valid bits. */
+       memset(op, 0, sizeof(*op));
+       op->cmd = DOM0_CREATEDOMAIN;
+       op->interface_version = DOM0_INTERFACE_VERSION;
+       test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       fake_check_memory(op, sizeof(*op));
+       test_cond(_test_do_createdomain_domid != 0);
+       test_cond(_test_do_createdomain_domid == op->u.createdomain.domain);
+       test_cond(_test_do_createdomain_cpu == 0);
+       op->u.createdomain.domain = 101;
+       test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       fake_check_memory(op, sizeof(*op));
+       test_cond(_test_do_createdomain_domid == 101);
+       test_cond(op->u.createdomain.domain == 101);
+       test_cond(_test_do_createdomain_cpu == 0);
+
+       op = _test_op(DOM0_MAX_VCPUS);
+       op->u.max_vcpus.max = MAX_VIRT_CPUS+1;
+       test_cond(do_dom0_op(fake_to_user(op)) == -EINVAL);
+       test_cond(fake_lock_count == 0);
+       op->u.max_vcpus.max = MAX_VIRT_CPUS;
+       fake_find_domain_out = NULL;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ESRCH);
+       test_cond(fake_lock_count == 0);
+       fake_find_domain_out = _test_new_domain();
+       fake_find_domain_out->vcpu[0]->vcpu_flags = (1 << _VCPUF_initialised);
+       test_cond(do_dom0_op(fake_to_user(op)) == -EINVAL);
+       test_cond(fake_lock_count == 0);
+       fake_find_domain_out = _test_new_domain();
+       fake_find_domain_out->vcpu[0]->vcpu_flags = 0;
+       _test_alloc_vcpu_out = NULL;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ENOMEM);
+       test_cond(fake_lock_count == 0);
+       _test_alloc_vcpu_out = _test_new_vcpu();
+       fake_find_domain_out = _test_new_domain();
+       atomic_inc(&fake_find_domain_out->refcnt);
+       fake_find_domain_out->vcpu[0] = _test_new_vcpu();
+       fake_find_domain_out->vcpu[0]->vcpu_flags = 0;
+       test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       test_cond(fake_lock_count == 0);
+       op->u.max_vcpus.max = 0;
+       _test_alloc_vcpu_out = _test_new_vcpu();
+       test_cond(do_dom0_op(fake_to_user(op)) == -EINVAL);
+       test_cond(fake_lock_count == 0);
+
+       op = _test_op(DOM0_DESTROYDOMAIN);
+       op->u.destroydomain.domain = 101;
+       fake_find_domain_out = NULL;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ESRCH);
+       test_cond(fake_lock_count == 0);
+       fake_find_domain_out = current->domain;
+       test_cond(do_dom0_op(fake_to_user(op)) == -EINVAL);
+       test_cond(fake_lock_count == 0);
+       fake_find_domain_out = _test_new_domain();
+       test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       test_cond(fake_lock_count == 0);
+       test_cond(_test_domain_kill == fake_find_domain_out);
+
+       op = _test_op(DOM0_PINCPUDOMAIN);
+       op->u.pincpudomain.domain = 101;
+       op->u.pincpudomain.vcpu = 0;
+       op->u.pincpudomain.cpumap = 1;
+       fake_find_domain_out = NULL;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ESRCH);
+       fake_find_domain_out = current->domain;
+       op->u.pincpudomain.vcpu = MAX_VIRT_CPUS;
+       test_cond(do_dom0_op(fake_to_user(op)) == -EINVAL);
+       op->u.pincpudomain.vcpu = 1;
+       test_cond(do_dom0_op(fake_to_user(op)) == -EINVAL);
+       /* FIXME: Unreachable ESRCH return --RR */
+       op->u.pincpudomain.vcpu = 0;
+       current->domain->vcpu[0] = current;
+       test_cond(do_dom0_op(fake_to_user(op)) == -EINVAL);
+       current->domain->vcpu[0] = _test_new_vcpu();
+       current->domain->vcpu[0]->vcpu_flags = (1<<_VCPUF_cpu_pinned);
+       op->u.pincpudomain.cpumap = CPUMAP_RUNANYWHERE;
+       test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       test_cond(current->domain->vcpu[0]->vcpu_flags == 0);
+       op->u.pincpudomain.cpumap = 1;
+       test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       test_cond(current->domain->vcpu[0]->vcpu_flags
+                 == (1<<_VCPUF_cpu_pinned));
+       test_cond(test_vcpu_pause_in_v == current->domain->vcpu[0]);
+       test_cond(test_vcpu_migrate_cpu_in_newcpu == 0);
+       
+       op = _test_op(DOM0_SCHEDCTL);
+       test_cond(do_dom0_op(fake_to_user(op)) == 102);
+
+       op = _test_op(DOM0_ADJUSTDOM);
+       test_cond(do_dom0_op(fake_to_user(op)) == 103);
+
+       op = _test_op(DOM0_GETDOMAININFO);
+       current->domain->vcpu[0] = current;
+       current->domain->domain_id = 1;
+       op->u.getdomaininfo.domain = 2;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ESRCH);
+       op->u.getdomaininfo.domain = 1;
+       fake_get_domain_out = 0;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ESRCH);
+       test_cond(fake_get_domain_in_d == current->domain);
+       fake_get_domain_out = 1;
+       memset(&op->u.getdomaininfo, 0, sizeof(op->u.getdomaininfo));
+       current->domain->tot_pages = 104;
+       current->domain->max_pages = 105;
+       current->domain->shared_info = (void *)__va(106 << PAGE_SHIFT);
+       current->cpu_time = 107;
+       current->vcpu_id = 1;
+       current->vcpu_flags = VCPUF_running;
+       current->domain->ssid = NULL;
+       current->domain->shutdown_code = 0;
+       memset(current->domain->handle, 0, sizeof(current->domain->handle));
+       strcpy((char *)current->domain->handle, "handle");
+       test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       fake_check_memory(&op->u.getdomaininfo, sizeof(op->u.getdomaininfo));
+       test_cond(op->u.getdomaininfo.domain == 1);
+       test_cond(op->u.getdomaininfo.flags == DOMFLAGS_RUNNING);
+       test_cond(op->u.getdomaininfo.tot_pages == 104);
+       test_cond(op->u.getdomaininfo.max_pages == 105);
+       test_cond(op->u.getdomaininfo.nr_online_vcpus == 1);
+       test_cond(op->u.getdomaininfo.shared_info_frame == 106);
+       test_cond(op->u.getdomaininfo.cpu_time == 107);
+       test_cond(op->u.getdomaininfo.nr_online_vcpus == 1);
+       test_cond(op->u.getdomaininfo.max_vcpu_id == 1);
+       test_cond(op->u.getdomaininfo.ssidref == ACM_DEFAULT_SSID);
+       test_cond(strcmp((char *)op->u.getdomaininfo.handle, "handle") == 0);
+
+       op = _test_op(DOM0_GETDOMAININFOLIST);
+       {
+               dom0_getdomaininfo_t *buffer = malloc(sizeof(*buffer));
+
+               op->u.getdomaininfolist.buffer = fake_to_user(buffer);
+               op->u.getdomaininfolist.first_domain = 0;
+               op->u.getdomaininfolist.max_domains = 0;
+               /* FIXME: Redundant NULL test in for_each_domain(). */
+               test_cond(do_dom0_op(fake_to_user(op)) == 0);
+               test_cond(op->u.getdomaininfolist.num_domains == 0);
+               op->u.getdomaininfolist.max_domains = 1;
+               test_cond(do_dom0_op(fake_to_user(op)) == 0);
+               test_cond(op->u.getdomaininfolist.num_domains == 1);
+               /* FIXME: Contains 2 uninitialized bytes, but we're dom0. */
+               /* fake_check_memory(buffer, sizeof(*buffer)); */
+               test_cond(buffer->domain == 1);
+               test_cond(buffer->flags == DOMFLAGS_RUNNING);
+               test_cond(buffer->tot_pages == 104);
+               test_cond(buffer->max_pages == 105);
+               test_cond(buffer->shared_info_frame == 106);
+               test_cond(buffer->cpu_time == 107);
+               test_cond(buffer->nr_online_vcpus == 1);
+               test_cond(buffer->max_vcpu_id == 1);
+               test_cond(buffer->ssidref == ACM_DEFAULT_SSID);
+               test_cond(memcmp(buffer->handle, current->domain->handle,
+                                sizeof(buffer->handle)) == 0);
+       }
+
+       op = _test_op(DOM0_GETVCPUCONTEXT);
+       op->u.getvcpucontext.domain = 101;
+       op->u.getvcpucontext.vcpu = 0;
+       fake_find_domain_out = NULL;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ESRCH);
+       fake_find_domain_out = current->domain;
+       op->u.getvcpucontext.vcpu = MAX_VIRT_CPUS;
+       test_cond(do_dom0_op(fake_to_user(op)) == -EINVAL);
+       op->u.getvcpucontext.vcpu = 1;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ESRCH);
+       op->u.getvcpucontext.vcpu = 0;
+       test_vcpu_pause_in_v = NULL;
+       {
+               vcpu_guest_context_t *ctxt = malloc(sizeof *ctxt);
+               op->u.getvcpucontext.ctxt = fake_to_user(ctxt);
+               test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       }
+       test_cond(test_arch_getdomaininfo_ctxt_in_vcpu == current);
+       test_cond(test_vcpu_pause_in_v == NULL);
+       /* Non-current vcpu must be paused. */
+       fake_find_domain_out = _test_new_domain();
+       atomic_inc(&fake_find_domain_out->refcnt);
+       test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       test_cond(test_arch_getdomaininfo_ctxt_in_vcpu
+                 == fake_find_domain_out->vcpu[0]);
+       test_cond(test_vcpu_pause_in_v == fake_find_domain_out->vcpu[0]);
+
+       
+       op = _test_op(DOM0_GETVCPUINFO);
+       op->u.getvcpuinfo.domain = 101;
+       op->u.getvcpuinfo.vcpu = 0;
+       fake_find_domain_out = NULL;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ESRCH);
+       fake_find_domain_out = current->domain;
+       op->u.getvcpuinfo.vcpu = MAX_VIRT_CPUS;
+       test_cond(do_dom0_op(fake_to_user(op)) == -EINVAL);
+       op->u.getvcpuinfo.vcpu = 1;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ESRCH);
+       op->u.getvcpuinfo.vcpu = 0;
+       current->cpu_time = 108;
+       current->processor = 1;
+       current->cpumap = 7;
+       current->vcpu_flags = (1 << _VCPUF_running);
+       op->u.getvcpuinfo.vcpu = 0;
+       test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       test_cond(op->u.getvcpuinfo.online);
+       test_cond(!op->u.getvcpuinfo.blocked);
+       test_cond(op->u.getvcpuinfo.running);
+       test_cond(op->u.getvcpuinfo.cpu_time == 108);
+       test_cond(op->u.getvcpuinfo.cpu == 1);
+       test_cond(op->u.getvcpuinfo.cpumap == 7);
+
+       op = _test_op(DOM0_SETTIME);
+       op->u.settime.secs = 109;
+       op->u.settime.nsecs = 110;
+       op->u.settime.system_time = 111;
+       test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       test_cond(test_do_settime_in_secs == 109);
+       test_cond(test_do_settime_in_nsecs == 110);
+       test_cond(test_do_settime_in_system_time_base == 111);
+
+       op = _test_op(DOM0_TBUFCONTROL);
+       test_tb_control_out = 112;
+       test_cond(do_dom0_op(fake_to_user(op)) == 112);
+
+       op = _test_op(DOM0_READCONSOLE);
+       {
+               char buf[2];
+               test_read_console_ring_out = 113; 
+               op->u.readconsole.buffer = buf;
+               op->u.readconsole.count = 100;
+               op->u.readconsole.clear = 1;
+               test_cond(do_dom0_op(fake_to_user(op)) == 113);
+               test_cond(test_read_console_ring_in_clear == 1);
+               test_cond(buf[1] == 'A');
+               test_cond(op->u.readconsole.buffer == buf+1);
+               test_cond(op->u.readconsole.count == 1);
+       }
+
+       op = _test_op(DOM0_SCHED_ID);
+       test_sched_id_out = 114;
+       /* FIXME: Doesn't return -EFAULT on copy_to_user failing. */
+       test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       test_cond(op->u.sched_id.sched_id == 114);
+
+       op = _test_op(DOM0_SETDOMAINMAXMEM);
+       op->u.setdomainmaxmem.domain = 115;
+       fake_find_domain_out = NULL;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ESRCH);
+       fake_find_domain_out = current->domain;
+       op->u.setdomainmaxmem.max_memkb = 116;
+       test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       test_cond(current->domain->max_pages == 116*1024/(1<<PAGE_SHIFT));
+
+       op = _test_op(DOM0_SETDOMAINHANDLE);
+       op->u.setdomainhandle.domain = 117;
+       fake_find_domain_out = NULL;
+       test_cond(do_dom0_op(fake_to_user(op)) == -ESRCH);
+       fake_find_domain_out = current->domain;
+       strcpy((char *)op->u.setdomainhandle.handle, "handle2");
+       test_cond(do_dom0_op(fake_to_user(op)) == 0);
+       test_cond(strcmp((char *)current->domain->handle, "handle2") == 0);
+
+       op = _test_op(10001);
+       test_arch_do_dom0_op_out = 118;
+       test_cond(do_dom0_op(fake_to_user(op)) == 118);
+       return 0;
+}
diff -urN --exclude=.hg --exclude='*~' --exclude='*.aux' xen-unstable.hg-mainline/xen/common/test/test_domain.c xen-unstable.hg-check/xen/common/test/test_domain.c
--- xen-unstable.hg-mainline/xen/common/test/test_domain.c      1970-01-01 10:00:00.000000000 +1000
+++ xen-unstable.hg-check/xen/common/test/test_domain.c 2005-12-09 16:33:19.000000000 +1100
@@ -0,0 +1,832 @@
+/*  Tests for domain.c
+    Copyright (C) 2005 Tony Breeds IBM Corporation
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
+ */
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <stdlib.h>
+
+#include "fake-support.h"
+
+/* BEGIN: Stuff for fake */
+/* Attribute tags */
+
+#define DOMAIN_HASH_SIZE 256
+#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
+
+/*
+ * Commands to HYPERVISOR_vm_assist().
+ */
+#define VMASST_CMD_enable                0
+#define VMASST_CMD_disable               1
+#define VMASST_TYPE_4gb_segments         0
+#define VMASST_TYPE_4gb_segments_notify  1
+#define VMASST_TYPE_writable_pagetables  2
+#define MAX_VMASST_TYPE 2
+
+#define for_each_domain(_d)                     \
+ for ( (_d) = domain_list;                      \
+       (_d) != NULL;                            \
+       (_d) = (_d)->next_in_list )
+
+#define for_each_vcpu(_d,_v)                    \
+ for ( (_v) = (_d)->vcpu[0];                    \
+       (_v) != NULL;                            \
+       (_v) = (_v)->next_in_list )
+
+/*
+ * Per-VCPU flags (vcpu_flags).
+ */
+ /* Has the FPU been initialised? */
+#define _VCPUF_fpu_initialised 0
+#define VCPUF_fpu_initialised  (1UL<<_VCPUF_fpu_initialised)
+ /* Has the FPU been used since it was last saved? */
+#define _VCPUF_fpu_dirtied     1
+#define VCPUF_fpu_dirtied      (1UL<<_VCPUF_fpu_dirtied)
+ /* Domain is blocked waiting for an event. */
+#define _VCPUF_blocked         2
+#define VCPUF_blocked          (1UL<<_VCPUF_blocked)
+ /* Currently running on a CPU? */
+#define _VCPUF_running         3
+#define VCPUF_running          (1UL<<_VCPUF_running)
+ /* Disables auto-migration between CPUs. */
+#define _VCPUF_cpu_pinned      4
+#define VCPUF_cpu_pinned       (1UL<<_VCPUF_cpu_pinned)
+ /* Domain migrated between CPUs. */
+#define _VCPUF_cpu_migrated    5
+#define VCPUF_cpu_migrated     (1UL<<_VCPUF_cpu_migrated)
+ /* Initialization completed. */
+#define _VCPUF_initialised     6
+#define VCPUF_initialised      (1UL<<_VCPUF_initialised)
+ /* VCPU is not-runnable */
+#define _VCPUF_down            7
+#define VCPUF_down             (1UL<<_VCPUF_down)
+
+#define VCPUOP_initialise           0
+#define VCPUOP_up                   1
+#define VCPUOP_down                 2
+#define VCPUOP_is_up                3
+
+
+#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
+static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
+{
+       return *srcp == 0UL;
+}
+
+#define LOCK_BIGLOCK(_d) spin_lock_recursive(&(_d)->big_lock)
+#define UNLOCK_BIGLOCK(_d) spin_unlock_recursive(&(_d)->big_lock)
+static void cleanup_writable_pagetable(struct domain *d) {}
+#define sync_pagetable_state(d)                 \
+    do {                                        \
+        LOCK_BIGLOCK(d);                        \
+        cleanup_writable_pagetable(d);          \
+        UNLOCK_BIGLOCK(d);                      \
+    } while ( 0 )
+
+/* 
+ * VIRTUAL INTERRUPTS
+ * 
+ * Virtual interrupts that a guest OS may receive from Xen.
+ */
+#define VIRQ_DOM_EXC    3  /* (DOM0) Exceptional event for some domain.   */
+#define VIRQ_DEBUGGER   6  /* (DOM0) A domain has paused for debugging.   */
+#define NR_VIRQS        7
+
+
+/*
+ * Per-domain flags (domain_flags).
+ */
+ /* Is this one of the per-CPU idle domains? */
+#define _DOMF_idle_domain      0
+#define DOMF_idle_domain       (1UL<<_DOMF_idle_domain)
+ /* Is this domain privileged? */
+#define _DOMF_privileged       1
+#define DOMF_privileged        (1UL<<_DOMF_privileged)
+ /* May this domain do IO to physical devices? */
+#define _DOMF_physdev_access   2
+#define DOMF_physdev_access    (1UL<<_DOMF_physdev_access)
+ /* Guest shut itself down for some reason. */
+#define _DOMF_shutdown         3
+#define DOMF_shutdown          (1UL<<_DOMF_shutdown)
+ /* Guest is in process of shutting itself down (becomes DOMF_shutdown). */
+#define _DOMF_shuttingdown     4
+#define DOMF_shuttingdown      (1UL<<_DOMF_shuttingdown)
+ /* Death rattle. */
+#define _DOMF_dying            5
+#define DOMF_dying             (1UL<<_DOMF_dying)
+ /* Domain is paused by controller software. */
+#define _DOMF_ctrl_pause       6
+#define DOMF_ctrl_pause        (1UL<<_DOMF_ctrl_pause)
+
+
+/*
+ * Reason codes for SCHEDOP_shutdown. These may be interpreted by controller
+ * software to determine the appropriate action. For the most part, Xen does
+ * not care about the shutdown code.
+ */
+#define SHUTDOWN_poweroff   0  /* Domain exited normally. Clean up and kill. */
+#define SHUTDOWN_reboot     1  /* Clean up, kill, and then restart.          */
+#define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
+#define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
+/*   END: Stuff for fake */
+
+#define _TEST_DOMAIN_DESTRUCTED_POWER 31 /* Keep these 2 in sync */
+#define DOMAIN_DESTRUCTED (1<<31)        /* assumes atomic_t is >= 32 bits */
+
+/* Empty functions */
+static void debugger_trap_immediate(void) {}
+static void sched_rem_domain(struct vcpu *v) {}
+static void vcpu_sleep_sync(struct vcpu *v) {}
+static void vcpu_sleep_nosync(struct vcpu *v) {}
+static void vcpu_wake(struct vcpu *v) {}
+static void show_registers(struct cpu_user_regs *regs) {}
+
+static void domain_relinquish_resources(struct domain *d) {}
+static inline void send_guest_virq(struct vcpu *v, int virq) {}
+static void free_perdomain_pt(struct domain *d) {}
+#define free_xenheap_page(v) (free_xenheap_pages(v,0))
+static void free_xenheap_pages(void *v, unsigned int order) {}
+static void arch_do_createdomain(struct vcpu *v) {}
+
+/* Override fake printk (it abort()s) */
+void printk(const char *format, ...) {}
+
+/* Only one softirq used in domain.c. */
+#define DOMAIN_SHUTDOWN_FINALISE_SOFTIRQ  6
+
+typedef void (*softirq_handler)(void);
+softirq_handler  _test_handler;
+
+static inline void open_softirq(int nr, softirq_handler handler)
+{
+       test_cond(nr == DOMAIN_SHUTDOWN_FINALISE_SOFTIRQ);
+        _test_handler = handler;
+}
+
+static void do_softirq(void)
+{
+       _test_handler();
+}
+
+static inline void raise_softirq(unsigned int nr) {}
+
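+/* Local stand-in for the dom0_ops.h structure; only the fields the tests
+ * below actually touch matter here. */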
+typedef struct {
+    /* IN variables. */
+    domid_t               domain;
+    uint16_t              vcpu;
+    /* IN/OUT parameters */
+    vcpu_guest_context_t *ctxt;
+} dom0_setdomaininfo_t;
+
+/* Forward declarations for functions from ../domain.c */
+/* FIXME: Move prototypes from sched.h to domain.h */
+struct domain *do_createdomain(domid_t dom_id, unsigned int cpu);
+struct domain *find_domain_by_id(domid_t dom);
+void domain_kill(struct domain *d);
+void domain_crash(struct domain *d);
+void domain_crash_synchronous(void);
+static void domain_shutdown_finalise(void);
+static __init int domain_shutdown_finaliser_init(void);
+void domain_shutdown(struct domain *d, u8 reason);
+void domain_pause_for_debugger(void);
+void domain_destruct(struct domain *d);
+void vcpu_pause(struct vcpu *v);
+void domain_pause(struct domain *d);
+void vcpu_unpause(struct vcpu *v);
+void domain_unpause(struct domain *d);
+void domain_pause_by_systemcontroller(struct domain *d);
+void domain_unpause_by_systemcontroller(struct domain *d);
+int set_info_guest(struct domain *d, dom0_setdomaininfo_t *setdomaininfo);
+int boot_vcpu(struct domain *d, int vcpuid, struct vcpu_guest_context *ctxt);
+long do_vcpu_op(int cmd, int vcpuid, void *arg);
+long vm_assist(struct domain *p, unsigned int cmd, unsigned int type);
+
+/* FIXME: Use a list to track allocation/frees of domains and vcpus */
+static int _test_alloc_domain_error = 0;
+static struct domain *alloc_domain(void)
+{
+       struct domain *d;
+
+
+       if (_test_alloc_domain_error)
+               return NULL;
+
+       d = malloc(sizeof(struct domain));
+       test_cond(d != NULL);
+       /* FIXME: don't use memset; it will defeat valgrind's memcheck */
+       memset(d, 0, sizeof(struct domain));
+       return d;
+}
+
+static void free_domain(struct domain *d)
+{
+    int i;
+    struct vcpu *v;
+
+    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- ) {
+        if ( d->vcpu[i] != NULL ) {
+               v = d->vcpu[i];
+               test_cond(v->next_in_list == NULL);
+
+               if ( v->vcpu_id != 0 ) {
+                       v->domain->vcpu[v->vcpu_id - 1]->next_in_list = NULL;
+               }
+               free(v);
+       }
+    }
+
+    free(d);
+}
+
+static int _test_alloc_vcpu_error = 0;
+static struct vcpu *alloc_vcpu(struct domain *d, unsigned int vcpu_id, 
+                               unsigned int cpu_id)
+{
+       struct vcpu *v;
+
+       if (_test_alloc_vcpu_error)
+               return NULL;
+
+       test_cond(d->vcpu[vcpu_id] == NULL);
+
+       v = malloc(sizeof(struct vcpu));
+       test_cond(v != NULL);
+       /* FIXME: don't use memset; it will defeat valgrind's memcheck */
+       memset(v, 0, sizeof(struct vcpu));
+
+       v->domain = d;
+       v->vcpu_id = vcpu_id;
+       v->processor = cpu_id;
+       atomic_set(&v->pausecnt, 0);
+       d->vcpu[vcpu_id] = v;
+
+       /* from sched_add_domain */
+       if ( is_idle_task(d) ) {
+                set_bit(_VCPUF_running, &v->vcpu_flags);
+       }
+
+       if ( vcpu_id != 0 ) {
+               d->vcpu[v->vcpu_id-1]->next_in_list = v;
+               set_bit(_VCPUF_down, &v->vcpu_flags);
+       }
+
+       return v;
+}
+
+
+/* from common/event_channel.c */
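+/* Sentinel values: 42 marks a slot with no live event channel, 24 marks a
+ * created one, so the tests can observe evtchn_init()/evtchn_destroy()
+ * side effects. */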
+static int _test_evtchn[DOMAIN_HASH_SIZE] =
+       { [0 ... DOMAIN_HASH_SIZE-1] = 42 };
+static int _test_evtchn_error = 0;
+static int evtchn_init(struct domain *d)
+{
+       int retval;
+       int slot = d->domain_id;
+
+       /* Event Channels are not created for IDLE_DOMAIN_ID */
+       test_cond(d->domain_id != IDLE_DOMAIN_ID);
+
+       if (slot >= DOMAIN_HASH_SIZE) {
+               slot /= DOMAIN_HASH_SIZE;
+       }
+
+       test_cond(slot <= DOMAIN_HASH_SIZE-1);
+
+       if (!_test_evtchn_error) {
+               _test_evtchn[slot] = 24;
+               retval = 0;
+       } else {
+               _test_evtchn[slot] = 42;
+               retval = -EINVAL;
+       }
+
+       return retval;
+}
+
+static void evtchn_destroy(struct domain *d)
+{
+       int slot  = d->domain_id;
+
+       /* Event Channels are not created for IDLE_DOMAIN_ID
+        * so destroying them makes no sense.
+        * event_channel.c handles this better than this test suite */
+       if (d->domain_id == IDLE_DOMAIN_ID)
+               return;
+
+       if (slot >= DOMAIN_HASH_SIZE) {
+               slot /= DOMAIN_HASH_SIZE;
+       }
+
+       test_cond(slot <= DOMAIN_HASH_SIZE-1);
+
+       _test_evtchn[slot] = 42;
+}
+
+/* from common/grant_table.c */
+static int _test_granttable[DOMAIN_HASH_SIZE] = 
+       { [0 ... DOMAIN_HASH_SIZE-1] = 42 };
+static int _test_granttable_error = 0;
+static int grant_table_create(struct domain *d)
+{
+       int retval;
+       int slot = d->domain_id;
+
+       /* Grant tables are not created for IDLE_DOMAIN_ID */
+       test_cond(d->domain_id != IDLE_DOMAIN_ID);
+
+       if (slot >= DOMAIN_HASH_SIZE) {
+               slot /= DOMAIN_HASH_SIZE;
+       }
+
+       test_cond(slot <= DOMAIN_HASH_SIZE-1);
+
+       if (!_test_granttable_error) {
+               _test_granttable[slot] = 24;
+               retval = 0;
+       } else {
+               _test_granttable[slot] = 42;
+               retval = -ENOMEM;
+       }
+
+       return retval;
+}
+
+static void grant_table_destroy(struct domain *d)
+{
+       int slot = d->domain_id;
+
+       /* Grant tables are not created for IDLE_DOMAIN_ID
+        * so destroying them makes no sense.
+        * grant_table.c handles this better than this test suite */
+       if (d->domain_id == IDLE_DOMAIN_ID)
+               return;
+
+       if (slot >= DOMAIN_HASH_SIZE) {
+               slot /= DOMAIN_HASH_SIZE;
+       }
+
+       test_cond(slot <= DOMAIN_HASH_SIZE-1);
+
+       _test_granttable[slot] = 42;
+}
+
+int _test_arch_set_info_guest_result = 0;
+static int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c)
+{
+       return _test_arch_set_info_guest_result;
+}
+
+#define guest_cpu_user_regs() (NULL)
+
+/* FIXME: The real versions of machine_halt/machine_restart are
+ * noreturns, which means that the tail end of domain_shutdown
+ * is never executed in the domain_id == 0 case.
+ * #define'ing them to return will not work because there is no symbol
+ * for the extern to match */
+static int _test_machine_state = 0;
+static void machine_restart(char __attribute__((unused)) *addr)
+{
+       _test_machine_state = 1;
+}
+
+static void machine_halt(void)
+{
+       _test_machine_state = 2;
+}
+
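+/* Include the code under test directly so its static functions (e.g.
+ * domain_shutdown_finalise()) and internal lists are reachable from main(). */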
+#include "../domain.c"
+
+/* FIXME: do not re-use allocations.  malloc again! */
+int main(int argc, char *argv[])
+{
+       int i;
+       struct domain *_test_domains[DOMAIN_HASH_SIZE] = 
+                                      {[0 ... DOMAIN_HASH_SIZE-1] = NULL};
+       struct vcpu *_test_vcpus[10] = {[0 ... 9] = NULL};
+       domid_t domain_id;
+       struct domain *d;
+       struct vcpu_guest_context *_test_guest_context;
+       dom0_setdomaininfo_t *_test_setdomaininfo;
+
+       parse_test_args(argc, argv);
+
+       /* build current() */
+       current = malloc(sizeof(struct vcpu));
+       current->domain = malloc(sizeof(struct domain));
+
+       current->domain->vcpu[0] = current;
+       dom0 = current->domain;
+       spin_lock_init(&current->domain->big_lock);
+
+       /* Test domain_shutdown_finaliser_init */
+       test_cond(_test_handler == NULL);
+       domain_shutdown_finaliser_init();
+       test_cond(_test_handler == domain_shutdown_finalise);
+
+       /* Test vm_assist */
+       test_cond(vm_assist(current->domain, -1, MAX_VMASST_TYPE) == -ENOSYS);
+       test_cond(vm_assist(current->domain, VMASST_CMD_enable, MAX_VMASST_TYPE+1)
+                         == -EINVAL);
+
+       current->domain->vm_assist = 0UL;
+       test_cond(!vm_assist(current->domain, VMASST_CMD_enable, 
+                         VMASST_TYPE_4gb_segments));
+       test_cond(current->domain->vm_assist != 0);
+
+       test_cond(!vm_assist(current->domain, VMASST_CMD_disable, 
+                         VMASST_TYPE_4gb_segments));
+       test_cond(current->domain->vm_assist == 0);
+
+       test_cond(!vm_assist(current->domain, VMASST_CMD_enable, 
+                         VMASST_TYPE_4gb_segments_notify));
+       test_cond(current->domain->vm_assist != 0);
+
+       test_cond(!vm_assist(current->domain, VMASST_CMD_disable, 
+                         VMASST_TYPE_4gb_segments_notify));
+       test_cond(current->domain->vm_assist == 0);
+
+       test_cond(!vm_assist(current->domain, VMASST_CMD_enable, 
+                         VMASST_TYPE_writable_pagetables));
+       test_cond(current->domain->vm_assist != 0);
+
+       test_cond(!vm_assist(current->domain, VMASST_CMD_disable, 
+                         VMASST_TYPE_writable_pagetables));
+       test_cond(current->domain->vm_assist == 0);
+
+       /* Nothing to test for boot_vcpu(), it will either BUG_ON or
+        * pass back the result of arch_set_info_guest() */
+
+       /* Test do_createdomain failure paths */
+       _test_alloc_domain_error = 1;
+       _test_domains[0] = do_createdomain(0, 0);
+       test_cond(_test_domains[0] == NULL);
+       _test_alloc_domain_error = 0;
+
+       _test_granttable_error = 1;
+       _test_domains[0] = do_createdomain(0, 0);
+       test_cond(_test_domains[0] == NULL);
+       test_cond(_test_evtchn[0] == 42);
+       _test_granttable_error = 0;
+
+       _test_evtchn_error = 1;
+       _test_domains[0] = do_createdomain(0, 0);
+       test_cond(_test_domains[0] == NULL);
+       /* check event channel was destroyed */
+       test_cond(_test_evtchn[0] == 42);
+       _test_evtchn_error = 0;
+
+       _test_alloc_vcpu_error = 1;
+       _test_evtchn[0] = 99;
+       _test_granttable[0] = 99;
+       _test_domains[0] = do_createdomain(IDLE_DOMAIN_ID, 0);
+       test_cond(_test_domains[0] == NULL);
+       test_cond(_test_evtchn[0] == 99);
+       test_cond(_test_granttable[0] == 99);
+
+       _test_evtchn[0] = 42;
+       _test_granttable[0] = 42;
+       _test_domains[0] = do_createdomain(0, 0);
+       test_cond(_test_domains[0] == NULL);
+       /* check granttable was destroyed */
+       test_cond(_test_granttable[0] == 42);
+       _test_alloc_vcpu_error = 0;
+
+       /* Test do_createdomain and domain_destruct */
+       for(i=DOMAIN_HASH_SIZE-1; i>=0; i--) {
+               _test_domains[i] = do_createdomain(i, 0);
+               test_cond(_test_domains[i] != NULL);
+               test_cond(_test_domains[i]->next_in_hashbucket == NULL);
+               test_cond(_atomic_read(_test_domains[i]->refcnt) == 1);
+               test_cond(_test_evtchn[i] == 24);
+       }
+
+
+       /* Walk the list, checking that it is sorted by domain_id and that
+        * all domains are found */
+       domain_id =  domain_list->domain_id;
+       d = domain_list;
+       i=0;
+
+       while (d->next_in_list != NULL) {
+               test_cond(domain_id <= d->domain_id);
+               d = d->next_in_list;
+               domain_id = d->domain_id;
+               i++;
+       }
+       test_cond(i == DOMAIN_HASH_SIZE-1);
+
+       /* check each hash bucket got exactly one domain */
+       for(i = 0; i< DOMAIN_HASH_SIZE; i++) {
+               int hash_val = DOMAIN_HASH(_test_domains[i]->domain_id);
+
+               test_cond(domain_hash[hash_val] == _test_domains[i]);
+               test_cond(domain_hash[hash_val]->next_in_hashbucket == NULL);
+       }
+
+       for(i = 0; i< DOMAIN_HASH_SIZE; i++) {
+               domid_t old_domainid = _test_domains[i]->domain_id;
+
+               test_cond(domain_list->domain_id == _test_domains[i]->domain_id);
+               test_cond(_atomic_read(domain_list->refcnt) == 1);
+
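+               /* With the refcount still held, domain_destruct() must leave
+                * the domain on domain_list; only after refcnt is forced to 0
+                * below does it actually get torn down. */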
+               set_bit(_DOMF_dying, &_test_domains[i]->domain_flags);
+               domain_destruct(_test_domains[i]);
+
+               test_cond(domain_list->domain_id == _test_domains[i]->domain_id);
+
+               _atomic_set(domain_list->refcnt, 0); 
+               domain_destruct(_test_domains[i]);
+
+               if (i == DOMAIN_HASH_SIZE-1) {
+                       test_cond(domain_list == NULL);
+               } else {
+                       test_cond(domain_list->domain_id != old_domainid);
+               }
+
+               test_cond(_test_evtchn[i] == 42);
+               _test_domains[i] = NULL;
+       }
+
+       /* Test find_domain_by_id (Uses hashbucket ops) */
+       /* Interleave domain creation requests */
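+       /* The domain ids are multiples of DOMAIN_HASH_SIZE, so DOMAIN_HASH()
+        * puts every one of them in bucket 0 and the next_in_hashbucket chain
+        * gets exercised. */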
+       for(i=1; i<=2; i++) {
+               int j;
+               for(j=(DOMAIN_HASH_SIZE/4)-i; j>=0; j-=2) {
+                       int k = j*DOMAIN_HASH_SIZE;
+                       _test_domains[j] = do_createdomain(k, 0);
+                       test_cond(_test_domains[j] != NULL);
+               }
+       }
+
+       domain_id =  domain_list->domain_id;
+       d = domain_list;
+       i=0;
+
+       while (d->next_in_hashbucket != NULL) {
+               test_cond(domain_id <= d->domain_id);
+               d = d->next_in_hashbucket;
+               domain_id = d->domain_id;
+               i++;
+       }
+       test_cond(i == (DOMAIN_HASH_SIZE/4)-1);
+
+       /* find_domain_by_id will fail if the domain is DOMAIN_DESTRUCTED */
+       fake_get_domain_out = 0;
+       test_cond(get_domain(_test_domains[0]) == 0);
+       test_cond(find_domain_by_id(_test_domains[0]->domain_id) == NULL);
+       fake_get_domain_out = 1;
+
+       /* Check ref counts work as expected */
+       for(i = 0; i< (DOMAIN_HASH_SIZE/4); i++) {
+               int refcnt = _atomic_read(_test_domains[i]->refcnt);
+
+               test_cond(refcnt == 1);
+               test_cond(find_domain_by_id(_test_domains[i]->domain_id) != NULL);
+               test_cond(_atomic_read(_test_domains[i]->refcnt) == refcnt+1);
+
+               put_domain(_test_domains[i]);
+               test_cond(_atomic_read(_test_domains[i]->refcnt) == refcnt);
+       }
+
+       /* destroy the created domains and structures */
+       for(i = 0; i< (DOMAIN_HASH_SIZE/4); i++) {
+               set_bit(_DOMF_dying, &_test_domains[i]->domain_flags);
+               _atomic_set(domain_list->refcnt, 0);
+               domain_destruct(_test_domains[i]);
+
+               test_cond(_test_evtchn[i] == 42);
+       }
+
+       /* Test do_vcpu_op */
+       current->domain->vcpu[0] = NULL;
+       test_cond(do_vcpu_op(-1, -1, NULL) == -EINVAL);
+       test_cond(do_vcpu_op(-1, MAX_VIRT_CPUS-1, NULL) == -ENOENT);
+
+       current->domain->vcpu[0] = current;
+
+       /* What protects against invalid cmds? */
+       test_cond(!do_vcpu_op(-1, 0, NULL));
+
+       /* Need instrumentation to fail copy_to/from_user */
+       _test_guest_context = malloc(sizeof(struct vcpu_guest_context));
+       /* FIXME: don't use memset; it will defeat valgrind's memcheck */
+       memset(_test_guest_context, 0, sizeof(struct vcpu_guest_context));
+
+       set_bit(_VCPUF_initialised, &current->vcpu_flags);
+       test_cond(do_vcpu_op(VCPUOP_initialise, 0, 
+              fake_to_user(_test_guest_context)) == -EEXIST);
+       clear_bit(_VCPUF_initialised, &current->vcpu_flags);
+
+       /* do_vcpu_op(VCPUOP_initialise,...) should pass back the result of
+        * boot_vcpu, which passes back the arch_set_info_guest() result */
+       _test_arch_set_info_guest_result = -EINVAL;
+       test_cond(do_vcpu_op(VCPUOP_initialise, 0, 
+              fake_to_user(_test_guest_context)) ==
+              _test_arch_set_info_guest_result);
+       _test_arch_set_info_guest_result = 0;
+       test_cond(do_vcpu_op(VCPUOP_initialise, 0, 
+               fake_to_user(_test_guest_context)) == 
+               _test_arch_set_info_guest_result);
+
+       current->vcpu_flags = 0UL;
+       test_cond(do_vcpu_op(VCPUOP_up, 0, NULL) == -EINVAL);
+
+       set_bit(_VCPUF_initialised, &current->vcpu_flags);
+       test_cond(!do_vcpu_op(VCPUOP_up, 0, NULL));
+
+       set_bit(_VCPUF_down, &current->vcpu_flags);
+       test_cond(!do_vcpu_op(VCPUOP_up, 0, NULL));
+       test_cond(!test_bit(_VCPUF_down, &current->vcpu_flags));
+
+       test_cond(do_vcpu_op(VCPUOP_is_up, 0, NULL) ==
+              !test_bit(_VCPUF_down, &current->vcpu_flags));
+
+       test_cond(!do_vcpu_op(VCPUOP_down, 0, NULL));
+       test_cond(test_bit(_VCPUF_down, &current->vcpu_flags));
+
+       /* Test set_info_guest */
+       _test_setdomaininfo = malloc(sizeof(dom0_setdomaininfo_t));
+
+       current->domain->vcpu[0] = NULL;
+       _test_setdomaininfo->vcpu = MAX_VIRT_CPUS;
+       current->domain->domain_flags = 0UL;
+       _test_setdomaininfo->ctxt = fake_from_user(_test_guest_context);
+
+       test_cond(set_info_guest(current->domain, _test_setdomaininfo) == -EINVAL);
+       _test_setdomaininfo->vcpu = 0;
+       test_cond(set_info_guest(current->domain, _test_setdomaininfo) == -EINVAL);
+       current->domain->vcpu[0] = current;
+
+       test_cond(set_info_guest(current->domain, _test_setdomaininfo) == -EINVAL);
+       set_bit(_DOMF_ctrl_pause, &current->domain->domain_flags);
+
+       _test_arch_set_info_guest_result = -ENOSYS;
+       test_cond(set_info_guest(current->domain, _test_setdomaininfo) ==
+              _test_arch_set_info_guest_result);
+       _test_arch_set_info_guest_result = 0;
+
+       /* Test domain_pause_by_systemcontroller and
+        *      domain_unpause_by_systemcontroller */
+       _test_domains[0] = do_createdomain(0, 0);
+       _test_domains[0]->domain_flags = 0UL;
+       current->domain->domain_flags = 0UL;
+       set_bit(_DOMF_ctrl_pause, &_test_domains[0]->domain_flags );
+
+       domain_unpause_by_systemcontroller(_test_domains[0]);
+       test_cond(!test_bit(_DOMF_ctrl_pause, &_test_domains[0]->domain_flags));
+       domain_pause_by_systemcontroller(_test_domains[0]);
+       test_cond(test_bit(_DOMF_ctrl_pause, &_test_domains[0]->domain_flags));
+
+       free_domain(_test_domains[0]);
+
+       /* Test vcpu_pause/vcpu_unpause and domain_pause/domain_unpause */
+       for(i=0;i<10;i++) {
+               _test_vcpus[i] = malloc(sizeof(struct vcpu));
+               test_cond(_test_vcpus[i] != NULL);
+               _test_vcpus[i]->domain = malloc(sizeof(struct domain));
+               test_cond(_test_vcpus[i]->domain != NULL);
+
+               atomic_set(&_test_vcpus[i]->pausecnt, 0);
+               _test_vcpus[i]->domain->vcpu[0] = _test_vcpus[i];
+               spin_lock_init(&_test_vcpus[i]->domain->big_lock);
+       }
+
+       for(i=0;i<10;i++) {
+               int j;
+               for(j=1;j<=9;j++) {
+                       vcpu_pause(_test_vcpus[i]);
+                       test_cond(atomic_read(&_test_vcpus[i]->pausecnt) == j);
+               }
+
+               test_cond(atomic_read(&_test_vcpus[i]->pausecnt) == 9);
+
+               for(j=9;j>=1;j--) {
+                       vcpu_unpause(_test_vcpus[i]);
+                       test_cond(atomic_read(&_test_vcpus[i]->pausecnt) == j-1);
+               }
+
+               test_cond(atomic_read(&_test_vcpus[i]->pausecnt) == 0);
+       }
+
+       for(i=0;i<10;i++) {
+               int j;
+               for(j=1;j<=9;j++) {
+                       domain_pause(_test_vcpus[i]->domain);
+                       test_cond(atomic_read(&_test_vcpus[i]->pausecnt) == j);
+               }
+
+               test_cond(atomic_read(&_test_vcpus[i]->pausecnt) == 9);
+
+               for(j=9;j>=1;j--) {
+                       domain_unpause(_test_vcpus[i]->domain);
+                       test_cond(atomic_read(&_test_vcpus[i]->pausecnt) == j-1);
+               }
+
+               test_cond(atomic_read(&_test_vcpus[i]->pausecnt) == 0);
+       }
+
+       for(i=0;i<10;i++) {
+               test_cond(_test_vcpus[i]->domain->big_lock._l == 88);
+               free(_test_vcpus[i]->domain);
+               free(_test_vcpus[i]);
+       }
+
+       /* Test domain_pause_for_debugger */
+       current->domain->domain_flags = 0UL;
+       domain_pause_for_debugger();
+       test_cond(test_bit(_DOMF_ctrl_pause, &current->domain->domain_flags));
+
+       /* Test domain_shutdown */
+       _test_vcpus[0] = malloc(sizeof(struct vcpu));
+       test_cond(_test_vcpus[0] != NULL);
+       _test_vcpus[0]->domain = malloc(sizeof(struct domain));
+       test_cond(_test_vcpus[0]->domain != NULL);
+       _test_vcpus[0]->domain->vcpu[0] = _test_vcpus[0];
+       _test_domains[0] = _test_vcpus[0]->domain;
+
+       atomic_set(&_test_vcpus[0]->pausecnt, 0);
+       spin_lock_init(&_test_vcpus[0]->domain->big_lock);
+
+       _test_domains[0]->domain_id = 1;
+       set_bit(_DOMF_shuttingdown, &_test_domains[0]->domain_flags);
+       test_cond(domain_shuttingdown[smp_processor_id()] != _test_domains[0]);
+
+       domain_shutdown(_test_domains[0], 255);
+       test_cond(_test_domains[0]->shutdown_code == 255);
+       test_cond(test_bit(_DOMF_shuttingdown, &_test_domains[0]->domain_flags));
+
+       clear_bit(_DOMF_shuttingdown, &_test_domains[0]->domain_flags);
+       domain_shutdown(_test_domains[0], 254);
+       test_cond(_test_domains[0]->shutdown_code == 254);
+       test_cond(domain_shuttingdown[smp_processor_id()] == _test_domains[0]);
+
+       /* Now test the dodgy domain_id = 0 cases */
+       _test_domains[0]->domain_id = 0;
+       _test_domains[0]->shutdown_code = 0;
+       set_bit(_DOMF_shuttingdown, &_test_domains[0]->domain_flags);
+       domain_shuttingdown[smp_processor_id()] = NULL;
+
+       domain_shutdown(_test_domains[0], 254);
+       test_cond(_test_machine_state == 1);
+       _test_machine_state = 0;
+
+       domain_shutdown(_test_domains[0], SHUTDOWN_poweroff);
+       test_cond(_test_machine_state == 2);
+       _test_machine_state = 0;
+       
+       /* Test domain_shutdown_finalise */
+       _test_domains[0]->domain_flags = 0UL;
+       set_bit(_DOMF_shuttingdown, &_test_domains[0]->domain_flags);
+       domain_shuttingdown[smp_processor_id()] = _test_domains[0];
+
+       domain_shutdown_finalise();
+       test_cond(!test_bit(_DOMF_shuttingdown, &_test_domains[0]->domain_flags));
+       test_cond(test_bit(_DOMF_shutdown, &_test_domains[0]->domain_flags));
+
+       /* domain_crash/domain_crash_synchronous call previously tested
+        * functions */
+
+       /* Test domain_kill */
+       atomic_set(&_test_vcpus[0]->pausecnt, 0);
+       atomic_set(&_test_domains[0]->refcnt, 2);
+       _test_domains[0]->domain_flags = 0UL;
+       set_bit(_DOMF_dying, &_test_domains[0]->domain_flags);
+       domain_kill(_test_domains[0]);
+       test_cond(atomic_read(&_test_vcpus[0]->pausecnt) == 1);
+       test_cond(test_bit(_DOMF_dying, &_test_domains[0]->domain_flags));
+
+       clear_bit(_DOMF_dying, &_test_domains[0]->domain_flags);
+
+       domain_kill(_test_domains[0]);
+       test_cond(atomic_read(&_test_vcpus[0]->pausecnt) == 2);
+       test_cond(atomic_read(&_test_domains[0]->refcnt) == 1);
+
+       free(_test_vcpus[0]->domain);
+       free(_test_vcpus[0]);
+
+       free(_test_setdomaininfo);
+       free(_test_guest_context);
+
+       free(current->domain);
+       free(current);
+
+       return 0;
+}
diff -urN --exclude=.hg --exclude='*~' --exclude='*.aux' xen-unstable.hg-mainline/xen/common/test/test_event_channel.c xen-unstable.hg-check/xen/common/test/test_event_channel.c
--- xen-unstable.hg-mainline/xen/common/test/test_event_channel.c       1970-01-01 10:00:00.000000000 +1000
+++ xen-unstable.hg-check/xen/common/test/test_event_channel.c  2005-12-09 16:33:19.000000000 +1100
@@ -0,0 +1,768 @@
+/*  Tests for event_channel.c
+ *
+    Copyright (C) 2005 Tony Breeds IBM Corporation
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
+ */
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <stdlib.h>
+
+#include "fake-support.h"
+
+#define        BUG()
+#define DPRINTK(...)
+
+#define for_each_vcpu(_d,_v)                    \
+ for ( (_v) = (_d)->vcpu[0];                    \
+       (_v) != NULL;                            \
+       (_v) =  NULL)
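+/* This fake only ever visits vcpu[0]; test_alloc_vcpu() below never creates
+ * more than one vcpu per domain, so that is all these tests need. */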
+
+/* Override fake printk (it abort()s) */
+void printk(const char *format, ...) {}
+
+static struct vcpu *_test_evtchn_set_pending_in_v;
+static int _test_evtchn_set_pending_in_port;
+static void evtchn_set_pending(struct vcpu *v, int port)
+{
+       _test_evtchn_set_pending_in_v = v;
+       _test_evtchn_set_pending_in_port = port;
+}
+
+static struct vcpu *_test_pirq_guest_bind_in_v;
+static int _test_pirq_guest_bind_in_irq;
+static int _test_pirq_guest_bind_in_will_share;
+static int _test_pirq_guest_bind_out = 0;
+static int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
+{
+       _test_pirq_guest_bind_in_v = v;
+       _test_pirq_guest_bind_in_irq = irq;
+       _test_pirq_guest_bind_in_will_share = will_share;
+
+       return _test_pirq_guest_bind_out;
+}
+
+static struct domain *_test_pirq_guest_unbind_in_d;
+static int _test_pirq_guest_unbind_in_irq;
+static int _test_pirq_guest_unbind_out = 0;
+static int pirq_guest_unbind(struct domain *d, int irq)
+{
+       _test_pirq_guest_unbind_in_d = d;
+       _test_pirq_guest_unbind_in_irq = irq;
+       
+       return _test_pirq_guest_unbind_out;
+}
+
+/* Set up a minimal vcpu/domain pair, ensuring that commonly used fields
+ * contain sensible defaults */
+static struct vcpu *test_alloc_vcpu(void)
+{
+       struct vcpu *v;
+
+       v = malloc(sizeof(struct vcpu));
+       v->domain = malloc(sizeof(struct domain));
+
+       v->domain->vcpu[0] = v;
+       v->domain->domain_id = 0;
+       atomic_set(&v->domain->refcnt, 0);
+       spin_lock_init(&v->domain->evtchn_lock);
+
+       /* Ensure that all event channels are empty */
+       memset(v->domain->evtchn, 0, 
+              NR_EVTCHN_BUCKETS * sizeof(v->domain->evtchn[0]));
+
+       return v;
+}
+
+#include "../../include/public/event_channel.h"
+
+/* needs evtchn_op_t from public/event_channel.h */
+
+static evtchn_op_t *test_alloc_evtchn_op(int cmd)
+{
+       evtchn_op_t *op;
+
+       op = malloc(sizeof(evtchn_op_t));
+       op->cmd = cmd;
+
+       return op;
+}
+
+
+static evtchn_op_t *_test_acm_pre_event_channel_in_op;
+static int _test_acm_pre_event_channel_out = 0;
+static inline int acm_pre_event_channel(evtchn_op_t *op) 
+{ 
+       _test_acm_pre_event_channel_in_op = op;
+       return _test_acm_pre_event_channel_out;
+}
+
+
+/* Forward declarations for event_channel.c */
+long evtchn_send(int lport);
+void send_guest_pirq(struct domain *d, int pirq);
+long do_event_channel_op(evtchn_op_t *uop);
+int evtchn_init(struct domain *d);
+void evtchn_destroy(struct domain *d);
+
+
+#include "../event_channel.c"
+
+int main(int argc, char *argv[])
+{
+       int i;
+
+       struct vcpu *remote_vcpu = NULL;
+       evtchn_close_t *close_opts;
+       evtchn_op_t *uop;
+       evtchn_alloc_unbound_t *unbound_alloc;
+       evtchn_bind_interdomain_t *interdomain_bind;
+       evtchn_bind_virq_t *virq_bind;
+       evtchn_bind_ipi_t *ipi_bind;
+       evtchn_bind_pirq_t *pirq_bind;
+       evtchn_status_t  *status;
+       evtchn_bind_vcpu_t *vcpu_bind;
+
+       current = test_alloc_vcpu();
+
+       parse_test_args(argc, argv);
+
+       /* Test get_free_port */
+       for(i=0; i<MAX_EVTCHNS; i++) {
+               int port = get_free_port(current->domain);
+
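+               /* get_free_port() presumably pulls a fresh bucket from
+                * xmalloc() each time the port number crosses an
+                * EVTCHNS_PER_BUCKET boundary; fake_expect_xmalloc (from
+                * fake-support.h) tells the harness to expect that allocation. */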
+               if ((port%EVTCHNS_PER_BUCKET) == 0) 
+                       fake_expect_xmalloc = 1;
+               test_cond(port == i);
+               test_cond(evtchn_from_port(current->domain, port)->state == 
+                      ECS_FREE);
+               evtchn_from_port(current->domain, port)->state = ECS_RESERVED;
+       }
+
+       test_cond(get_free_port(current->domain) == -ENOSPC);
+
+       for(i=0; i<NR_EVTCHN_BUCKETS; i++) {
+               xfree(current->domain->evtchn[i]);
+               current->domain->evtchn[i] = NULL;
+       }
+       fake_xmalloc_expected = 0;
+
+       /* Test evtchn_init */
+       current = test_alloc_vcpu();
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_init(current->domain) == 0);
+       test_cond(evtchn_from_port(current->domain, 0)->state == ECS_RESERVED);
+       test_cond(evtchn_init(current->domain) == -EINVAL);
+
+       /* evtchn_init relies on evtchn_destroy to free the allocated memory */
+       xfree(current->domain->evtchn[0]);
+       current->domain->evtchn[0] = NULL;
+       fake_xmalloc_expected = 0;
+
+       /* Test __evtchn_close */
+       current = test_alloc_vcpu();
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_init(current->domain) == 0);
+
+       test_cond(__evtchn_close(current->domain, -1) == -EINVAL);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_FREE;
+       test_cond(__evtchn_close(current->domain, 0) == -EINVAL);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_RESERVED;
+       test_cond(__evtchn_close(current->domain, 0) == -EINVAL);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_UNBOUND;
+       test_cond(__evtchn_close(current->domain, 0) == 0);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_PIRQ;
+       evtchn_from_port(current->domain, 0)->u.pirq = 0;
+       _test_pirq_guest_unbind_out = 0;
+       test_cond(__evtchn_close(current->domain, 0) ==  0);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_PIRQ;
+       evtchn_from_port(current->domain, 0)->u.pirq = ~0;
+       _test_pirq_guest_unbind_out = -1;
+       test_cond(__evtchn_close(current->domain, 0) ==  -1);
+       _test_pirq_guest_unbind_out = 0;
+
+       evtchn_from_port(current->domain, 0)->state = ECS_VIRQ;
+       evtchn_from_port(current->domain, 0)->u.virq = 0;
+       current->virq_to_evtchn[0] = 1;
+       test_cond(__evtchn_close(current->domain, 0) == 0);
+       test_cond(current->virq_to_evtchn[0] == 1);
+       test_cond(evtchn_from_port(current->domain, 0)->state == ECS_FREE);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_VIRQ;
+       evtchn_from_port(current->domain, 0)->u.virq = 0;
+       current->virq_to_evtchn[0] = 0;
+       test_cond(__evtchn_close(current->domain, 0) == 0);
+       test_cond(current->virq_to_evtchn[0] == 0);
+       test_cond(evtchn_from_port(current->domain, 0)->state == ECS_FREE);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_IPI;
+       test_cond(__evtchn_close(current->domain, 0) == 0);
+       test_cond(evtchn_from_port(current->domain, 0)->state == ECS_FREE);
+
+       remote_vcpu = test_alloc_vcpu();
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_init(remote_vcpu->domain) == 0);
+
+       /* bump refcnt to prevent domain_destruct from freeing the memory */
+       atomic_set(&current->domain->refcnt, 10);
+       atomic_set(&remote_vcpu->domain->refcnt, 10);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_INTERDOMAIN;
+       evtchn_from_port(remote_vcpu->domain, 0)->state = ECS_INTERDOMAIN;
+
+       evtchn_from_port(current->domain, 0)->u.interdomain.remote_dom = 
+               remote_vcpu->domain;
+       evtchn_from_port(current->domain, 0)->u.interdomain.remote_port = 0;
+       evtchn_from_port(remote_vcpu->domain, 0)->u.interdomain.remote_dom = 
+               current->domain;
+       evtchn_from_port(remote_vcpu->domain, 0)->u.interdomain.remote_port = 0;
+
+       test_cond(__evtchn_close(current->domain, 0) == 0);
+       test_cond(evtchn_from_port(current->domain, 0)->state == ECS_FREE);
+
+       /* Check alternate locking order */
+
+       evtchn_from_port(current->domain, 0)->state = ECS_INTERDOMAIN;
+       evtchn_from_port(remote_vcpu->domain, 0)->state = ECS_INTERDOMAIN;
+       evtchn_from_port(current->domain, 0)->u.interdomain.remote_dom = 
+                        remote_vcpu->domain;
+       evtchn_from_port(remote_vcpu->domain, 0)->u.interdomain.remote_dom = 
+                        current->domain;
+       evtchn_from_port(remote_vcpu->domain, 0)->u.interdomain.remote_port = 0;
+       evtchn_from_port(current->domain, 0)->u.interdomain.remote_port = 0;
+       test_cond(__evtchn_close(remote_vcpu->domain, 0) == 0);
+       test_cond(evtchn_from_port(remote_vcpu->domain, 0)->state == ECS_FREE);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_INTERDOMAIN;
+       evtchn_from_port(remote_vcpu->domain, 0)->state = ECS_INTERDOMAIN;
+
+       /* Fail  to get_domain() */
+       fake_get_domain_out = 0;
+       test_cond(__evtchn_close(current->domain, 0) == 0);
+       test_cond(evtchn_from_port(current->domain, 0)->state == 
+                       ECS_INTERDOMAIN);
+       fake_get_domain_out = 1;
+
+       /* Clear out the allocated event channels */
+       for(i=0; i<NR_EVTCHN_BUCKETS; i++) {
+               xfree(current->domain->evtchn[i]);
+       }
+       for(i=0; i<NR_EVTCHN_BUCKETS; i++) {
+               xfree(remote_vcpu->domain->evtchn[i]);
+       }
+       fake_xmalloc_expected = 0;
+
+       /* Test evtchn_close */
+       current = test_alloc_vcpu();
+       close_opts = malloc(sizeof(evtchn_close_t));
+       close_opts->port = 0;
+
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_init(current->domain) == 0);
+       evtchn_from_port(current->domain, 0)->state = ECS_UNBOUND;
+       test_cond(evtchn_close(close_opts) == 0);
+       test_cond(evtchn_from_port(current->domain, 0)->state == ECS_FREE);
+
+       for(i=0; i<NR_EVTCHN_BUCKETS; i++) {
+               xfree(current->domain->evtchn[i]);
+       }
+       fake_xmalloc_expected = 0;
+
+       free(close_opts);
+
+       /* Test evtchn_destroy */
+       current = test_alloc_vcpu();
+       fake_xfree_in_ptr = NULL;
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_init(current->domain) == 0);
+
+       fake_check_status(__FILE__, __LINE__);
+       evtchn_destroy(current->domain);
+       fake_xmalloc_expected = 0;
+       test_cond(fake_xfree_in_ptr != NULL);
+
+       /* Test evtchn_send */
+       current = test_alloc_vcpu();
+
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_init(current->domain) == 0);
+       test_cond(evtchn_send(-1) == -EINVAL);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_FREE;
+       test_cond(evtchn_send(0) == -EINVAL);
+       evtchn_from_port(current->domain, 0)->state = ECS_UNBOUND;
+       test_cond(evtchn_send(0) == 0);
+       evtchn_from_port(current->domain, 0)->state = ECS_IPI;
+       evtchn_from_port(current->domain, 0)->notify_vcpu_id = 0;
+       test_cond(evtchn_send(0) == 0);
+
+       remote_vcpu = test_alloc_vcpu();
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_init(remote_vcpu->domain) == 0);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_INTERDOMAIN;
+       evtchn_from_port(current->domain, 0)->u.interdomain.remote_dom = 
+                       remote_vcpu->domain;
+       evtchn_from_port(current->domain, 0)->u.interdomain.remote_port = 0;
+
+       evtchn_from_port(remote_vcpu->domain, 0)->notify_vcpu_id = 0;
+       test_cond(evtchn_send(0) == 0);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_FREE;
+       evtchn_from_port(remote_vcpu->domain, 0)->state = ECS_FREE;
+       evtchn_destroy(current->domain);
+       evtchn_destroy(remote_vcpu->domain);
+       fake_xmalloc_expected = 0;
+
+       /* Test evtchn_alloc_unbound */
+       current = test_alloc_vcpu();
+       unbound_alloc = malloc(sizeof(evtchn_alloc_unbound_t));
+       unbound_alloc->dom = DOMID_SELF-1;
+
+       fake_IS_PRIV_out = 0;
+       fake_find_domain_out = NULL;
+
+       test_cond(evtchn_alloc_unbound(unbound_alloc) == -EPERM);
+       fake_IS_PRIV_out = 1;
+
+       unbound_alloc->dom = DOMID_SELF;
+       test_cond(evtchn_alloc_unbound(unbound_alloc) == -ESRCH);
+       fake_find_domain_out = current->domain;
+
+       /* Bump the refcnt to avoid put_domain() freeing the domain */
+       atomic_set(&current->domain->refcnt, 10);
+
+       /* Not testing for get_free_port failures here */
+
+       current->domain->domain_id = 1;
+       unbound_alloc->remote_dom = DOMID_SELF;
+       unbound_alloc->dom = DOMID_SELF;
+       unbound_alloc->port = -1;
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_alloc_unbound(unbound_alloc) == 0);
+       test_cond(unbound_alloc->port != -1);
+       
+       test_cond(evtchn_from_port(current->domain, 
+                                  unbound_alloc->port)->state == ECS_UNBOUND);
+       test_cond(evtchn_from_port(current->domain, 
+                                  unbound_alloc->port)->u.unbound.remote_domid 
+                       == current->domain->domain_id);
+
+       for(i=0; i<NR_EVTCHN_BUCKETS; i++) {
+               xfree(current->domain->evtchn[i]);
+       }
+       fake_xmalloc_expected = 0;
+       free(unbound_alloc);
+
+       /* Test evtchn_bind_interdomain */
+       current = test_alloc_vcpu();
+       interdomain_bind = malloc(sizeof(evtchn_bind_interdomain_t));
+
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_init(current->domain) == 0);
+       test_cond(get_free_port(current->domain) == 1);
+
+       interdomain_bind->remote_dom = DOMID_SELF;
+
+       fake_find_domain_out = NULL;
+       test_cond(evtchn_bind_interdomain(interdomain_bind) == -ESRCH);
+       fake_find_domain_out = current->domain;
+
+       atomic_set(&current->domain->refcnt, 10);
+       /* not testing get_free_port failures here */
+
+       interdomain_bind->remote_port = -1;
+       test_cond(evtchn_bind_interdomain(interdomain_bind) == -EINVAL);
+
+       interdomain_bind->remote_port = 1;
+       evtchn_from_port(current->domain, 1)->state = ECS_RESERVED;
+       test_cond(evtchn_bind_interdomain(interdomain_bind) == -EINVAL);
+
+       remote_vcpu = test_alloc_vcpu();
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_init(remote_vcpu->domain) == 0);
+       atomic_set(&remote_vcpu->domain->refcnt, 10);
+
+       fake_find_domain_out = remote_vcpu->domain;
+       remote_vcpu->domain->domain_id = 42;
+       interdomain_bind->remote_dom = 42;
+       interdomain_bind->remote_port = 0;
+       evtchn_from_port(remote_vcpu->domain, 0)->state = ECS_UNBOUND;
+       current->domain->domain_id = 1;
+       evtchn_from_port(remote_vcpu->domain, 0)->u.unbound.remote_domid = 0;
+
+       test_cond(evtchn_bind_interdomain(interdomain_bind) == -EINVAL);
+
+       current->domain->domain_id = 1;
+       evtchn_from_port(remote_vcpu->domain, 0)->u.unbound.remote_domid = 1;
+       test_cond(evtchn_bind_interdomain(interdomain_bind) == 0);
+
+       test_cond(evtchn_from_port(remote_vcpu->domain, 0)->state == 
+                       ECS_INTERDOMAIN);
+       test_cond(evtchn_from_port(current->domain, 
+                                  interdomain_bind->local_port)->state == 
+                       ECS_INTERDOMAIN);
+
+       for(i=0; i<NR_EVTCHN_BUCKETS; i++) {
+               xfree(current->domain->evtchn[i]);
+       }
+       for(i=0; i<NR_EVTCHN_BUCKETS; i++) {
+               xfree(remote_vcpu->domain->evtchn[i]);
+       }
+       fake_xmalloc_expected = 0;
+
+       /* Test the current > remote case */
+       remote_vcpu = test_alloc_vcpu();
+       current = test_alloc_vcpu();
+
+       /* This /should/ be good enough to ensure the correct ordering */
+       if (current < remote_vcpu) {
+               struct vcpu *tmp = remote_vcpu;
+               remote_vcpu = current;
+               current = tmp;
+       }
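+       /* evtchn_bind_interdomain() acquires the two domains' evtchn_locks in
+        * pointer order to avoid deadlock; the swap above forces the
+        * "current > remote" branch of that comparison. */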
+
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_init(current->domain) == 0);
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_init(remote_vcpu->domain) == 0);
+
+       atomic_set(&current->domain->refcnt, 10);
+       atomic_set(&remote_vcpu->domain->refcnt, 10);
+
+       current->domain->domain_id = 1;
+       remote_vcpu->domain->domain_id = 2;
+
+       interdomain_bind->remote_dom = 2;
+       interdomain_bind->remote_port = 0;
+       fake_find_domain_out = remote_vcpu->domain;
+
+       evtchn_from_port(remote_vcpu->domain, 0)->state = ECS_UNBOUND;
+       evtchn_from_port(remote_vcpu->domain, 0)->u.unbound.remote_domid = 1;
+       test_cond(evtchn_bind_interdomain(interdomain_bind) == 0);
+
+       for(i=0; i<NR_EVTCHN_BUCKETS; i++) {
+               xfree(current->domain->evtchn[i]);
+       }
+       for(i=0; i<NR_EVTCHN_BUCKETS; i++) {
+               xfree(remote_vcpu->domain->evtchn[i]);
+       }
+       fake_xmalloc_expected = 0;
+       free(interdomain_bind);
+
+       /* Test evtchn_bind_virq */
+       current = test_alloc_vcpu();
+       virq_bind = malloc(sizeof(evtchn_bind_virq_t));
+
+       virq_bind->virq = ARRAY_SIZE(current->virq_to_evtchn) +1;
+       virq_bind->vcpu = ARRAY_SIZE(current->domain->vcpu) +1;
+       current->domain->vcpu[0] = NULL;
+       current->virq_to_evtchn[0] = 1;
+
+       test_cond(evtchn_bind_virq(virq_bind) == -EINVAL);
+
+       virq_bind->virq = 0;
+       test_cond(evtchn_bind_virq(virq_bind) == -ENOENT);
+       virq_bind->vcpu = 0;
+       test_cond(evtchn_bind_virq(virq_bind) == -ENOENT);
+       current->domain->vcpu[0] = current;
+       test_cond(evtchn_bind_virq(virq_bind) == -EEXIST);
+       current->virq_to_evtchn[0] = 0;
+       /* not testing get_free_port failures here */
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_bind_virq(virq_bind) == 0);
+       fake_check_status(__FILE__, __LINE__);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_FREE;
+       evtchn_destroy(current->domain);
+       fake_xmalloc_expected = 0;
+       free(virq_bind);
+
+       /* Test evtchn_bind_ipi */
+       current = test_alloc_vcpu();
+       ipi_bind = malloc(sizeof(evtchn_bind_ipi_t));
+
+       ipi_bind->vcpu = ARRAY_SIZE(current->domain->vcpu) +1;
+       current->domain->vcpu[0] = NULL;
+       
+       test_cond(evtchn_bind_ipi(ipi_bind) == -ENOENT);
+       ipi_bind->vcpu = 0;
+       test_cond(evtchn_bind_ipi(ipi_bind) == -ENOENT);
+       current->domain->vcpu[0] = current;
+       /* not testing get_free_port failures here */
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_bind_ipi(ipi_bind) == 0);
+       fake_check_status(__FILE__, __LINE__);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_FREE;
+       evtchn_destroy(current->domain);
+       fake_xmalloc_expected = 0;
+       free(ipi_bind); 
+
+       /* Test evtchn_bind_pirq */
+       current = test_alloc_vcpu();
+       pirq_bind = malloc(sizeof(evtchn_bind_pirq_t));
+
+       pirq_bind->pirq = ARRAY_SIZE(current->domain->pirq_to_evtchn) +1;
+       current->domain->pirq_to_evtchn[0] = 1;
+
+       test_cond(evtchn_bind_pirq(pirq_bind) == -EINVAL);
+       pirq_bind->pirq = 0;
+       test_cond(evtchn_bind_pirq(pirq_bind) == -EEXIST);
+       current->domain->pirq_to_evtchn[0] = 0;
+       /* not testing get_free_port failures here */
+
+       _test_pirq_guest_bind_out = -1;
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_bind_pirq(pirq_bind) == _test_pirq_guest_bind_out);
+       _test_pirq_guest_bind_out = 0;
+       test_cond(evtchn_bind_pirq(pirq_bind) == 0);
+       fake_check_status(__FILE__, __LINE__);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_FREE;
+       evtchn_destroy(current->domain);
+       fake_xmalloc_expected = 0;
+
+       free(pirq_bind);
+
+       /* FIXME: Not testing send_guest_pirq */
+
+       /* Testing evtchn_status */
+       current = test_alloc_vcpu();
+       status= malloc(sizeof(evtchn_status_t));
+
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_init(current->domain) == 0);
+       atomic_set(&current->domain->refcnt, 10);
+
+       status->dom = 0;
+       status->port = -1;
+       fake_find_domain_out = NULL;
+
+       fake_IS_PRIV_out = 0;
+       test_cond(evtchn_status(status) == -EPERM);
+       fake_IS_PRIV_out = 1;
+       status->dom = DOMID_SELF;
+
+       test_cond(evtchn_status(status) == -ESRCH);
+       fake_find_domain_out = current->domain;
+       test_cond(evtchn_status(status) == -EINVAL);
+
+       status->port = 0;
+       status->status = EVTCHNSTAT_closed+1;
+       evtchn_from_port(current->domain, 0)->state = ECS_FREE;
+       test_cond(evtchn_status(status) == 0);
+       test_cond(status->status == EVTCHNSTAT_closed);
+
+       status->status = EVTCHNSTAT_unbound+1;
+       evtchn_from_port(current->domain, 0)->state = ECS_UNBOUND;
+       test_cond(evtchn_status(status) == 0);
+       test_cond(status->status == EVTCHNSTAT_unbound);
+
+       status->status = EVTCHNSTAT_interdomain+1;
+       evtchn_from_port(current->domain, 0)->state = ECS_INTERDOMAIN;
+       evtchn_from_port(current->domain, 0)->u.interdomain.remote_dom = 
+                       current->domain;
+       evtchn_from_port(current->domain, 0)->u.interdomain.remote_port = 1;
+       test_cond(evtchn_status(status) == 0);
+       test_cond(status->status == EVTCHNSTAT_interdomain);
+       test_cond(status->u.interdomain.port == 1);
+
+       status->status = EVTCHNSTAT_pirq+1;
+       evtchn_from_port(current->domain, 0)->state = ECS_PIRQ;
+       evtchn_from_port(current->domain, 0)->u.pirq = 1;
+       test_cond(evtchn_status(status) == 0);
+       test_cond(status->status == EVTCHNSTAT_pirq);
+       test_cond(status->u.pirq == 1);
+
+       status->status = EVTCHNSTAT_virq+1;
+       evtchn_from_port(current->domain, 0)->state = ECS_VIRQ;
+       evtchn_from_port(current->domain, 0)->u.virq = 10; 
+       test_cond(evtchn_status(status) == 0);
+       test_cond(status->status == EVTCHNSTAT_virq);
+       test_cond(status->u.virq == 10);
+
+       status->status = EVTCHNSTAT_ipi+1;
+       evtchn_from_port(current->domain, 0)->state = ECS_IPI;
+       test_cond(evtchn_status(status) == 0);
+       test_cond(status->status == EVTCHNSTAT_ipi);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_FREE;
+       evtchn_destroy(current->domain);
+       fake_xmalloc_expected = 0;
+
+       free(status);
+
+       /* Test evtchn_bind_vcpu */
+       current = test_alloc_vcpu();
+       vcpu_bind = malloc(sizeof(evtchn_bind_vcpu_t));
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_init(current->domain) == 0);
+
+       vcpu_bind->port = -1;
+       vcpu_bind->vcpu=ARRAY_SIZE(current->domain->vcpu) +1;
+       current->domain->vcpu[0] = NULL;
+       evtchn_from_port(current->domain, 0)->state = ECS_FREE;
+       evtchn_from_port(current->domain, 0)->notify_vcpu_id = 10;
+
+       test_cond(evtchn_bind_vcpu(vcpu_bind) == -ENOENT);
+       vcpu_bind->vcpu=0;
+       test_cond(evtchn_bind_vcpu(vcpu_bind) == -ENOENT);
+
+       current->domain->vcpu[0] = current;
+       test_cond(evtchn_bind_vcpu(vcpu_bind) == -EINVAL);
+
+       vcpu_bind->port = 0;
+       test_cond(evtchn_bind_vcpu(vcpu_bind) == -EINVAL);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_UNBOUND;
+       test_cond(evtchn_bind_vcpu(vcpu_bind) == 0);
+       test_cond(evtchn_from_port(current->domain, 0)->notify_vcpu_id == 0);
+
+       evtchn_from_port(current->domain, 0)->state = ECS_FREE;
+       evtchn_destroy(current->domain);
+       fake_xmalloc_expected = 0;
+
+       free(vcpu_bind);
+
+       /* Test do_event_channel_op */
+       current = test_alloc_vcpu();
+       uop = test_alloc_evtchn_op(15);
+
+       fake_expect_xmalloc = 1;
+       test_cond(evtchn_init(current->domain) == 0);
+       atomic_set(&current->domain->refcnt, 10);
+
+       _test_acm_pre_event_channel_out = 1;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == -EACCES);
+       _test_acm_pre_event_channel_out = 0;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == -ENOSYS);
+
+       /* Set up the helper functions to fail, exercising the failure path
+        * of each do_event_channel_op sub-command */
+       fake_IS_PRIV_out = 1;
+       fake_find_domain_out = NULL;
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_alloc_unbound);
+       uop->u.alloc_unbound.dom = DOMID_SELF +1;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == -ESRCH);
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_bind_interdomain);
+       uop->u.bind_interdomain.remote_dom = DOMID_SELF +1;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == -ESRCH);
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_bind_virq);
+       uop->u.bind_virq.virq = NR_VIRQS +1;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == -EINVAL);
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_bind_ipi);
+       uop->u.bind_ipi.vcpu = MAX_VIRT_CPUS +1;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == -ENOENT);
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_bind_pirq);
+       uop->u.bind_pirq.pirq = NR_PIRQS +1;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == -EINVAL);
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_close);
+       uop->cmd = EVTCHNOP_close;
+       uop->u.close.port = -1;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == -EINVAL);
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_send);
+       uop->u.send.port = -1;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == -EINVAL);
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_status);
+       uop->u.status.dom = DOMID_SELF +1;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == -ESRCH);
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_bind_vcpu);
+       uop->u.bind_vcpu.vcpu = MAX_VIRT_CPUS +1;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == -ENOENT);
+
+       /* Set up the helper functions to succeed, exercising the success
+        * path of each do_event_channel_op sub-command */
+       fake_IS_PRIV_out = 1;
+       fake_find_domain_out = current->domain;
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_alloc_unbound);
+       uop->u.alloc_unbound.remote_dom = DOMID_SELF;
+       uop->u.alloc_unbound.dom = DOMID_SELF;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == 0);
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_bind_interdomain);
+       uop->u.bind_interdomain.remote_dom = DOMID_SELF;
+       uop->u.bind_interdomain.remote_port = 0;
+       evtchn_from_port(current->domain, 0)->state = ECS_UNBOUND;
+       evtchn_from_port(current->domain, 0)->u.unbound.remote_domid = 
+               current->domain->domain_id;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == 0);
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_bind_virq);
+       uop->u.bind_virq.virq = 0;
+       uop->u.bind_virq.vcpu = 0;
+       current->virq_to_evtchn[0] = 0;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == 0);
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_bind_ipi);
+       uop->u.bind_ipi.vcpu = 0;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == 0);
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_bind_pirq);
+       uop->u.bind_pirq.pirq = 0;
+       current->domain->pirq_to_evtchn[0] = 0;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == 0);
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_close);
+       uop->u.close.port = 0;
+       evtchn_from_port(current->domain, 0)->state = ECS_UNBOUND;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == 0);
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_send);
+       uop->u.send.port = 0;
+       evtchn_from_port(current->domain, 0)->state = ECS_UNBOUND;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == 0);
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_status);
+       uop->u.status.dom = DOMID_SELF;
+       uop->u.status.port = 0;
+       evtchn_from_port(current->domain, 0)->state = ECS_FREE;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == 0);
+
+       uop = test_alloc_evtchn_op(EVTCHNOP_bind_vcpu);
+       uop->u.bind_vcpu.vcpu = 0;
+       uop->u.bind_vcpu.port = 0;
+       evtchn_from_port(current->domain, 0)->state = ECS_UNBOUND;
+       test_cond(do_event_channel_op(fake_from_user(uop)) == 0);
+
+       free(uop);
+
+       for(i=0; i<NR_EVTCHN_BUCKETS; i++) {
+               xfree(current->domain->evtchn[i]);
+       }
+       fake_xmalloc_expected = 0;
+
+       fake_check_status(__FILE__, __LINE__);
+       return 0;
+}

-- 
 ccontrol: http://freshmeat.net/projects/ccontrol

----- End forwarded message -----

Yours Tony

   linux.conf.au       http://linux.conf.au/ || http://lca2006.linux.org.au/
   Jan 23-28 2006      The Australian Linux Technical Conference!


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

