# HG changeset patch
# User Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
# Node ID 58d6c9cb95c65e7583c838d0bbfc4412f518195c
# Parent 02c6bf903a8e0824e0c5ac07b7f85b9669316c45
# Parent 6d832b092857c25cfee17d66a186c19a8d372ebe
branch merge with xen-unstable.hg
---
extras/mini-os/minios-x86_32.lds | 45 --
extras/mini-os/minios-x86_64.lds | 54 --
extras/mini-os/x86_32.S | 287 -------------
extras/mini-os/x86_64.S | 385 ------------------
docs/man/xm.pod.1 | 3
docs/man/xmdomain.cfg.pod.5 | 49 ++
extras/mini-os/Makefile | 168 +++----
extras/mini-os/arch/x86/Makefile | 29 +
extras/mini-os/arch/x86/arch.mk | 28 +
extras/mini-os/arch/x86/minios-x86_32.lds | 45 ++
extras/mini-os/arch/x86/minios-x86_64.lds | 54 ++
extras/mini-os/arch/x86/x86_32.S | 287 +++++++++++++
extras/mini-os/arch/x86/x86_64.S | 385 ++++++++++++++++++
extras/mini-os/minios.mk | 62 ++
linux-2.6-xen-sparse/arch/i386/kernel/microcode-xen.c | 2
linux-2.6-xen-sparse/arch/i386/mm/fault-xen.c | 16
tools/libfsimage/common/Makefile | 2
tools/libfsimage/common/fsimage.c | 2
tools/libfsimage/common/fsimage_grub.c | 2
tools/libfsimage/common/fsimage_plugin.c | 8
tools/libfsimage/common/fsimage_plugin.h | 5
tools/libfsimage/common/mapfile-GNU | 3
tools/libfsimage/common/mapfile-SunOS | 3
tools/libfsimage/ext2fs-lib/ext2fs-lib.c | 2
tools/python/xen/xend/XendConfig.py | 1
tools/python/xen/xend/XendNode.py | 10
xen/arch/x86/hvm/i8254.c | 14
xen/arch/x86/microcode.c | 9
xen/arch/x86/mm.c | 13
xen/arch/x86/x86_emulate.c | 21
xen/include/asm-powerpc/byteorder.h | 80 +++
xen/include/asm-x86/byteorder.h | 36 +
xen/include/xen/byteorder/big_endian.h | 106 ++++
xen/include/xen/byteorder/generic.h | 68 +++
xen/include/xen/byteorder/little_endian.h | 106 ++++
xen/include/xen/byteorder/swab.h | 185 ++++++++
xen/include/xen/config.h | 2
xen/include/xen/types.h | 7
38 files changed, 1665 insertions(+), 919 deletions(-)
diff -r 02c6bf903a8e -r 58d6c9cb95c6 docs/man/xm.pod.1
--- a/docs/man/xm.pod.1 Wed Jan 17 14:17:31 2007 -0500
+++ b/docs/man/xm.pod.1 Wed Jan 17 14:57:04 2007 -0500
@@ -451,6 +451,7 @@ make the man page more readable):
xen_minor : 0
xen_extra : -devel
xen_caps : xen-3.0-x86_32
+ xen_scheduler : credit
xen_pagesize : 4096
platform_params : virt_start=0xfc000000
xen_changeset : Mon Nov 14 18:13:38 2005 +0100
@@ -460,7 +461,7 @@ make the man page more readable):
cc_compile_by : sdague
cc_compile_domain : (none)
cc_compile_date : Mon Nov 14 14:16:48 EST 2005
- xend_config_format : 2
+ xend_config_format : 3
B<FIELDS>
diff -r 02c6bf903a8e -r 58d6c9cb95c6 docs/man/xmdomain.cfg.pod.5
--- a/docs/man/xmdomain.cfg.pod.5 Wed Jan 17 14:17:31 2007 -0500
+++ b/docs/man/xmdomain.cfg.pod.5 Wed Jan 17 14:57:04 2007 -0500
@@ -135,6 +135,55 @@ one will be randomly chosen by xen with
=back
+=item B<vfb>
+
+A virtual frame buffer stanza in the form:
+
+ vfb = [ "stanza" ]
+
+The stanza specifies a set of I<name = value> options separated by
+commas, in the form: "name1=value1, name2=value2, ..."
+
+B<OPTIONS>
+
+=over 4
+
+=item I<type>
+
+There are currently two valid options: I<vnc> starts a VNC server that
+lets you connect an external VNC viewer, and I<sdl> starts an internal
+viewer.
+
+=item I<vncdisplay>
+
+The VNC display number to use, defaults to the domain ID. The
+VNC server listens on port 5900 + display number.
+
+=item I<vnclisten>
+
+The listening address for the VNC server, defaults to 127.0.0.1.
+
+=item I<vncunused>
+
+If non-zero, the VNC server listens on the first unused port above
+5900.
+
+=item I<vncpasswd>
+
+Overrides the XenD-configured default password.
+
+=item I<display>
+
+Display to use for the internal viewer, defaults to environment
+variable I<DISPLAY>.
+
+=item I<xauthority>
+
+Authority file to use for the internal viewer, defaults to environment
+variable I<XAUTHORITY>.
+
+=back
+
=back
=head1 ADDITIONAL OPTIONS
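For illustration, a complete vfb line combining the options documented
above might read as follows (the listen address and password are
placeholder values):

    vfb = [ "type=vnc, vncunused=1, vnclisten=0.0.0.0, vncpasswd=secret" ]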
diff -r 02c6bf903a8e -r 58d6c9cb95c6 extras/mini-os/Makefile
--- a/extras/mini-os/Makefile Wed Jan 17 14:17:31 2007 -0500
+++ b/extras/mini-os/Makefile Wed Jan 17 14:57:04 2007 -0500
@@ -1,112 +1,88 @@ debug ?= y
-debug ?= y
+# Common Makefile for mini-os.
+#
+# Every architecture directory below mini-os/arch has to have a
+# Makefile and an arch.mk.
+#
+
pae ?= n
XEN_ROOT = ../..
include $(XEN_ROOT)/Config.mk
+XEN_INTERFACE_VERSION := 0x00030204
+export XEN_INTERFACE_VERSION
+
# Set TARGET_ARCH
-override TARGET_ARCH := $(XEN_TARGET_ARCH)
+override TARGET_ARCH := $(XEN_TARGET_ARCH)
-XEN_INTERFACE_VERSION := 0x00030204
+# Set mini-os root path, used in minios.mk.
+MINI-OS_ROOT=$(PWD)
+export MINI-OS_ROOT
-# NB. '-Wcast-qual' is nasty, so I omitted it.
-CFLAGS := -fno-builtin -Wall -Werror -Wredundant-decls -Wno-format
-CFLAGS += -Wstrict-prototypes -Wnested-externs -Wpointer-arith -Winline
-CFLAGS += -D__XEN_INTERFACE_VERSION__=$(XEN_INTERFACE_VERSION)
+# Try to find out the architecture family TARGET_ARCH_FAM.
+# First check whether the name contains x86_ (x86_32, x86_32y, x86_64).
+# If it is not x86, use $(TARGET_ARCH) itself (e.g. ia64).
+ifeq ($(findstring x86_,$(TARGET_ARCH)),x86_)
+TARGET_ARCH_FAM = x86
+else
+TARGET_ARCH_FAM = $(TARGET_ARCH)
+endif
-ASFLAGS = -D__ASSEMBLY__
+# The architecture family directory below mini-os.
+TARGET_ARCH_DIR := arch/$(TARGET_ARCH_FAM)
-LDLIBS = -L. -lminios
-LDFLAGS_FINAL := -N -T minios-$(TARGET_ARCH).lds
-LDFLAGS :=
+# Export these variables for possible use in architecture dependent makefiles.
+export TARGET_ARCH
+export TARGET_ARCH_DIR
+export TARGET_ARCH_FAM
+
+# This is used for architecture specific links.
+# This can be overridden by arch specific rules.
+ARCH_LINKS =
+
+# For possible special header directories.
+# This can be overridden by arch specific rules.
+EXTRA_INC =
+
+# Special build dependencies.
+# Build all after touching this/these file(s) (see minios.mk)
+SPEC_DEPENDS = minios.mk
+
+# Include the architecture family's special makerules.
+# This must come before the include of minios.mk!
+include $(TARGET_ARCH_DIR)/arch.mk
+
+# Include common mini-os makerules.
+include minios.mk
+
+# Define some default flags for linking.
+LDLIBS :=
+LDFLAGS :=
+LDARCHLIB := -L$(TARGET_ARCH_DIR) -l$(ARCH_LIB_NAME)
+LDFLAGS_FINAL := -N -T $(TARGET_ARCH_DIR)/minios-$(TARGET_ARCH).lds
# Prefix for global API names. All other symbols are localised before
# linking with EXTRA_OBJS.
GLOBAL_PREFIX := xenos_
EXTRA_OBJS =
-# For possible special source directories.
-EXTRA_SRC =
-# For possible special header directories.
-EXTRA_INC =
-
-# Standard name for architecture specific subdirectories.
-TARGET_ARCH_DIR = $(TARGET_ARCH)
-# This is used for architecture specific links.
-ARCH_LINKS =
-
-ifeq ($(TARGET_ARCH),x86_32)
-CFLAGS += -m32 -march=i686
-LDFLAGS += -m elf_i386
-TARGET_ARCH_DIR = x86
-EXTRA_INC += $(TARGET_ARCH_DIR)/$(TARGET_ARCH)
-EXTRA_SRC += arch/$(EXTRA_INC)
-endif
-
-ifeq ($(TARGET_ARCH)$(pae),x86_32y)
-CFLAGS += -DCONFIG_X86_PAE=1
-ASFLAGS += -DCONFIG_X86_PAE=1
-TARGET_ARCH_DIR = x86
-EXTRA_INC += $(TARGET_ARCH_DIR)/$(TARGET_ARCH)
-EXTRA_SRC += arch/$(EXTRA_INC)
-endif
-
-ifeq ($(TARGET_ARCH),x86_64)
-CFLAGS += -m64 -mno-red-zone -fpic -fno-reorder-blocks
-CFLAGS += -fno-asynchronous-unwind-tables
-LDFLAGS += -m elf_x86_64
-TARGET_ARCH_DIR = x86
-EXTRA_INC += $(TARGET_ARCH_DIR)/$(TARGET_ARCH)
-EXTRA_SRC += arch/$(EXTRA_INC)
-endif
-
-ifeq ($(TARGET_ARCH),ia64)
-CFLAGS += -mfixed-range=f2-f5,f12-f15,f32-f127 -mconstant-gp
-ASFLAGS += -x assembler-with-cpp -Wall
-ASFLAGS += -mfixed-range=f2-f5,f12-f15,f32-f127 -fomit-frame-pointer
-ASFLAGS += -fno-builtin -fno-common -fno-strict-aliasing -mconstant-gp
-ARCH_LINKS = IA64_LINKS # Special link on ia64 needed
-define arch_links
-[ -e include/ia64/asm-xsi-offsets.h ] || ln -sf ../../../../xen/include/asm-ia64/asm-xsi-offsets.h include/ia64/asm-xsi-offsets.h
-endef
-endif
-
-ifeq ($(debug),y)
-CFLAGS += -g
-else
-CFLAGS += -O3
-endif
-
-# Add the special header directories to the include paths.
-extra_incl := $(foreach dir,$(EXTRA_INC),-Iinclude/$(dir))
-override CPPFLAGS := -Iinclude $(CPPFLAGS) -Iinclude/$(TARGET_ARCH_DIR) $(extra_incl)
-
TARGET := mini-os
-HEAD := $(TARGET_ARCH).o
+# Subdirectories common to mini-os
+SUBDIRS := lib xenbus console
+
+# The common mini-os objects to build.
OBJS := $(patsubst %.c,%.o,$(wildcard *.c))
OBJS += $(patsubst %.c,%.o,$(wildcard lib/*.c))
OBJS += $(patsubst %.c,%.o,$(wildcard xenbus/*.c))
OBJS += $(patsubst %.c,%.o,$(wildcard console/*.c))
-OBJS += $(patsubst %.S,%.o,$(wildcard arch/$(TARGET_ARCH_DIR)/*.S))
-OBJS += $(patsubst %.c,%.o,$(wildcard arch/$(TARGET_ARCH_DIR)/*.c))
-# For special wanted source directories.
-extra_objs := $(foreach dir,$(EXTRA_SRC),$(patsubst %.c,%.o,$(wildcard $(dir)/*.c)))
-OBJS += $(extra_objs)
-extra_objs := $(foreach dir,$(EXTRA_SRC),$(patsubst %.S,%.o,$(wildcard $(dir)/*.S)))
-OBJS += $(extra_objs)
-HDRS := $(wildcard include/*.h)
-HDRS += $(wildcard include/xen/*.h)
-HDRS += $(wildcard include/$(TARGET_ARCH_DIR)/*.h)
-# For special wanted header directories.
-extra_heads := $(foreach dir,$(EXTRA_INC),$(wildcard $(dir)/*.h))
-HDRS += $(extra_heads)
.PHONY: default
default: $(TARGET)
-# Create special architecture specific links.
+# Create special architecture specific links. The function arch_links
+# has to be defined in arch.mk (see include above).
ifneq ($(ARCH_LINKS),)
$(ARCH_LINKS):
$(arch_links)
@@ -116,24 +92,29 @@ links: $(ARCH_LINKS)
links: $(ARCH_LINKS)
[ -e include/xen ] || ln -sf ../../../xen/include/public include/xen
-$(TARGET): links $(OBJS) $(HEAD)
- $(LD) -r $(LDFLAGS) $(HEAD) $(OBJS) -o $@.o
+.PHONY: arch_lib
+arch_lib:
+ $(MAKE) --directory=$(TARGET_ARCH_DIR) || exit 1;
+
+$(TARGET): links $(OBJS) arch_lib
+ $(LD) -r $(LDFLAGS) $(HEAD_OBJ) $(OBJS) $(LDARCHLIB) -o $@.o
$(OBJCOPY) -w -G $(GLOBAL_PREFIX)* -G _start $@.o $@.o
$(LD) $(LDFLAGS) $(LDFLAGS_FINAL) $@.o $(EXTRA_OBJS) -o $@
gzip -f -9 -c $@ >$@.gz
-.PHONY: clean
-clean:
- find . -type f -name '*.o' | xargs rm -f
- rm -f *.o *~ core $(TARGET) $(TARGET).gz
+.PHONY: clean arch_clean
+
+arch_clean:
+ $(MAKE) --directory=$(TARGET_ARCH_DIR) clean || exit 1;
+
+clean: arch_clean
+ for dir in $(SUBDIRS); do \
+ rm -f $$dir/*.o; \
+ done
+ rm -f *.o *~ core $(TARGET).elf $(TARGET).raw $(TARGET) $(TARGET).gz
find . -type l | xargs rm -f
rm -f tags TAGS
-%.o: %.c $(HDRS) Makefile
- $(CC) $(CFLAGS) $(CPPFLAGS) -c $< -o $@
-
-%.o: %.S $(HDRS) Makefile
- $(CC) $(ASFLAGS) $(CPPFLAGS) -c $< -o $@
define all_sources
( find . -follow -name SCCS -prune -o -name '*.[chS]' -print )
@@ -147,3 +128,4 @@ cscope:
.PHONY: tags
tags:
$(all_sources) | xargs ctags
+
diff -r 02c6bf903a8e -r 58d6c9cb95c6 linux-2.6-xen-sparse/arch/i386/kernel/microcode-xen.c
--- a/linux-2.6-xen-sparse/arch/i386/kernel/microcode-xen.c Wed Jan 17 14:17:31 2007 -0500
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/microcode-xen.c Wed Jan 17 14:57:04 2007 -0500
@@ -85,7 +85,7 @@ static ssize_t microcode_write (struct f
{
ssize_t ret;
- if (len < DEFAULT_UCODE_TOTALSIZE) {
+ if (len < MC_HEADER_SIZE) {
printk(KERN_ERR "microcode: not enough data\n");
return -EINVAL;
}
diff -r 02c6bf903a8e -r 58d6c9cb95c6 linux-2.6-xen-sparse/arch/i386/mm/fault-xen.c
--- a/linux-2.6-xen-sparse/arch/i386/mm/fault-xen.c Wed Jan 17 14:17:31 2007 -0500
+++ b/linux-2.6-xen-sparse/arch/i386/mm/fault-xen.c Wed Jan 17 14:57:04 2007 -0500
@@ -232,9 +232,12 @@ static void dump_fault_path(unsigned lon
p += (address >> 21) * 2;
printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n",
page, p[1], p[0]);
-#ifndef CONFIG_HIGHPTE
+ mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
+#ifdef CONFIG_HIGHPTE
+ if (mfn_to_pfn(mfn) >= highstart_pfn)
+ return;
+#endif
if (p[0] & 1) {
- mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
page = mfn_to_pfn(mfn) << PAGE_SHIFT;
p = (unsigned long *) __va(page);
address &= 0x001fffff;
@@ -242,7 +245,6 @@ static void dump_fault_path(unsigned lon
printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
page, p[1], p[0]);
}
-#endif
}
}
#else
@@ -254,13 +256,16 @@ static void dump_fault_path(unsigned lon
page = ((unsigned long *) __va(page))[address >> 22];
printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
machine_to_phys(page));
+#ifdef CONFIG_HIGHPTE
/*
* We must not directly access the pte in the highpte
- * case, the page table might be allocated in highmem.
+ * case if the page table is located in highmem.
* And lets rather not kmap-atomic the pte, just in case
* it's allocated already.
*/
-#ifndef CONFIG_HIGHPTE
+ if ((page >> PAGE_SHIFT) >= highstart_pfn)
+ return;
+#endif
if (page & 1) {
page &= PAGE_MASK;
address &= 0x003ff000;
@@ -269,7 +274,6 @@ static void dump_fault_path(unsigned lon
printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
machine_to_phys(page));
}
-#endif
}
#endif
diff -r 02c6bf903a8e -r 58d6c9cb95c6 tools/libfsimage/common/Makefile
--- a/tools/libfsimage/common/Makefile Wed Jan 17 14:17:31 2007 -0500
+++ b/tools/libfsimage/common/Makefile Wed Jan 17 14:57:04 2007 -0500
@@ -1,7 +1,7 @@ XEN_ROOT = ../../..
XEN_ROOT = ../../..
include $(XEN_ROOT)/tools/Rules.mk
-MAJOR = 1.0
+MAJOR = 1.1
MINOR = 0
CFLAGS += -Werror -Wp,-MD,.$(@F).d
diff -r 02c6bf903a8e -r 58d6c9cb95c6 tools/libfsimage/common/fsimage.c
--- a/tools/libfsimage/common/fsimage.c Wed Jan 17 14:17:31 2007 -0500
+++ b/tools/libfsimage/common/fsimage.c Wed Jan 17 14:57:04 2007 -0500
@@ -74,7 +74,7 @@ void fsi_close_fsimage(fsi_t *fsi)
pthread_mutex_lock(&fsi_lock);
fsi->f_plugin->fp_ops->fpo_umount(fsi);
(void) close(fsi->f_fd);
- fsip_fs_free(fsi);
+ free(fsi);
pthread_mutex_unlock(&fsi_lock);
}
diff -r 02c6bf903a8e -r 58d6c9cb95c6 tools/libfsimage/common/fsimage_grub.c
--- a/tools/libfsimage/common/fsimage_grub.c Wed Jan 17 14:17:31 2007 -0500
+++ b/tools/libfsimage/common/fsimage_grub.c Wed Jan 17 14:57:04 2007 -0500
@@ -193,6 +193,7 @@ static int
static int
fsig_umount(fsi_t *fsi)
{
+ free(fsi->f_data);
return (0);
}
@@ -250,6 +251,7 @@ static int
static int
fsig_close(fsi_file_t *ffi)
{
+ free(ffi->ff_data);
fsip_file_free(ffi);
return (0);
}
diff -r 02c6bf903a8e -r 58d6c9cb95c6 tools/libfsimage/common/fsimage_plugin.c
--- a/tools/libfsimage/common/fsimage_plugin.c Wed Jan 17 14:17:31 2007 -0500
+++ b/tools/libfsimage/common/fsimage_plugin.c Wed Jan 17 14:57:04 2007 -0500
@@ -40,13 +40,6 @@ fsip_fs_set_data(fsi_t *fsi, void *data)
fsi->f_data = data;
}
-void
-fsip_fs_free(fsi_t *fsi)
-{
- free(fsi->f_data);
- free(fsi);
-}
-
fsi_file_t *
fsip_file_alloc(fsi_t *fsi, void *data)
{
@@ -64,7 +57,6 @@ void
void
fsip_file_free(fsi_file_t *ffi)
{
- free(ffi->ff_data);
free(ffi);
}
diff -r 02c6bf903a8e -r 58d6c9cb95c6 tools/libfsimage/common/fsimage_plugin.h
--- a/tools/libfsimage/common/fsimage_plugin.h Wed Jan 17 14:17:31 2007 -0500
+++ b/tools/libfsimage/common/fsimage_plugin.h Wed Jan 17 14:57:04 2007 -0500
@@ -50,11 +50,10 @@ typedef fsi_plugin_ops_t *
(*fsi_plugin_init_t)(int, fsi_plugin_t *, const char **);
void fsip_fs_set_data(fsi_t *, void *);
-void fsip_fs_free(fsi_t *);
fsi_file_t *fsip_file_alloc(fsi_t *, void *);
void fsip_file_free(fsi_file_t *);
-fsi_t * fsip_fs(fsi_file_t *ffi);
-uint64_t fsip_fs_offset(fsi_t *fsi);
+fsi_t *fsip_fs(fsi_file_t *);
+uint64_t fsip_fs_offset(fsi_t *);
void *fsip_fs_data(fsi_t *);
void *fsip_file_data(fsi_file_t *);
diff -r 02c6bf903a8e -r 58d6c9cb95c6 tools/libfsimage/common/mapfile-GNU
--- a/tools/libfsimage/common/mapfile-GNU Wed Jan 17 14:17:31 2007 -0500
+++ b/tools/libfsimage/common/mapfile-GNU Wed Jan 17 14:57:04 2007 -0500
@@ -1,5 +1,5 @@ VERSION {
VERSION {
- libfsimage.so.1.1 {
+ libfsimage.so.1.0 {
global:
fsi_open_fsimage;
fsi_close_fsimage;
@@ -10,7 +10,6 @@ VERSION {
fsi_pread_file;
fsip_fs_set_data;
- fsip_fs_free;
fsip_file_alloc;
fsip_file_free;
fsip_fs;
diff -r 02c6bf903a8e -r 58d6c9cb95c6 tools/libfsimage/common/mapfile-SunOS
--- a/tools/libfsimage/common/mapfile-SunOS Wed Jan 17 14:17:31 2007 -0500
+++ b/tools/libfsimage/common/mapfile-SunOS Wed Jan 17 14:57:04 2007 -0500
@@ -1,4 +1,4 @@ libfsimage.so.1.1 {
-libfsimage.so.1.1 {
+libfsimage.so.1.0 {
global:
fsi_open_fsimage;
fsi_close_fsimage;
@@ -9,7 +9,6 @@ libfsimage.so.1.1 {
fsi_pread_file;
fsip_fs_set_data;
- fsip_fs_free;
fsip_file_alloc;
fsip_file_free;
fsip_fs;
diff -r 02c6bf903a8e -r 58d6c9cb95c6 tools/libfsimage/ext2fs-lib/ext2fs-lib.c
--- a/tools/libfsimage/ext2fs-lib/ext2fs-lib.c Wed Jan 17 14:17:31 2007 -0500
+++ b/tools/libfsimage/ext2fs-lib/ext2fs-lib.c Wed Jan 17 14:57:04 2007 -0500
@@ -58,9 +58,11 @@ ext2lib_umount(fsi_t *fsi)
{
ext2_filsys *fs = fsip_fs_data(fsi);
if (ext2fs_close(*fs) != 0) {
+ free(fs);
errno = EINVAL;
return (-1);
}
+ free(fs);
return (0);
}
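Taken together, the libfsimage hunks above move ownership of the private
data: the common layer no longer frees f_data/ff_data (fsip_fs_free is
gone), and each plugin now releases its own data in its umount/close hook.
A minimal C sketch of the resulting contract (the plugin shown is
illustrative, not one of the real backends):

    #include <stdlib.h>

    struct fsi { void *f_data; };   /* stand-in for the real fsi_t */

    /* Plugin hook: frees the plugin-private data it allocated at mount. */
    static int example_umount(struct fsi *fsi)
    {
        free(fsi->f_data);
        return 0;
    }

    /* Common layer: calls the hook, then frees only the container. */
    static void example_close(struct fsi *fsi)
    {
        example_umount(fsi);        /* fp_ops->fpo_umount in the real code */
        free(fsi);
    }

    int main(void)
    {
        struct fsi *fsi = calloc(1, sizeof(*fsi));
        fsi->f_data = malloc(16);   /* what a plugin mount would allocate */
        example_close(fsi);
        return 0;
    }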
diff -r 02c6bf903a8e -r 58d6c9cb95c6 tools/python/xen/xend/XendConfig.py
--- a/tools/python/xen/xend/XendConfig.py Wed Jan 17 14:17:31 2007 -0500
+++ b/tools/python/xen/xend/XendConfig.py Wed Jan 17 14:57:04 2007 -0500
@@ -126,6 +126,7 @@ XENAPI_CFG_TYPES = {
'memory_dynamic_min': int,
'memory_dynamic_max': int,
'memory_actual': int,
+ 'cpus': list,
'vcpus_policy': str,
'vcpus_params': str,
'vcpus_number': int,
diff -r 02c6bf903a8e -r 58d6c9cb95c6 tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py Wed Jan 17 14:17:31 2007 -0500
+++ b/tools/python/xen/xend/XendNode.py Wed Jan 17 14:57:04 2007 -0500
@@ -365,14 +365,24 @@ class XendNode:
return [[k, info[k]] for k in ITEM_ORDER]
+ def xenschedinfo(self):
+ sched_id = self.xc.sched_id_get()
+ if sched_id == xen.lowlevel.xc.XEN_SCHEDULER_SEDF:
+ return 'sedf'
+ elif sched_id == xen.lowlevel.xc.XEN_SCHEDULER_CREDIT:
+ return 'credit'
+ else:
+ return 'unknown'
def xeninfo(self):
info = self.xc.xeninfo()
+ info['xen_scheduler'] = self.xenschedinfo()
ITEM_ORDER = ['xen_major',
'xen_minor',
'xen_extra',
'xen_caps',
+ 'xen_scheduler',
'xen_pagesize',
'platform_params',
'xen_changeset',
diff -r 02c6bf903a8e -r 58d6c9cb95c6 xen/arch/x86/hvm/i8254.c
--- a/xen/arch/x86/hvm/i8254.c Wed Jan 17 14:17:31 2007 -0500
+++ b/xen/arch/x86/hvm/i8254.c Wed Jan 17 14:57:04 2007 -0500
@@ -182,11 +182,9 @@ void pit_time_fired(struct vcpu *v, void
s->count_load_time = hvm_get_guest_time(v);
}
-static inline void pit_load_count(PITChannelState *s, int val)
+static inline void pit_load_count(PITChannelState *s, int channel, int val)
{
u32 period;
-    PITChannelState *ch0 =
-        &current->domain->arch.hvm_domain.pl_time.vpit.channels[0];
if (val == 0)
val = 0x10000;
@@ -194,7 +192,7 @@ static inline void pit_load_count(PITCha
s->count = val;
period = DIV_ROUND((val * 1000000000ULL), PIT_FREQ);
- if (s != ch0)
+ if (channel != 0)
return;
#ifdef DEBUG_PIT
@@ -282,17 +280,17 @@ static void pit_ioport_write(void *opaqu
switch(s->write_state) {
default:
case RW_STATE_LSB:
- pit_load_count(s, val);
+ pit_load_count(s, addr, val);
break;
case RW_STATE_MSB:
- pit_load_count(s, val << 8);
+ pit_load_count(s, addr, val << 8);
break;
case RW_STATE_WORD0:
s->write_latch = val;
s->write_state = RW_STATE_WORD1;
break;
case RW_STATE_WORD1:
- pit_load_count(s, s->write_latch | (val << 8));
+ pit_load_count(s, addr, s->write_latch | (val << 8));
s->write_state = RW_STATE_WORD0;
break;
}
@@ -369,7 +367,7 @@ static void pit_reset(void *opaque)
destroy_periodic_time(&s->pt);
s->mode = 0xff; /* the init mode */
s->gate = (i != 2);
- pit_load_count(s, 0);
+ pit_load_count(s, i, 0);
}
}
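As a cross-check of the arithmetic in pit_load_count above: with the
maximum reload value 0x10000 (the val == 0 case) and the 1193181 Hz PIT
input clock, the computed period is about 54.9 ms. A standalone sketch,
assuming the usual rounding definition of DIV_ROUND:

    #include <stdio.h>
    #include <stdint.h>

    #define PIT_FREQ 1193181ULL                      /* PIT input clock, Hz */
    #define DIV_ROUND(x, y) (((x) + (y) / 2) / (y))  /* assumed definition */

    int main(void)
    {
        uint64_t val = 0x10000;  /* a write of 0 means the maximum count */
        uint64_t period_ns = DIV_ROUND(val * 1000000000ULL, PIT_FREQ);
        /* prints roughly 54.9 ms: one full PIT cycle */
        printf("period = %llu ns (~%.1f ms)\n",
               (unsigned long long)period_ns, period_ns / 1e6);
        return 0;
    }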
diff -r 02c6bf903a8e -r 58d6c9cb95c6 xen/arch/x86/microcode.c
--- a/xen/arch/x86/microcode.c Wed Jan 17 14:17:31 2007 -0500
+++ b/xen/arch/x86/microcode.c Wed Jan 17 14:57:04 2007 -0500
@@ -249,14 +249,14 @@ static int find_matching_ucodes (void)
}
total_size = get_totalsize(&mc_header);
-        if ((cursor + total_size > user_buffer_size) || (total_size < DEFAULT_UCODE_TOTALSIZE)) {
+        if (cursor + total_size > user_buffer_size) {
             printk(KERN_ERR "microcode: error! Bad data in microcode data file\n");
error = -EINVAL;
goto out;
}
data_size = get_datasize(&mc_header);
-        if ((data_size + MC_HEADER_SIZE > total_size) || (data_size < DEFAULT_UCODE_DATASIZE)) {
+        if (data_size + MC_HEADER_SIZE > total_size) {
             printk(KERN_ERR "microcode: error! Bad data in microcode data file\n");
error = -EINVAL;
goto out;
@@ -459,11 +459,6 @@ int microcode_update(XEN_GUEST_HANDLE(vo
{
int ret;
- if (len < DEFAULT_UCODE_TOTALSIZE) {
- printk(KERN_ERR "microcode: not enough data\n");
- return -EINVAL;
- }
-
if (len != (typeof(user_buffer_size))len) {
printk(KERN_ERR "microcode: too much data\n");
return -E2BIG;
diff -r 02c6bf903a8e -r 58d6c9cb95c6 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Jan 17 14:17:31 2007 -0500
+++ b/xen/arch/x86/mm.c Wed Jan 17 14:57:04 2007 -0500
@@ -3236,15 +3236,14 @@ static int ptwr_emulated_update(
if ( unlikely(!get_page_from_l1e(gl1e_to_ml1e(d, nl1e), d)) )
{
if ( (CONFIG_PAGING_LEVELS == 3 || IS_COMPAT(d)) &&
- (bytes == 4) &&
- !do_cmpxchg &&
+ (bytes == 4) && (addr & 4) && !do_cmpxchg &&
(l1e_get_flags(nl1e) & _PAGE_PRESENT) )
{
/*
- * If this is a half-write to a PAE PTE then we assume that the
- * guest has simply got the two writes the wrong way round. We
- * zap the PRESENT bit on the assumption the bottom half will be
- * written immediately after we return to the guest.
+ * If this is an upper-half write to a PAE PTE then we assume that
+ * the guest has simply got the two writes the wrong way round. We
+ * zap the PRESENT bit on the assumption that the bottom half will
+ * be written immediately after we return to the guest.
*/
MEM_LOG("ptwr_emulate: fixing up invalid PAE PTE %"PRIpte,
l1e_get_intpte(nl1e));
@@ -3375,7 +3374,7 @@ int ptwr_do_page_fault(struct vcpu *v, u
(page_get_owner(page) != d) )
goto bail;
- ptwr_ctxt.ctxt.regs = guest_cpu_user_regs();
+ ptwr_ctxt.ctxt.regs = regs;
ptwr_ctxt.ctxt.addr_size = ptwr_ctxt.ctxt.sp_size =
IS_COMPAT(d) ? 32 : BITS_PER_LONG;
ptwr_ctxt.cr2 = addr;
diff -r 02c6bf903a8e -r 58d6c9cb95c6 xen/arch/x86/x86_emulate.c
--- a/xen/arch/x86/x86_emulate.c Wed Jan 17 14:17:31 2007 -0500
+++ b/xen/arch/x86/x86_emulate.c Wed Jan 17 14:57:04 2007 -0500
@@ -1156,7 +1156,9 @@ x86_emulate(
break;
}
- case 0x80 ... 0x83: /* Grp1 */
+ case 0x82: /* Grp1 (x86/32 only) */
+ generate_exception_if(mode_64bit(), EXC_UD);
+ case 0x80: case 0x81: case 0x83: /* Grp1 */
switch ( modrm_reg & 7 )
{
case 0: goto add;
@@ -1477,7 +1479,7 @@ x86_emulate(
emulate_1op("dec", dst, _regs.eflags);
break;
case 2: /* call (near) */
- case 3: /* jmp (near) */
+ case 4: /* jmp (near) */
if ( ((op_bytes = dst.bytes) != 8) && mode_64bit() )
{
dst.bytes = op_bytes = 8;
@@ -2049,12 +2051,13 @@ x86_emulate(
break;
case 0xba: /* Grp8 */
- switch ( modrm_reg & 3 )
- {
- case 0: goto bt;
- case 1: goto bts;
- case 2: goto btr;
- case 3: goto btc;
+ switch ( modrm_reg & 7 )
+ {
+ case 4: goto bt;
+ case 5: goto bts;
+ case 6: goto btr;
+ case 7: goto btc;
+ default: generate_exception_if(1, EXC_UD);
}
break;
@@ -2103,6 +2106,7 @@ x86_emulate(
#if defined(__i386__)
{
unsigned long old_lo, old_hi;
+ generate_exception_if((modrm_reg & 7) != 1, EXC_UD);
if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0, &old_lo, 4, ctxt)) ||
(rc = ops->read(ea.mem.seg, ea.mem.off+4, &old_hi, 4, ctxt)) )
goto done;
@@ -2129,6 +2133,7 @@ x86_emulate(
#elif defined(__x86_64__)
{
unsigned long old, new;
+ generate_exception_if((modrm_reg & 7) != 1, EXC_UD);
if ( (rc = ops->read(ea.mem.seg, ea.mem.off, &old, 8, ctxt)) != 0 )
goto done;
if ( ((uint32_t)(old>>0) != (uint32_t)_regs.eax) ||
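The Grp8 change above matters because opcode 0F BA encodes the bit-test
instructions in ModRM /4../7, with /0../3 undefined. A standalone sketch
of the corrected decode (the helper name is illustrative):

    #include <stdio.h>

    /* The ModRM reg field selects the 0F BA (Grp8) operation. */
    static const char *grp8_op(unsigned char modrm)
    {
        switch ( (modrm >> 3) & 7 )
        {
        case 4: return "bt";
        case 5: return "bts";
        case 6: return "btr";
        case 7: return "btc";
        default: return "#UD";   /* /0../3 raise undefined opcode */
        }
    }

    int main(void)
    {
        printf("%s %s\n", grp8_op(0xe0), grp8_op(0x00));  /* bt #UD */
        return 0;
    }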
diff -r 02c6bf903a8e -r 58d6c9cb95c6 xen/include/xen/config.h
--- a/xen/include/xen/config.h Wed Jan 17 14:17:31 2007 -0500
+++ b/xen/include/xen/config.h Wed Jan 17 14:57:04 2007 -0500
@@ -63,6 +63,8 @@
/* Linux 'checker' project. */
#define __iomem
#define __user
+#define __force
+#define __bitwise
#ifndef __ASSEMBLY__
diff -r 02c6bf903a8e -r 58d6c9cb95c6 xen/include/xen/types.h
--- a/xen/include/xen/types.h Wed Jan 17 14:17:31 2007 -0500
+++ b/xen/include/xen/types.h Wed Jan 17 14:57:04 2007 -0500
@@ -51,4 +51,11 @@ struct domain;
struct domain;
struct vcpu;
+typedef __u16 __le16;
+typedef __u16 __be16;
+typedef __u32 __le32;
+typedef __u32 __be32;
+typedef __u64 __le64;
+typedef __u64 __be64;
+
#endif /* __TYPES_H__ */
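The new __le*/__be* typedefs, together with the __force/__bitwise stubs
added to config.h above, follow the Linux sparse-annotation pattern. A
sketch of that pattern, assuming a sparse-style checker (with an ordinary
compiler everything expands to plain integers):

    /* Under sparse, __bitwise makes __le32 a distinct integer type, so
     * mixing it with a host-order __u32 without a conversion helper is
     * reported; __force deliberately silences that report. */
    #ifdef __CHECKER__
    #define __bitwise __attribute__((bitwise))
    #define __force   __attribute__((force))
    #else
    #define __bitwise
    #define __force
    #endif

    typedef unsigned int    __u32;
    typedef __u32 __bitwise __le32;

    /* On a little-endian host this conversion is a pure re-label. */
    static inline __le32 example_cpu_to_le32(__u32 x)
    {
        return (__force __le32)x;
    }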
diff -r 02c6bf903a8e -r 58d6c9cb95c6 extras/mini-os/arch/x86/Makefile
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/arch/x86/Makefile Wed Jan 17 14:57:04 2007 -0500
@@ -0,0 +1,29 @@
+#
+# x86 architecture specific makefiles.
+# It is used for x86_32, x86_32y and x86_64.
+#
+
+# Rebuild all after touching this/these extra file(s) (see minios.mk)
+SPEC_DEP = arch.mk
+
+# The include of arch.mk has to come before minios.mk!
+include arch.mk
+include ../../minios.mk
+
+# Sources here are all *.c and *.S files except $(TARGET_ARCH).S,
+# which is handled via $(HEAD_ARCH_OBJ).
+ARCH_SRCS := $(wildcard *.c)
+
+# The objects built from the sources.
+ARCH_OBJS := $(patsubst %.c,%.o,$(ARCH_SRCS))
+
+all: $(ARCH_LIB)
+
+# $(HEAD_ARCH_OBJ) is only built here; it is needed when linking
+# in ../../Makefile.
+$(ARCH_LIB): $(ARCH_OBJS) $(HEAD_ARCH_OBJ)
+ $(AR) rv $(ARCH_LIB) $(ARCH_OBJS)
+
+clean:
+ rm -f $(ARCH_LIB) $(ARCH_OBJS)
+
diff -r 02c6bf903a8e -r 58d6c9cb95c6 extras/mini-os/arch/x86/arch.mk
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/arch/x86/arch.mk Wed Jan 17 14:57:04 2007 -0500
@@ -0,0 +1,28 @@
+#
+# Architecture special makerules for x86 family
+# (including x86_32, x86_32y and x86_64).
+#
+
+ifeq ($(TARGET_ARCH),x86_32)
+ARCH_CFLAGS := -m32 -march=i686
+ARCH_LDFLAGS := -m elf_i386
+EXTRA_INC += $(TARGET_ARCH_FAM)/$(TARGET_ARCH)
+EXTRA_SRC += arch/$(EXTRA_INC)
+endif
+
+ifeq ($(TARGET_ARCH)$(pae),x86_32y)
+ARCH_CFLAGS := -DCONFIG_X86_PAE=1
+ARCH_ASFLAGS := -DCONFIG_X86_PAE=1
+EXTRA_INC += $(TARGET_ARCH_FAM)/$(TARGET_ARCH)
+EXTRA_SRC += arch/$(EXTRA_INC)
+endif
+
+ifeq ($(TARGET_ARCH),x86_64)
+ARCH_CFLAGS := -m64 -mno-red-zone -fpic -fno-reorder-blocks
+ARCH_CFLAGS += -fno-asynchronous-unwind-tables
+ARCH_LDFLAGS := -m elf_x86_64
+EXTRA_INC += $(TARGET_ARCH_FAM)/$(TARGET_ARCH)
+EXTRA_SRC += arch/$(EXTRA_INC)
+endif
+
+
diff -r 02c6bf903a8e -r 58d6c9cb95c6 extras/mini-os/arch/x86/minios-x86_32.lds
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/arch/x86/minios-x86_32.lds Wed Jan 17 14:57:04 2007 -0500
@@ -0,0 +1,45 @@
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+SECTIONS
+{
+ . = 0x0;
+ _text = .; /* Text and read-only data */
+ .text : {
+ *(.text)
+ *(.gnu.warning)
+ } = 0x9090
+
+ _etext = .; /* End of text section */
+
+ .rodata : { *(.rodata) *(.rodata.*) }
+
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+ }
+
+ _edata = .; /* End of data section */
+
+ __bss_start = .; /* BSS */
+ .bss : {
+ *(.bss)
+ }
+ _end = . ;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.text.exit)
+ *(.data.exit)
+ *(.exitcall.exit)
+ }
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+}
diff -r 02c6bf903a8e -r 58d6c9cb95c6 extras/mini-os/arch/x86/minios-x86_64.lds
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/arch/x86/minios-x86_64.lds Wed Jan 17 14:57:04 2007 -0500
@@ -0,0 +1,54 @@
+OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
+OUTPUT_ARCH(i386:x86-64)
+ENTRY(_start)
+SECTIONS
+{
+ . = 0x0;
+ _text = .; /* Text and read-only data */
+ .text : {
+ *(.text)
+ *(.gnu.warning)
+ } = 0x9090
+
+ _etext = .; /* End of text section */
+
+ .rodata : { *(.rodata) *(.rodata.*) }
+
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+ }
+
+ _edata = .; /* End of data section */
+
+ . = ALIGN(8192); /* init_task */
+ .data.init_task : { *(.data.init_task) }
+
+ . = ALIGN(4096);
+ .data.page_aligned : { *(.data.idt) }
+
+ . = ALIGN(32);
+ .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+ __bss_start = .; /* BSS */
+ .bss : {
+ *(.bss)
+ }
+ _end = . ;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.text.exit)
+ *(.data.exit)
+ *(.exitcall.exit)
+ }
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+}
diff -r 02c6bf903a8e -r 58d6c9cb95c6 extras/mini-os/arch/x86/x86_32.S
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/arch/x86/x86_32.S Wed Jan 17 14:57:04 2007 -0500
@@ -0,0 +1,287 @@
+#include <os.h>
+#include <xen/arch-x86_32.h>
+
+.section __xen_guest
+ .ascii "GUEST_OS=Mini-OS"
+ .ascii ",XEN_VER=xen-3.0"
+ .ascii ",VIRT_BASE=0x0" /* &_text from minios-x86_32.lds */
+ .ascii ",ELF_PADDR_OFFSET=0x0"
+ .ascii ",HYPERCALL_PAGE=0x2"
+#ifdef CONFIG_X86_PAE
+ .ascii ",PAE=yes"
+#else
+ .ascii ",PAE=no"
+#endif
+ .ascii ",LOADER=generic"
+ .byte 0
+.text
+
+.globl _start, shared_info, hypercall_page
+
+_start:
+ cld
+ lss stack_start,%esp
+ push %esi
+ call start_kernel
+
+stack_start:
+ .long stack+8192, __KERNEL_SS
+
+ /* Unpleasant -- the PTE that maps this page is actually overwritten */
+ /* to map the real shared-info page! :-) */
+ .org 0x1000
+shared_info:
+ .org 0x2000
+
+hypercall_page:
+ .org 0x3000
+
+ES = 0x20
+ORIG_EAX = 0x24
+EIP = 0x28
+CS = 0x2C
+
+#define ENTRY(X) .globl X ; X :
+
+#define SAVE_ALL \
+ cld; \
+ pushl %es; \
+ pushl %ds; \
+ pushl %eax; \
+ pushl %ebp; \
+ pushl %edi; \
+ pushl %esi; \
+ pushl %edx; \
+ pushl %ecx; \
+ pushl %ebx; \
+ movl $(__KERNEL_DS),%edx; \
+ movl %edx,%ds; \
+ movl %edx,%es;
+
+#define RESTORE_ALL \
+ popl %ebx; \
+ popl %ecx; \
+ popl %edx; \
+ popl %esi; \
+ popl %edi; \
+ popl %ebp; \
+ popl %eax; \
+ popl %ds; \
+ popl %es; \
+ addl $4,%esp; \
+ iret; \
+
+ENTRY(divide_error)
+ pushl $0 # no error code
+ pushl $do_divide_error
+do_exception:
+ pushl %ds
+ pushl %eax
+ xorl %eax, %eax
+ pushl %ebp
+ pushl %edi
+ pushl %esi
+ pushl %edx
+ decl %eax # eax = -1
+ pushl %ecx
+ pushl %ebx
+ cld
+ movl %es, %ecx
+ movl ES(%esp), %edi # get the function address
+ movl ORIG_EAX(%esp), %edx # get the error code
+ movl %eax, ORIG_EAX(%esp)
+ movl %ecx, ES(%esp)
+ movl $(__KERNEL_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
+ movl %esp,%eax # pt_regs pointer
+ pushl %edx
+ pushl %eax
+ call *%edi
+ jmp ret_from_exception
+
+ret_from_exception:
+ movb CS(%esp),%cl
+ test $2,%cl # slow return to ring 2 or 3
+ jne safesti
+ RESTORE_ALL
+
+# A note on the "critical region" in our callback handler.
+# We want to avoid stacking callback handlers due to events occurring
+# during handling of the last event. To do this, we keep events disabled
+# until we've done all processing. HOWEVER, we must enable events before
+# popping the stack frame (can't be done atomically) and so it would still
+# be possible to get enough handler activations to overflow the stack.
+# Although unlikely, bugs of that kind are hard to track down, so we'd
+# like to avoid the possibility.
+# So, on entry to the handler we detect whether we interrupted an
+# existing activation in its critical region -- if so, we pop the current
+# activation and restart the handler using the previous one.
+ENTRY(hypervisor_callback)
+ pushl %eax
+ SAVE_ALL
+ movl EIP(%esp),%eax
+ cmpl $scrit,%eax
+ jb 11f
+ cmpl $ecrit,%eax
+ jb critical_region_fixup
+11: push %esp
+ call do_hypervisor_callback
+ add $4,%esp
+ movl HYPERVISOR_shared_info,%esi
+ xorl %eax,%eax
+ movb CS(%esp),%cl
+ test $2,%cl # slow return to ring 2 or 3
+ jne safesti
+safesti:movb $0,1(%esi) # reenable event callbacks
+scrit: /**** START OF CRITICAL REGION ****/
+ testb $0xFF,(%esi)
+ jnz 14f # process more events if necessary...
+ RESTORE_ALL
+14: movb $1,1(%esi)
+ jmp 11b
+ecrit: /**** END OF CRITICAL REGION ****/
+# [How we do the fixup]. We want to merge the current stack frame with the
+# just-interrupted frame. How we do this depends on where in the critical
+# region the interrupted handler was executing, and so how many saved
+# registers are in each frame. We do this quickly using the lookup table
+# 'critical_fixup_table'. For each byte offset in the critical region, it
+# provides the number of bytes which have already been popped from the
+# interrupted stack frame.
+critical_region_fixup:
+ addl $critical_fixup_table-scrit,%eax
+ movzbl (%eax),%eax # %eax contains num bytes popped
+ mov %esp,%esi
+ add %eax,%esi # %esi points at end of src region
+ mov %esp,%edi
+ add $0x34,%edi # %edi points at end of dst region
+ mov %eax,%ecx
+ shr $2,%ecx # convert bytes to words
+ je 16f # skip loop if nothing to copy
+15: subl $4,%esi # pre-decrementing copy loop
+ subl $4,%edi
+ movl (%esi),%eax
+ movl %eax,(%edi)
+ loop 15b
+16: movl %edi,%esp # final %edi is top of merged stack
+ jmp 11b
+
+critical_fixup_table:
+ .byte 0x00,0x00,0x00 # testb $0xff,(%esi)
+ .byte 0x00,0x00 # jne 14f
+ .byte 0x00 # pop %ebx
+ .byte 0x04 # pop %ecx
+ .byte 0x08 # pop %edx
+ .byte 0x0c # pop %esi
+ .byte 0x10 # pop %edi
+ .byte 0x14 # pop %ebp
+ .byte 0x18 # pop %eax
+ .byte 0x1c # pop %ds
+ .byte 0x20 # pop %es
+ .byte 0x24,0x24,0x24 # add $4,%esp
+ .byte 0x28 # iret
+ .byte 0x00,0x00,0x00,0x00 # movb $1,1(%esi)
+ .byte 0x00,0x00 # jmp 11b
+
+# Hypervisor uses this for application faults while it executes.
+ENTRY(failsafe_callback)
+ pop %ds
+ pop %es
+ pop %fs
+ pop %gs
+ iret
+
+ENTRY(coprocessor_error)
+ pushl $0
+ pushl $do_coprocessor_error
+ jmp do_exception
+
+ENTRY(simd_coprocessor_error)
+ pushl $0
+ pushl $do_simd_coprocessor_error
+ jmp do_exception
+
+ENTRY(device_not_available)
+ iret
+
+ENTRY(debug)
+ pushl $0
+ pushl $do_debug
+ jmp do_exception
+
+ENTRY(int3)
+ pushl $0
+ pushl $do_int3
+ jmp do_exception
+
+ENTRY(overflow)
+ pushl $0
+ pushl $do_overflow
+ jmp do_exception
+
+ENTRY(bounds)
+ pushl $0
+ pushl $do_bounds
+ jmp do_exception
+
+ENTRY(invalid_op)
+ pushl $0
+ pushl $do_invalid_op
+ jmp do_exception
+
+
+ENTRY(coprocessor_segment_overrun)
+ pushl $0
+ pushl $do_coprocessor_segment_overrun
+ jmp do_exception
+
+
+ENTRY(invalid_TSS)
+ pushl $do_invalid_TSS
+ jmp do_exception
+
+
+ENTRY(segment_not_present)
+ pushl $do_segment_not_present
+ jmp do_exception
+
+
+ENTRY(stack_segment)
+ pushl $do_stack_segment
+ jmp do_exception
+
+
+ENTRY(general_protection)
+ pushl $do_general_protection
+ jmp do_exception
+
+
+ENTRY(alignment_check)
+ pushl $do_alignment_check
+ jmp do_exception
+
+
+ENTRY(page_fault)
+ pushl $do_page_fault
+ jmp do_exception
+
+ENTRY(machine_check)
+ pushl $0
+ pushl $do_machine_check
+ jmp do_exception
+
+
+ENTRY(spurious_interrupt_bug)
+ pushl $0
+ pushl $do_spurious_interrupt_bug
+ jmp do_exception
+
+
+
+ENTRY(thread_starter)
+ popl %eax
+ popl %ebx
+ pushl %eax
+ call *%ebx
+ call exit_thread
+
diff -r 02c6bf903a8e -r 58d6c9cb95c6 extras/mini-os/arch/x86/x86_64.S
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/arch/x86/x86_64.S Wed Jan 17 14:57:04 2007 -0500
@@ -0,0 +1,385 @@
+#include <os.h>
+#include <xen/features.h>
+
+.section __xen_guest
+ .ascii "GUEST_OS=Mini-OS"
+ .ascii ",XEN_VER=xen-3.0"
+ .ascii ",VIRT_BASE=0x0" /* &_text from minios-x86_64.lds */
+ .ascii ",ELF_PADDR_OFFSET=0x0"
+ .ascii ",HYPERCALL_PAGE=0x2"
+ .ascii ",LOADER=generic"
+ .byte 0
+.text
+
+#define ENTRY(X) .globl X ; X :
+.globl _start, shared_info, hypercall_page
+
+
+_start:
+ cld
+ movq stack_start(%rip),%rsp
+ movq %rsi,%rdi
+ call start_kernel
+
+stack_start:
+ .quad stack+8192
+
+ /* Unpleasant -- the PTE that maps this page is actually overwritten */
+ /* to map the real shared-info page! :-) */
+ .org 0x1000
+shared_info:
+ .org 0x2000
+
+hypercall_page:
+ .org 0x3000
+
+
+/* Offsets into shared_info_t. */
+#define evtchn_upcall_pending /* 0 */
+#define evtchn_upcall_mask 1
+
+NMI_MASK = 0x80000000
+
+#define RDI 112
+#define ORIG_RAX 120 /* + error_code */
+#define EFLAGS 144
+
+#define REST_SKIP 6*8
+.macro SAVE_REST
+ subq $REST_SKIP,%rsp
+# CFI_ADJUST_CFA_OFFSET REST_SKIP
+ movq %rbx,5*8(%rsp)
+# CFI_REL_OFFSET rbx,5*8
+ movq %rbp,4*8(%rsp)
+# CFI_REL_OFFSET rbp,4*8
+ movq %r12,3*8(%rsp)
+# CFI_REL_OFFSET r12,3*8
+ movq %r13,2*8(%rsp)
+# CFI_REL_OFFSET r13,2*8
+ movq %r14,1*8(%rsp)
+# CFI_REL_OFFSET r14,1*8
+ movq %r15,(%rsp)
+# CFI_REL_OFFSET r15,0*8
+.endm
+
+
+.macro RESTORE_REST
+ movq (%rsp),%r15
+# CFI_RESTORE r15
+ movq 1*8(%rsp),%r14
+# CFI_RESTORE r14
+ movq 2*8(%rsp),%r13
+# CFI_RESTORE r13
+ movq 3*8(%rsp),%r12
+# CFI_RESTORE r12
+ movq 4*8(%rsp),%rbp
+# CFI_RESTORE rbp
+ movq 5*8(%rsp),%rbx
+# CFI_RESTORE rbx
+ addq $REST_SKIP,%rsp
+# CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
+.endm
+
+
+#define ARG_SKIP 9*8
+.macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0
+ .if \skipr11
+ .else
+ movq (%rsp),%r11
+# CFI_RESTORE r11
+ .endif
+ .if \skipr8910
+ .else
+ movq 1*8(%rsp),%r10
+# CFI_RESTORE r10
+ movq 2*8(%rsp),%r9
+# CFI_RESTORE r9
+ movq 3*8(%rsp),%r8
+# CFI_RESTORE r8
+ .endif
+ .if \skiprax
+ .else
+ movq 4*8(%rsp),%rax
+# CFI_RESTORE rax
+ .endif
+ .if \skiprcx
+ .else
+ movq 5*8(%rsp),%rcx
+# CFI_RESTORE rcx
+ .endif
+ .if \skiprdx
+ .else
+ movq 6*8(%rsp),%rdx
+# CFI_RESTORE rdx
+ .endif
+ movq 7*8(%rsp),%rsi
+# CFI_RESTORE rsi
+ movq 8*8(%rsp),%rdi
+# CFI_RESTORE rdi
+ .if ARG_SKIP+\addskip > 0
+ addq $ARG_SKIP+\addskip,%rsp
+# CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
+ .endif
+.endm
+
+
+.macro HYPERVISOR_IRET flag
+# testb $3,1*8(%rsp) /* Don't need to do that in Mini-os, as */
+# jnz 2f /* there is no userspace? */
+ testl $NMI_MASK,2*8(%rsp)
+ jnz 2f
+
+ testb $1,(xen_features+XENFEAT_supervisor_mode_kernel)
+ jnz 1f
+
+ /* Direct iret to kernel space. Correct CS and SS. */
+ orb $3,1*8(%rsp)
+ orb $3,4*8(%rsp)
+1: iretq
+
+2: /* Slow iret via hypervisor. */
+ andl $~NMI_MASK, 16(%rsp)
+ pushq $\flag
+ jmp hypercall_page + (__HYPERVISOR_iret * 32)
+.endm
+
+/*
+ * Exception entry point. This expects an error code/orig_rax on the stack
+ * and the exception handler in %rax.
+ */
+ENTRY(error_entry)
+# _frame RDI
+ /* rdi slot contains rax, oldrax contains error code */
+ cld
+ subq $14*8,%rsp
+# CFI_ADJUST_CFA_OFFSET (14*8)
+ movq %rsi,13*8(%rsp)
+# CFI_REL_OFFSET rsi,RSI
+ movq 14*8(%rsp),%rsi /* load rax from rdi slot */
+ movq %rdx,12*8(%rsp)
+# CFI_REL_OFFSET rdx,RDX
+ movq %rcx,11*8(%rsp)
+# CFI_REL_OFFSET rcx,RCX
+ movq %rsi,10*8(%rsp) /* store rax */
+# CFI_REL_OFFSET rax,RAX
+ movq %r8, 9*8(%rsp)
+# CFI_REL_OFFSET r8,R8
+ movq %r9, 8*8(%rsp)
+# CFI_REL_OFFSET r9,R9
+ movq %r10,7*8(%rsp)
+# CFI_REL_OFFSET r10,R10
+ movq %r11,6*8(%rsp)
+# CFI_REL_OFFSET r11,R11
+ movq %rbx,5*8(%rsp)
+# CFI_REL_OFFSET rbx,RBX
+ movq %rbp,4*8(%rsp)
+# CFI_REL_OFFSET rbp,RBP
+ movq %r12,3*8(%rsp)
+# CFI_REL_OFFSET r12,R12
+ movq %r13,2*8(%rsp)
+# CFI_REL_OFFSET r13,R13
+ movq %r14,1*8(%rsp)
+# CFI_REL_OFFSET r14,R14
+ movq %r15,(%rsp)
+# CFI_REL_OFFSET r15,R15
+#if 0
+ cmpl $__KERNEL_CS,CS(%rsp)
+ je error_kernelspace
+#endif
+error_call_handler:
+ movq %rdi, RDI(%rsp)
+ movq %rsp,%rdi
+ movq ORIG_RAX(%rsp),%rsi # get error code
+ movq $-1,ORIG_RAX(%rsp)
+ call *%rax
+
+.macro zeroentry sym
+# INTR_FRAME
+ movq (%rsp),%rcx
+ movq 8(%rsp),%r11
+ addq $0x10,%rsp /* skip rcx and r11 */
+ pushq $0 /* push error code/oldrax */
+# CFI_ADJUST_CFA_OFFSET 8
+ pushq %rax /* push real oldrax to the rdi slot */
+# CFI_ADJUST_CFA_OFFSET 8
+ leaq \sym(%rip),%rax
+ jmp error_entry
+# CFI_ENDPROC
+.endm
+
+.macro errorentry sym
+# XCPT_FRAME
+ movq (%rsp),%rcx
+ movq 8(%rsp),%r11
+ addq $0x10,%rsp /* rsp points to the error code */
+ pushq %rax
+# CFI_ADJUST_CFA_OFFSET 8
+ leaq \sym(%rip),%rax
+ jmp error_entry
+# CFI_ENDPROC
+.endm
+
+#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
+#define XEN_PUT_VCPU_INFO(reg)
+#define XEN_PUT_VCPU_INFO_fixup
+#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
+#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
+#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
+
+#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
+ XEN_LOCKED_BLOCK_EVENTS(reg) ; \
+ XEN_PUT_VCPU_INFO(reg)
+
+#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
+                                XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
+                                XEN_PUT_VCPU_INFO(reg)
+
+
+
+ENTRY(hypervisor_callback)
+ zeroentry hypervisor_callback2
+
+ENTRY(hypervisor_callback2)
+ movq %rdi, %rsp
+11: movq %gs:8,%rax
+ incl %gs:0
+ cmovzq %rax,%rsp
+ pushq %rdi
+ call do_hypervisor_callback
+ popq %rsp
+ decl %gs:0
+ jmp error_exit
+
+# ALIGN
+restore_all_enable_events:
+ XEN_UNBLOCK_EVENTS(%rsi) # %rsi is already set up...
+
+scrit: /**** START OF CRITICAL REGION ****/
+ XEN_TEST_PENDING(%rsi)
+ jnz 14f # process more events if necessary...
+ XEN_PUT_VCPU_INFO(%rsi)
+ RESTORE_ARGS 0,8,0
+ HYPERVISOR_IRET 0
+
+14: XEN_LOCKED_BLOCK_EVENTS(%rsi)
+ XEN_PUT_VCPU_INFO(%rsi)
+ SAVE_REST
+ movq %rsp,%rdi # set the argument again
+ jmp 11b
+ecrit: /**** END OF CRITICAL REGION ****/
+
+
+retint_kernel:
+retint_restore_args:
+ movl EFLAGS-REST_SKIP(%rsp), %eax
+ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
+ XEN_GET_VCPU_INFO(%rsi)
+ andb evtchn_upcall_mask(%rsi),%al
+ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
+ jnz restore_all_enable_events # != 0 => enable event delivery
+ XEN_PUT_VCPU_INFO(%rsi)
+
+ RESTORE_ARGS 0,8,0
+ HYPERVISOR_IRET 0
+
+
+error_exit:
+ RESTORE_REST
+/* cli */
+ XEN_BLOCK_EVENTS(%rsi)
+ jmp retint_kernel
+
+
+
+ENTRY(failsafe_callback)
+ popq %rcx
+ popq %r11
+ iretq
+
+
+ENTRY(coprocessor_error)
+ zeroentry do_coprocessor_error
+
+
+ENTRY(simd_coprocessor_error)
+ zeroentry do_simd_coprocessor_error
+
+
+ENTRY(device_not_available)
+ zeroentry do_device_not_available
+
+
+ENTRY(debug)
+# INTR_FRAME
+# CFI_ADJUST_CFA_OFFSET 8 */
+ zeroentry do_debug
+# CFI_ENDPROC
+
+
+ENTRY(int3)
+# INTR_FRAME
+# CFI_ADJUST_CFA_OFFSET 8 */
+ zeroentry do_int3
+# CFI_ENDPROC
+
+ENTRY(overflow)
+ zeroentry do_overflow
+
+
+ENTRY(bounds)
+ zeroentry do_bounds
+
+
+ENTRY(invalid_op)
+ zeroentry do_invalid_op
+
+
+ENTRY(coprocessor_segment_overrun)
+ zeroentry do_coprocessor_segment_overrun
+
+
+ENTRY(invalid_TSS)
+ errorentry do_invalid_TSS
+
+
+ENTRY(segment_not_present)
+ errorentry do_segment_not_present
+
+
+/* runs on exception stack */
+ENTRY(stack_segment)
+# XCPT_FRAME
+ errorentry do_stack_segment
+# CFI_ENDPROC
+
+
+ENTRY(general_protection)
+ errorentry do_general_protection
+
+
+ENTRY(alignment_check)
+ errorentry do_alignment_check
+
+
+ENTRY(divide_error)
+ zeroentry do_divide_error
+
+
+ENTRY(spurious_interrupt_bug)
+ zeroentry do_spurious_interrupt_bug
+
+
+ENTRY(page_fault)
+ errorentry do_page_fault
+
+
+
+
+
+ENTRY(thread_starter)
+ popq %rdi
+ popq %rbx
+ call *%rbx
+ call exit_thread
+
+
diff -r 02c6bf903a8e -r 58d6c9cb95c6 extras/mini-os/minios.mk
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/minios.mk Wed Jan 17 14:57:04 2007 -0500
@@ -0,0 +1,62 @@
+#
+# This file contains the common make rules for building mini-os.
+#
+
+debug = y
+
+# Define some default flags.
+# NB. '-Wcast-qual' is nasty, so I omitted it.
+DEF_CFLAGS := -fno-builtin -Wall -Werror -Wredundant-decls -Wno-format
+DEF_CFLAGS += -Wstrict-prototypes -Wnested-externs -Wpointer-arith -Winline
+DEF_CFLAGS += -D__XEN_INTERFACE_VERSION__=$(XEN_INTERFACE_VERSION)
+
+DEF_ASFLAGS = -D__ASSEMBLY__
+
+ifeq ($(debug),y)
+DEF_CFLAGS += -g
+else
+DEF_CFLAGS += -O3
+endif
+
+# Build the CFLAGS and ASFLAGS for compiling and assembling.
+# DEF_... flags are the common mini-os flags,
+# ARCH_... flags may be defined in arch/$(TARGET_ARCH_FAM)/arch.mk.
+CFLAGS := $(DEF_CFLAGS) $(ARCH_CFLAGS)
+ASFLAGS := $(DEF_ASFLAGS) $(ARCH_ASFLAGS)
+
+# The path pointing to the architecture specific header files.
+ARCH_SPEC_INC := $(MINI-OS_ROOT)/include/$(TARGET_ARCH_FAM)
+
+# Find all header files for checking dependencies.
+HDRS := $(wildcard $(MINI-OS_ROOT)/include/*.h)
+HDRS += $(wildcard $(MINI-OS_ROOT)/include/xen/*.h)
+HDRS += $(wildcard $(ARCH_SPEC_INC)/*.h)
+# For special wanted header directories.
+extra_heads := $(foreach dir,$(EXTRA_INC),$(wildcard $(dir)/*.h))
+HDRS += $(extra_heads)
+
+# Add the special header directories to the include paths.
+extra_incl := $(foreach dir,$(EXTRA_INC),-I$(MINI-OS_ROOT)/include/$(dir))
+override CPPFLAGS := -I$(MINI-OS_ROOT)/include $(CPPFLAGS) -I$(ARCH_SPEC_INC) $(extra_incl)
+
+# The name of the architecture specific library.
+# On x86_32, for example, this is libx86_32.a.
+# $(ARCH_LIB) has to be built in the architecture specific directory.
+ARCH_LIB_NAME = $(TARGET_ARCH)
+ARCH_LIB := lib$(ARCH_LIB_NAME).a
+
+# This object contains the entrypoint for startup from Xen.
+# $(HEAD_ARCH_OBJ) has to be built in the architecture specific directory.
+HEAD_ARCH_OBJ := $(TARGET_ARCH).o
+HEAD_OBJ := $(TARGET_ARCH_DIR)/$(HEAD_ARCH_OBJ)
+
+
+%.o: %.c $(HDRS) Makefile $(SPEC_DEPENDS)
+ $(CC) $(CFLAGS) $(CPPFLAGS) -c $< -o $@
+
+%.o: %.S $(HDRS) Makefile $(SPEC_DEPENDS)
+ $(CC) $(ASFLAGS) $(CPPFLAGS) -c $< -o $@
+
+
+
+
diff -r 02c6bf903a8e -r 58d6c9cb95c6 xen/include/asm-powerpc/byteorder.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-powerpc/byteorder.h Wed Jan 17 14:57:04 2007 -0500
@@ -0,0 +1,80 @@
+#ifndef _ASM_POWERPC_BYTEORDER_H
+#define _ASM_POWERPC_BYTEORDER_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/types.h>
+#include <xen/compiler.h>
+
+static inline __u16 ld_le16(const volatile __u16 *addr)
+{
+ __u16 val;
+
+ asm volatile ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+ return val;
+}
+
+static inline void st_le16(volatile __u16 *addr, const __u16 val)
+{
+ asm volatile ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+static inline __u32 ld_le32(const volatile __u32 *addr)
+{
+ __u32 val;
+
+ asm volatile ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+ return val;
+}
+
+static inline void st_le32(volatile __u32 *addr, const __u32 val)
+{
+ asm volatile ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+static inline __attribute_const__ __u16 ___arch__swab16(__u16 value)
+{
+ __u16 result;
+
+ asm("rlwimi %0,%1,8,16,23"
+ : "=r" (result)
+ : "r" (value), "0" (value >> 8));
+ return result;
+}
+
+static inline __attribute_const__ __u32 ___arch__swab32(__u32 value)
+{
+ __u32 result;
+
+ asm("rlwimi %0,%1,24,16,23\n\t"
+ "rlwimi %0,%1,8,8,15\n\t"
+ "rlwimi %0,%1,24,0,7"
+ : "=r" (result)
+ : "r" (value), "0" (value >> 24));
+ return result;
+}
+
+#define __arch__swab16(x) ___arch__swab16(x)
+#define __arch__swab32(x) ___arch__swab32(x)
+
+/* The same, but returns the converted value from the location pointed to by addr. */
+#define __arch__swab16p(addr) ld_le16(addr)
+#define __arch__swab32p(addr) ld_le32(addr)
+
+/* The same, but do the conversion in situ, i.e. put the value back to addr. */
+#define __arch__swab16s(addr) st_le16(addr,*addr)
+#define __arch__swab32s(addr) st_le32(addr,*addr)
+
+#define __BYTEORDER_HAS_U64__
+#ifndef __powerpc64__
+#define __SWAB_64_THRU_32__
+#endif /* __powerpc64__ */
+
+#include <xen/byteorder/big_endian.h>
+
+#endif /* _ASM_POWERPC_BYTEORDER_H */
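For readers without the PowerPC manual to hand: lhbrx/lwbrx above are
byte-reversed loads, so on this big-endian CPU ld_le16/ld_le32 fetch a
little-endian value in a single instruction. A portable C equivalent of
the 16-bit pair (a sketch, not the code above):

    #include <stdint.h>

    /* Read/write a 16-bit little-endian value one byte at a time,
     * independent of host byte order. */
    static uint16_t ld_le16_c(const volatile uint16_t *addr)
    {
        const volatile uint8_t *p = (const volatile uint8_t *)addr;
        return (uint16_t)(p[0] | ((uint16_t)p[1] << 8));
    }

    static void st_le16_c(volatile uint16_t *addr, uint16_t val)
    {
        volatile uint8_t *p = (volatile uint8_t *)addr;
        p[0] = (uint8_t)(val & 0xff);
        p[1] = (uint8_t)(val >> 8);
    }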
diff -r 02c6bf903a8e -r 58d6c9cb95c6 xen/include/asm-x86/byteorder.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/byteorder.h Wed Jan 17 14:57:04 2007 -0500
@@ -0,0 +1,36 @@
+#ifndef __ASM_X86_BYTEORDER_H__
+#define __ASM_X86_BYTEORDER_H__
+
+#include <asm/types.h>
+#include <xen/compiler.h>
+
+static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
+{
+ asm("bswap %0" : "=r" (x) : "0" (x));
+ return x;
+}
+
+static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
+{
+ union {
+ struct { __u32 a,b; } s;
+ __u64 u;
+ } v;
+ v.u = val;
+ asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
+ : "=r" (v.s.a), "=r" (v.s.b)
+ : "0" (v.s.a), "1" (v.s.b));
+ return v.u;
+}
+
+/* Do not define swab16. Gcc is smart enough to recognize the "C" version
+   and convert it into a rotation or exchange. */
+
+#define __arch__swab64(x) ___arch__swab64(x)
+#define __arch__swab32(x) ___arch__swab32(x)
+
+#define __BYTEORDER_HAS_U64__
+
+#include <xen/byteorder/little_endian.h>
+
+#endif /* __ASM_X86_BYTEORDER_H__ */
diff -r 02c6bf903a8e -r 58d6c9cb95c6 xen/include/xen/byteorder/big_endian.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/xen/byteorder/big_endian.h Wed Jan 17 14:57:04 2007 -0500
@@ -0,0 +1,106 @@
+#ifndef __XEN_BYTEORDER_BIG_ENDIAN_H__
+#define __XEN_BYTEORDER_BIG_ENDIAN_H__
+
+#ifndef __BIG_ENDIAN
+#define __BIG_ENDIAN 4321
+#endif
+#ifndef __BIG_ENDIAN_BITFIELD
+#define __BIG_ENDIAN_BITFIELD
+#endif
+
+#include <xen/types.h>
+#include <xen/byteorder/swab.h>
+
+#define __constant_htonl(x) ((__force __be32)(__u32)(x))
+#define __constant_ntohl(x) ((__force __u32)(__be32)(x))
+#define __constant_htons(x) ((__force __be16)(__u16)(x))
+#define __constant_ntohs(x) ((__force __u16)(__be16)(x))
+#define __constant_cpu_to_le64(x) ((__force __le64)___constant_swab64((x)))
+#define __constant_le64_to_cpu(x) ___constant_swab64((__force __u64)(__le64)(x))
+#define __constant_cpu_to_le32(x) ((__force __le32)___constant_swab32((x)))
+#define __constant_le32_to_cpu(x) ___constant_swab32((__force __u32)(__le32)(x))
+#define __constant_cpu_to_le16(x) ((__force __le16)___constant_swab16((x)))
+#define __constant_le16_to_cpu(x) ___constant_swab16((__force __u16)(__le16)(x))
+#define __constant_cpu_to_be64(x) ((__force __be64)(__u64)(x))
+#define __constant_be64_to_cpu(x) ((__force __u64)(__be64)(x))
+#define __constant_cpu_to_be32(x) ((__force __be32)(__u32)(x))
+#define __constant_be32_to_cpu(x) ((__force __u32)(__be32)(x))
+#define __constant_cpu_to_be16(x) ((__force __be16)(__u16)(x))
+#define __constant_be16_to_cpu(x) ((__force __u16)(__be16)(x))
+#define __cpu_to_le64(x) ((__force __le64)__swab64((x)))
+#define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x))
+#define __cpu_to_le32(x) ((__force __le32)__swab32((x)))
+#define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
+#define __cpu_to_le16(x) ((__force __le16)__swab16((x)))
+#define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x))
+#define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
+#define __be64_to_cpu(x) ((__force __u64)(__be64)(x))
+#define __cpu_to_be32(x) ((__force __be32)(__u32)(x))
+#define __be32_to_cpu(x) ((__force __u32)(__be32)(x))
+#define __cpu_to_be16(x) ((__force __be16)(__u16)(x))
+#define __be16_to_cpu(x) ((__force __u16)(__be16)(x))
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+ return (__force __le64)__swab64p(p);
+}
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+ return __swab64p((__u64 *)p);
+}
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+ return (__force __le32)__swab32p(p);
+}
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+ return __swab32p((__u32 *)p);
+}
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+ return (__force __le16)__swab16p(p);
+}
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+ return __swab16p((__u16 *)p);
+}
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+ return (__force __be64)*p;
+}
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+ return (__force __u64)*p;
+}
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+ return (__force __be32)*p;
+}
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+ return (__force __u32)*p;
+}
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+ return (__force __be16)*p;
+}
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+ return (__force __u16)*p;
+}
+#define __cpu_to_le64s(x) __swab64s((x))
+#define __le64_to_cpus(x) __swab64s((x))
+#define __cpu_to_le32s(x) __swab32s((x))
+#define __le32_to_cpus(x) __swab32s((x))
+#define __cpu_to_le16s(x) __swab16s((x))
+#define __le16_to_cpus(x) __swab16s((x))
+#define __cpu_to_be64s(x) do {} while (0)
+#define __be64_to_cpus(x) do {} while (0)
+#define __cpu_to_be32s(x) do {} while (0)
+#define __be32_to_cpus(x) do {} while (0)
+#define __cpu_to_be16s(x) do {} while (0)
+#define __be16_to_cpus(x) do {} while (0)
+
+#include <xen/byteorder/generic.h>
+
+#endif /* __XEN_BYTEORDER_BIG_ENDIAN_H__ */
diff -r 02c6bf903a8e -r 58d6c9cb95c6 xen/include/xen/byteorder/generic.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/xen/byteorder/generic.h Wed Jan 17 14:57:04 2007 -0500
@@ -0,0 +1,68 @@
+#ifndef __XEN_BYTEORDER_GENERIC_H__
+#define __XEN_BYTEORDER_GENERIC_H__
+
+/*
+ * Generic Byte-reordering support
+ *
+ * The "... p" macros, like le64_to_cpup, can be used with pointers
+ * to unaligned data, but there will be a performance penalty on
+ * some architectures. Use get_unaligned for unaligned data.
+ *
+ * The following macros are to be defined by <asm/byteorder.h>:
+ *
+ * Conversion of XX-bit integers (16- 32- or 64-)
+ * between native CPU format and little/big endian format
+ * 64-bit stuff only defined for proper architectures
+ * cpu_to_[bl]eXX(__uXX x)
+ * [bl]eXX_to_cpu(__uXX x)
+ *
+ * The same, but takes a pointer to the value to convert
+ * cpu_to_[bl]eXXp(__uXX x)
+ * [bl]eXX_to_cpup(__uXX x)
+ *
+ * The same, but change in situ
+ * cpu_to_[bl]eXXs(__uXX x)
+ * [bl]eXX_to_cpus(__uXX x)
+ *
+ * See asm-foo/byteorder.h for examples of how to provide
+ * architecture-optimized versions
+ */
+
+#define cpu_to_le64 __cpu_to_le64
+#define le64_to_cpu __le64_to_cpu
+#define cpu_to_le32 __cpu_to_le32
+#define le32_to_cpu __le32_to_cpu
+#define cpu_to_le16 __cpu_to_le16
+#define le16_to_cpu __le16_to_cpu
+#define cpu_to_be64 __cpu_to_be64
+#define be64_to_cpu __be64_to_cpu
+#define cpu_to_be32 __cpu_to_be32
+#define be32_to_cpu __be32_to_cpu
+#define cpu_to_be16 __cpu_to_be16
+#define be16_to_cpu __be16_to_cpu
+#define cpu_to_le64p __cpu_to_le64p
+#define le64_to_cpup __le64_to_cpup
+#define cpu_to_le32p __cpu_to_le32p
+#define le32_to_cpup __le32_to_cpup
+#define cpu_to_le16p __cpu_to_le16p
+#define le16_to_cpup __le16_to_cpup
+#define cpu_to_be64p __cpu_to_be64p
+#define be64_to_cpup __be64_to_cpup
+#define cpu_to_be32p __cpu_to_be32p
+#define be32_to_cpup __be32_to_cpup
+#define cpu_to_be16p __cpu_to_be16p
+#define be16_to_cpup __be16_to_cpup
+#define cpu_to_le64s __cpu_to_le64s
+#define le64_to_cpus __le64_to_cpus
+#define cpu_to_le32s __cpu_to_le32s
+#define le32_to_cpus __le32_to_cpus
+#define cpu_to_le16s __cpu_to_le16s
+#define le16_to_cpus __le16_to_cpus
+#define cpu_to_be64s __cpu_to_be64s
+#define be64_to_cpus __be64_to_cpus
+#define cpu_to_be32s __cpu_to_be32s
+#define be32_to_cpus __be32_to_cpus
+#define cpu_to_be16s __cpu_to_be16s
+#define be16_to_cpus __be16_to_cpus
+
+#endif /* __XEN_BYTEORDER_GENERIC_H__ */
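A short usage sketch of the API this header pins down (standalone, with a
local swab32 standing in for the real __swab32 machinery):

    #include <stdio.h>
    #include <stdint.h>

    /* Unconditionally reverse the four bytes of a 32-bit value. */
    static uint32_t swab32(uint32_t x)
    {
        return (x << 24) | ((x & 0x0000ff00UL) << 8) |
               ((x >> 8) & 0x0000ff00UL) | (x >> 24);
    }

    int main(void)
    {
        uint32_t host = 0x12345678;
        /* On a little-endian CPU cpu_to_le32() is the identity and
         * cpu_to_be32() swaps; on big-endian the roles are reversed.
         * Either way the swap yields 0x78563412 here. */
        printf("host=0x%08x swapped=0x%08x\n", host, swab32(host));
        return 0;
    }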
diff -r 02c6bf903a8e -r 58d6c9cb95c6 xen/include/xen/byteorder/little_endian.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/xen/byteorder/little_endian.h Wed Jan 17 14:57:04 2007 -0500
@@ -0,0 +1,106 @@
+#ifndef __XEN_BYTEORDER_LITTLE_ENDIAN_H__
+#define __XEN_BYTEORDER_LITTLE_ENDIAN_H__
+
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN 1234
+#endif
+#ifndef __LITTLE_ENDIAN_BITFIELD
+#define __LITTLE_ENDIAN_BITFIELD
+#endif
+
+#include <xen/types.h>
+#include <xen/byteorder/swab.h>
+
+#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
+#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
+#define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
+#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
+#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
+#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
+#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
+#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
+#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
+#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
+#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x))
+#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
+#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x))
+#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
+#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
+#define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
+#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
+#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
+#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
+#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
+#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
+#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+ return (__force __le64)*p;
+}
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+ return (__force __u64)*p;
+}
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+ return (__force __le32)*p;
+}
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+ return (__force __u32)*p;
+}
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+ return (__force __le16)*p;
+}
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+ return (__force __u16)*p;
+}
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+ return (__force __be64)__swab64p(p);
+}
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+ return __swab64p((__u64 *)p);
+}
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+ return (__force __be32)__swab32p(p);
+}
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+ return __swab32p((__u32 *)p);
+}
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+ return (__force __be16)__swab16p(p);
+}
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+ return __swab16p((__u16 *)p);
+}
+#define __cpu_to_le64s(x) do {} while (0)
+#define __le64_to_cpus(x) do {} while (0)
+#define __cpu_to_le32s(x) do {} while (0)
+#define __le32_to_cpus(x) do {} while (0)
+#define __cpu_to_le16s(x) do {} while (0)
+#define __le16_to_cpus(x) do {} while (0)
+#define __cpu_to_be64s(x) __swab64s((x))
+#define __be64_to_cpus(x) __swab64s((x))
+#define __cpu_to_be32s(x) __swab32s((x))
+#define __be32_to_cpus(x) __swab32s((x))
+#define __cpu_to_be16s(x) __swab16s((x))
+#define __be16_to_cpus(x) __swab16s((x))
+
+#include <xen/byteorder/generic.h>
+
+#endif /* __XEN_BYTEORDER_LITTLE_ENDIAN_H__ */
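What these definitions reduce to on a little-endian host, as a
standalone sketch (illustrative only; uint16_t stands in for
__u16/__le16/__be16):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t v = 0x1234;
        uint16_t le = v;                               /* __cpu_to_le16(): a pure cast */
        uint16_t be = (uint16_t)((v << 8) | (v >> 8)); /* __cpu_to_be16(): one swap */
        assert(le == 0x1234 && be == 0x3412);
        /* the in-place __cpu_to_le16s() expands to 'do {} while (0)', a no-op */
        return 0;
    }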
diff -r 02c6bf903a8e -r 58d6c9cb95c6 xen/include/xen/byteorder/swab.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/xen/byteorder/swab.h Wed Jan 17 14:57:04 2007 -0500
@@ -0,0 +1,185 @@
+#ifndef __XEN_BYTEORDER_SWAB_H__
+#define __XEN_BYTEORDER_SWAB_H__
+
+/*
+ * Byte-swapping, independently of CPU endianness
+ * swabXX[ps]?(foo)
+ *
+ * Francois-Rene Rideau <fare@xxxxxxxxx> 19971205
+ * separated swab functions from cpu_to_XX,
+ * to clean up support for bizarre-endian architectures.
+ */
+
+#include <xen/compiler.h>
+
+/* casts are necessary for constants, because we never know for sure
+ * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
+ */
+#define ___swab16(x) \
+({ \
+ __u16 __x = (x); \
+ ((__u16)( \
+ (((__u16)(__x) & (__u16)0x00ffU) << 8) | \
+ (((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
+})
+
+#define ___swab32(x) \
+({ \
+ __u32 __x = (x); \
+ ((__u32)( \
+ (((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
+ (((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \
+ (((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \
+ (((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
+})
+
+#define ___swab64(x) \
+({ \
+ __u64 __x = (x); \
+ ((__u64)( \
+ (__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
+ (__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
+ (__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
+ (__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) << 8) | \
+ (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >> 8) | \
+ (__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
+ (__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
+ (__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
+})
+
+#define ___constant_swab16(x) \
+ ((__u16)( \
+ (((__u16)(x) & (__u16)0x00ffU) << 8) | \
+ (((__u16)(x) & (__u16)0xff00U) >> 8) ))
+#define ___constant_swab32(x) \
+ ((__u32)( \
+ (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
+ (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
+ (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
+ (((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
+#define ___constant_swab64(x) \
+ ((__u64)( \
+ (__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
+ (__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
+ (__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
+ (__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
+ (__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
+ (__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
+ (__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
+ (__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
+
+/*
+ * provide defaults when no architecture-specific optimization is detected
+ */
+#ifndef __arch__swab16
+# define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
+#endif
+#ifndef __arch__swab32
+# define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
+#endif
+#ifndef __arch__swab64
+# define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
+#endif
+
+#ifndef __arch__swab16p
+# define __arch__swab16p(x) __arch__swab16(*(x))
+#endif
+#ifndef __arch__swab32p
+# define __arch__swab32p(x) __arch__swab32(*(x))
+#endif
+#ifndef __arch__swab64p
+# define __arch__swab64p(x) __arch__swab64(*(x))
+#endif
+
+#ifndef __arch__swab16s
+# define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
+#endif
+#ifndef __arch__swab32s
+# define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
+#endif
+#ifndef __arch__swab64s
+# define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
+#endif
+
+
+/*
+ * Allow constant folding
+ */
+#if defined(__GNUC__) && defined(__OPTIMIZE__)
+# define __swab16(x) \
+(__builtin_constant_p((__u16)(x)) ? \
+ ___swab16((x)) : \
+ __fswab16((x)))
+# define __swab32(x) \
+(__builtin_constant_p((__u32)(x)) ? \
+ ___swab32((x)) : \
+ __fswab32((x)))
+# define __swab64(x) \
+(__builtin_constant_p((__u64)(x)) ? \
+ ___swab64((x)) : \
+ __fswab64((x)))
+#else
+# define __swab16(x) __fswab16(x)
+# define __swab32(x) __fswab32(x)
+# define __swab64(x) __fswab64(x)
+#endif /* OPTIMIZE */
+
+
+static inline __attribute_const__ __u16 __fswab16(__u16 x)
+{
+ return __arch__swab16(x);
+}
+static inline __u16 __swab16p(const __u16 *x)
+{
+ return __arch__swab16p(x);
+}
+static inline void __swab16s(__u16 *addr)
+{
+ __arch__swab16s(addr);
+}
+
+static inline __attribute_const__ __u32 __fswab32(__u32 x)
+{
+ return __arch__swab32(x);
+}
+static inline __u32 __swab32p(const __u32 *x)
+{
+ return __arch__swab32p(x);
+}
+static inline void __swab32s(__u32 *addr)
+{
+ __arch__swab32s(addr);
+}
+
+#ifdef __BYTEORDER_HAS_U64__
+static inline __attribute_const__ __u64 __fswab64(__u64 x)
+{
+# ifdef __SWAB_64_THRU_32__
+ __u32 h = x >> 32;
+ __u32 l = x & ((1ULL<<32)-1);
+ return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
+# else
+ return __arch__swab64(x);
+# endif
+}
+static inline __u64 __swab64p(const __u64 *x)
+{
+ return __arch__swab64p(x);
+}
+static inline void __swab64s(__u64 *addr)
+{
+ __arch__swab64s(addr);
+}
+#endif /* __BYTEORDER_HAS_U64__ */
+
+#define swab16 __swab16
+#define swab32 __swab32
+#define swab64 __swab64
+#define swab16p __swab16p
+#define swab32p __swab32p
+#define swab64p __swab64p
+#define swab16s __swab16s
+#define swab32s __swab32s
+#define swab64s __swab64s
+
+#endif /* __XEN_BYTEORDER_SWAB_H__ */
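The __builtin_constant_p() dispatch above lets an optimising GCC fold
swab calls on literals to constants while variables go through the
out-of-line path. A standalone sketch of the same dispatch
(illustrative only; fswab16/CONST_SWAB16/SWAB16 are hypothetical
stand-ins for __fswab16()/___constant_swab16()/__swab16()):

    #include <assert.h>
    #include <stdint.h>

    static uint16_t fswab16(uint16_t x)        /* runtime path */
    {
        return (uint16_t)((x << 8) | (x >> 8));
    }

    #define CONST_SWAB16(x) ((uint16_t)((((uint16_t)(x) & 0x00ffU) << 8) | \
                                        (((uint16_t)(x) & 0xff00U) >> 8)))
    #define SWAB16(x) (__builtin_constant_p((uint16_t)(x)) ? \
                       CONST_SWAB16(x) : fswab16(x))

    int main(void)
    {
        assert(SWAB16(0x1234) == 0x3412);      /* folds to a constant at -O */
        uint16_t v = 0xbeef;
        assert(SWAB16(v) == 0xefbe);           /* dispatches to fswab16() */
        return 0;
    }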
diff -r 02c6bf903a8e -r 58d6c9cb95c6 extras/mini-os/minios-x86_32.lds
--- a/extras/mini-os/minios-x86_32.lds Wed Jan 17 14:17:31 2007 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
-OUTPUT_ARCH(i386)
-ENTRY(_start)
-SECTIONS
-{
- . = 0x0;
- _text = .; /* Text and read-only data */
- .text : {
- *(.text)
- *(.gnu.warning)
- } = 0x9090
-
- _etext = .; /* End of text section */
-
- .rodata : { *(.rodata) *(.rodata.*) }
-
- .data : { /* Data */
- *(.data)
- CONSTRUCTORS
- }
-
- _edata = .; /* End of data section */
-
- __bss_start = .; /* BSS */
- .bss : {
- *(.bss)
- }
- _end = . ;
-
- /* Sections to be discarded */
- /DISCARD/ : {
- *(.text.exit)
- *(.data.exit)
- *(.exitcall.exit)
- }
-
- /* Stabs debugging sections. */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- .comment 0 : { *(.comment) }
-}
diff -r 02c6bf903a8e -r 58d6c9cb95c6 extras/mini-os/minios-x86_64.lds
--- a/extras/mini-os/minios-x86_64.lds Wed Jan 17 14:17:31 2007 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
-OUTPUT_ARCH(i386:x86-64)
-ENTRY(_start)
-SECTIONS
-{
- . = 0x0;
- _text = .; /* Text and read-only data */
- .text : {
- *(.text)
- *(.gnu.warning)
- } = 0x9090
-
- _etext = .; /* End of text section */
-
- .rodata : { *(.rodata) *(.rodata.*) }
-
- .data : { /* Data */
- *(.data)
- CONSTRUCTORS
- }
-
- _edata = .; /* End of data section */
-
- . = ALIGN(8192); /* init_task */
- .data.init_task : { *(.data.init_task) }
-
- . = ALIGN(4096);
- .data.page_aligned : { *(.data.idt) }
-
- . = ALIGN(32);
- .data.cacheline_aligned : { *(.data.cacheline_aligned) }
-
- __bss_start = .; /* BSS */
- .bss : {
- *(.bss)
- }
- _end = . ;
-
- /* Sections to be discarded */
- /DISCARD/ : {
- *(.text.exit)
- *(.data.exit)
- *(.exitcall.exit)
- }
-
- /* Stabs debugging sections. */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- .comment 0 : { *(.comment) }
-}
diff -r 02c6bf903a8e -r 58d6c9cb95c6 extras/mini-os/x86_32.S
--- a/extras/mini-os/x86_32.S Wed Jan 17 14:17:31 2007 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,287 +0,0 @@
-#include <os.h>
-#include <xen/arch-x86_32.h>
-
-.section __xen_guest
- .ascii "GUEST_OS=Mini-OS"
- .ascii ",XEN_VER=xen-3.0"
- .ascii ",VIRT_BASE=0x0" /* &_text from minios_x86_32.lds */
- .ascii ",ELF_PADDR_OFFSET=0x0"
- .ascii ",HYPERCALL_PAGE=0x2"
-#ifdef CONFIG_X86_PAE
- .ascii ",PAE=yes"
-#else
- .ascii ",PAE=no"
-#endif
- .ascii ",LOADER=generic"
- .byte 0
-.text
-
-.globl _start, shared_info, hypercall_page
-
-_start:
- cld
- lss stack_start,%esp
- push %esi
- call start_kernel
-
-stack_start:
- .long stack+8192, __KERNEL_SS
-
- /* Unpleasant -- the PTE that maps this page is actually overwritten */
- /* to map the real shared-info page! :-) */
- .org 0x1000
-shared_info:
- .org 0x2000
-
-hypercall_page:
- .org 0x3000
-
-ES = 0x20
-ORIG_EAX = 0x24
-EIP = 0x28
-CS = 0x2C
-
-#define ENTRY(X) .globl X ; X :
-
-#define SAVE_ALL \
- cld; \
- pushl %es; \
- pushl %ds; \
- pushl %eax; \
- pushl %ebp; \
- pushl %edi; \
- pushl %esi; \
- pushl %edx; \
- pushl %ecx; \
- pushl %ebx; \
- movl $(__KERNEL_DS),%edx; \
- movl %edx,%ds; \
- movl %edx,%es;
-
-#define RESTORE_ALL \
- popl %ebx; \
- popl %ecx; \
- popl %edx; \
- popl %esi; \
- popl %edi; \
- popl %ebp; \
- popl %eax; \
- popl %ds; \
- popl %es; \
- addl $4,%esp; \
- iret; \
-
-ENTRY(divide_error)
- pushl $0 # no error code
- pushl $do_divide_error
-do_exception:
- pushl %ds
- pushl %eax
- xorl %eax, %eax
- pushl %ebp
- pushl %edi
- pushl %esi
- pushl %edx
- decl %eax # eax = -1
- pushl %ecx
- pushl %ebx
- cld
- movl %es, %ecx
- movl ES(%esp), %edi # get the function address
- movl ORIG_EAX(%esp), %edx # get the error code
- movl %eax, ORIG_EAX(%esp)
- movl %ecx, ES(%esp)
- movl $(__KERNEL_DS), %ecx
- movl %ecx, %ds
- movl %ecx, %es
- movl %esp,%eax # pt_regs pointer
- pushl %edx
- pushl %eax
- call *%edi
- jmp ret_from_exception
-
-ret_from_exception:
- movb CS(%esp),%cl
- test $2,%cl # slow return to ring 2 or 3
- jne safesti
- RESTORE_ALL
-
-# A note on the "critical region" in our callback handler.
-# We want to avoid stacking callback handlers due to events occurring
-# during handling of the last event. To do this, we keep events disabled
-# until we've done all processing. HOWEVER, we must enable events before
-# popping the stack frame (can't be done atomically) and so it would still
-# be possible to get enough handler activations to overflow the stack.
-# Although unlikely, bugs of that kind are hard to track down, so we'd
-# like to avoid the possibility.
-# So, on entry to the handler we detect whether we interrupted an
-# existing activation in its critical region -- if so, we pop the current
-# activation and restart the handler using the previous one.
-ENTRY(hypervisor_callback)
- pushl %eax
- SAVE_ALL
- movl EIP(%esp),%eax
- cmpl $scrit,%eax
- jb 11f
- cmpl $ecrit,%eax
- jb critical_region_fixup
-11: push %esp
- call do_hypervisor_callback
- add $4,%esp
- movl HYPERVISOR_shared_info,%esi
- xorl %eax,%eax
- movb CS(%esp),%cl
- test $2,%cl # slow return to ring 2 or 3
- jne safesti
-safesti:movb $0,1(%esi) # reenable event callbacks
-scrit: /**** START OF CRITICAL REGION ****/
- testb $0xFF,(%esi)
- jnz 14f # process more events if necessary...
- RESTORE_ALL
-14: movb $1,1(%esi)
- jmp 11b
-ecrit: /**** END OF CRITICAL REGION ****/
-# [How we do the fixup]. We want to merge the current stack frame with the
-# just-interrupted frame. How we do this depends on where in the critical
-# region the interrupted handler was executing, and so how many saved
-# registers are in each frame. We do this quickly using the lookup table
-# 'critical_fixup_table'. For each byte offset in the critical region, it
-# provides the number of bytes which have already been popped from the
-# interrupted stack frame.
-critical_region_fixup:
- addl $critical_fixup_table-scrit,%eax
- movzbl (%eax),%eax # %eax contains num bytes popped
- mov %esp,%esi
- add %eax,%esi # %esi points at end of src region
- mov %esp,%edi
- add $0x34,%edi # %edi points at end of dst region
- mov %eax,%ecx
- shr $2,%ecx # convert bytes to words
- je 16f # skip loop if nothing to copy
-15: subl $4,%esi # pre-decrementing copy loop
- subl $4,%edi
- movl (%esi),%eax
- movl %eax,(%edi)
- loop 15b
-16: movl %edi,%esp # final %edi is top of merged stack
- jmp 11b
-
-critical_fixup_table:
- .byte 0x00,0x00,0x00 # testb $0xff,(%esi)
- .byte 0x00,0x00 # jne 14f
- .byte 0x00 # pop %ebx
- .byte 0x04 # pop %ecx
- .byte 0x08 # pop %edx
- .byte 0x0c # pop %esi
- .byte 0x10 # pop %edi
- .byte 0x14 # pop %ebp
- .byte 0x18 # pop %eax
- .byte 0x1c # pop %ds
- .byte 0x20 # pop %es
- .byte 0x24,0x24,0x24 # add $4,%esp
- .byte 0x28 # iret
- .byte 0x00,0x00,0x00,0x00 # movb $1,1(%esi)
- .byte 0x00,0x00 # jmp 11b
-
-# Hypervisor uses this for application faults while it executes.
-ENTRY(failsafe_callback)
- pop %ds
- pop %es
- pop %fs
- pop %gs
- iret
-
-ENTRY(coprocessor_error)
- pushl $0
- pushl $do_coprocessor_error
- jmp do_exception
-
-ENTRY(simd_coprocessor_error)
- pushl $0
- pushl $do_simd_coprocessor_error
- jmp do_exception
-
-ENTRY(device_not_available)
- iret
-
-ENTRY(debug)
- pushl $0
- pushl $do_debug
- jmp do_exception
-
-ENTRY(int3)
- pushl $0
- pushl $do_int3
- jmp do_exception
-
-ENTRY(overflow)
- pushl $0
- pushl $do_overflow
- jmp do_exception
-
-ENTRY(bounds)
- pushl $0
- pushl $do_bounds
- jmp do_exception
-
-ENTRY(invalid_op)
- pushl $0
- pushl $do_invalid_op
- jmp do_exception
-
-
-ENTRY(coprocessor_segment_overrun)
- pushl $0
- pushl $do_coprocessor_segment_overrun
- jmp do_exception
-
-
-ENTRY(invalid_TSS)
- pushl $do_invalid_TSS
- jmp do_exception
-
-
-ENTRY(segment_not_present)
- pushl $do_segment_not_present
- jmp do_exception
-
-
-ENTRY(stack_segment)
- pushl $do_stack_segment
- jmp do_exception
-
-
-ENTRY(general_protection)
- pushl $do_general_protection
- jmp do_exception
-
-
-ENTRY(alignment_check)
- pushl $do_alignment_check
- jmp do_exception
-
-
-ENTRY(page_fault)
- pushl $do_page_fault
- jmp do_exception
-
-ENTRY(machine_check)
- pushl $0
- pushl $do_machine_check
- jmp do_exception
-
-
-ENTRY(spurious_interrupt_bug)
- pushl $0
- pushl $do_spurious_interrupt_bug
- jmp do_exception
-
-
-
-ENTRY(thread_starter)
- popl %eax
- popl %ebx
- pushl %eax
- call *%ebx
- call exit_thread
-
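The critical-region fixup above (the file moves to
extras/mini-os/arch/x86/ elsewhere in this patch) is easier to follow
as a C model. A standalone sketch, illustrative only, where 0x34 is
the size of a full SAVE_ALL frame and 'popped' is the
critical_fixup_table entry for the interrupted EIP:

    #include <stdint.h>
    #include <string.h>

    /* Merge the new frame with the partially-popped interrupted frame:
     * slide the first 'popped' bytes of the new frame up against the
     * unconsumed remainder of the old one, then resume at the merged
     * top. memmove() because the regions may overlap, which the
     * original handles with a pre-decrementing copy loop. */
    static uint8_t *merge_frames(uint8_t *esp, size_t popped)
    {
        memmove(esp + 0x34 - popped, esp, popped);
        return esp + 0x34 - popped;            /* final %esp */
    }

    int main(void)
    {
        uint8_t stack[0x68] = { 0 };
        /* e.g. interrupted at 'pop %edx': two registers already popped */
        return merge_frames(stack, 0x08) == stack + 0x2c ? 0 : 1;
    }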
diff -r 02c6bf903a8e -r 58d6c9cb95c6 extras/mini-os/x86_64.S
--- a/extras/mini-os/x86_64.S Wed Jan 17 14:17:31 2007 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,385 +0,0 @@
-#include <os.h>
-#include <xen/features.h>
-
-.section __xen_guest
- .ascii "GUEST_OS=Mini-OS"
- .ascii ",XEN_VER=xen-3.0"
- .ascii ",VIRT_BASE=0x0" /* &_text from minios_x86_64.lds */
- .ascii ",ELF_PADDR_OFFSET=0x0"
- .ascii ",HYPERCALL_PAGE=0x2"
- .ascii ",LOADER=generic"
- .byte 0
-.text
-
-#define ENTRY(X) .globl X ; X :
-.globl _start, shared_info, hypercall_page
-
-
-_start:
- cld
- movq stack_start(%rip),%rsp
- movq %rsi,%rdi
- call start_kernel
-
-stack_start:
- .quad stack+8192
-
- /* Unpleasant -- the PTE that maps this page is actually overwritten */
- /* to map the real shared-info page! :-) */
- .org 0x1000
-shared_info:
- .org 0x2000
-
-hypercall_page:
- .org 0x3000
-
-
-/* Offsets into shared_info_t. */
-#define evtchn_upcall_pending /* 0 */
-#define evtchn_upcall_mask 1
-
-NMI_MASK = 0x80000000
-
-#define RDI 112
-#define ORIG_RAX 120 /* + error_code */
-#define EFLAGS 144
-
-#define REST_SKIP 6*8
-.macro SAVE_REST
- subq $REST_SKIP,%rsp
-# CFI_ADJUST_CFA_OFFSET REST_SKIP
- movq %rbx,5*8(%rsp)
-# CFI_REL_OFFSET rbx,5*8
- movq %rbp,4*8(%rsp)
-# CFI_REL_OFFSET rbp,4*8
- movq %r12,3*8(%rsp)
-# CFI_REL_OFFSET r12,3*8
- movq %r13,2*8(%rsp)
-# CFI_REL_OFFSET r13,2*8
- movq %r14,1*8(%rsp)
-# CFI_REL_OFFSET r14,1*8
- movq %r15,(%rsp)
-# CFI_REL_OFFSET r15,0*8
-.endm
-
-
-.macro RESTORE_REST
- movq (%rsp),%r15
-# CFI_RESTORE r15
- movq 1*8(%rsp),%r14
-# CFI_RESTORE r14
- movq 2*8(%rsp),%r13
-# CFI_RESTORE r13
- movq 3*8(%rsp),%r12
-# CFI_RESTORE r12
- movq 4*8(%rsp),%rbp
-# CFI_RESTORE rbp
- movq 5*8(%rsp),%rbx
-# CFI_RESTORE rbx
- addq $REST_SKIP,%rsp
-# CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
-.endm
-
-
-#define ARG_SKIP 9*8
-.macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0
- .if \skipr11
- .else
- movq (%rsp),%r11
-# CFI_RESTORE r11
- .endif
- .if \skipr8910
- .else
- movq 1*8(%rsp),%r10
-# CFI_RESTORE r10
- movq 2*8(%rsp),%r9
-# CFI_RESTORE r9
- movq 3*8(%rsp),%r8
-# CFI_RESTORE r8
- .endif
- .if \skiprax
- .else
- movq 4*8(%rsp),%rax
-# CFI_RESTORE rax
- .endif
- .if \skiprcx
- .else
- movq 5*8(%rsp),%rcx
-# CFI_RESTORE rcx
- .endif
- .if \skiprdx
- .else
- movq 6*8(%rsp),%rdx
-# CFI_RESTORE rdx
- .endif
- movq 7*8(%rsp),%rsi
-# CFI_RESTORE rsi
- movq 8*8(%rsp),%rdi
-# CFI_RESTORE rdi
- .if ARG_SKIP+\addskip > 0
- addq $ARG_SKIP+\addskip,%rsp
-# CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
- .endif
-.endm
-
-
-.macro HYPERVISOR_IRET flag
-# testb $3,1*8(%rsp) /* Don't need to do that in Mini-os, as */
-# jnz 2f /* there is no userspace? */
- testl $NMI_MASK,2*8(%rsp)
- jnz 2f
-
- testb $1,(xen_features+XENFEAT_supervisor_mode_kernel)
- jnz 1f
-
- /* Direct iret to kernel space. Correct CS and SS. */
- orb $3,1*8(%rsp)
- orb $3,4*8(%rsp)
-1: iretq
-
-2: /* Slow iret via hypervisor. */
- andl $~NMI_MASK, 16(%rsp)
- pushq $\flag
- jmp hypercall_page + (__HYPERVISOR_iret * 32)
-.endm
-
-/*
- * Exception entry point. This expects an error code/orig_rax on the stack
- * and the exception handler in %rax.
- */
-ENTRY(error_entry)
-# _frame RDI
- /* rdi slot contains rax, oldrax contains error code */
- cld
- subq $14*8,%rsp
-# CFI_ADJUST_CFA_OFFSET (14*8)
- movq %rsi,13*8(%rsp)
-# CFI_REL_OFFSET rsi,RSI
- movq 14*8(%rsp),%rsi /* load rax from rdi slot */
- movq %rdx,12*8(%rsp)
-# CFI_REL_OFFSET rdx,RDX
- movq %rcx,11*8(%rsp)
-# CFI_REL_OFFSET rcx,RCX
- movq %rsi,10*8(%rsp) /* store rax */
-# CFI_REL_OFFSET rax,RAX
- movq %r8, 9*8(%rsp)
-# CFI_REL_OFFSET r8,R8
- movq %r9, 8*8(%rsp)
-# CFI_REL_OFFSET r9,R9
- movq %r10,7*8(%rsp)
-# CFI_REL_OFFSET r10,R10
- movq %r11,6*8(%rsp)
-# CFI_REL_OFFSET r11,R11
- movq %rbx,5*8(%rsp)
-# CFI_REL_OFFSET rbx,RBX
- movq %rbp,4*8(%rsp)
-# CFI_REL_OFFSET rbp,RBP
- movq %r12,3*8(%rsp)
-# CFI_REL_OFFSET r12,R12
- movq %r13,2*8(%rsp)
-# CFI_REL_OFFSET r13,R13
- movq %r14,1*8(%rsp)
-# CFI_REL_OFFSET r14,R14
- movq %r15,(%rsp)
-# CFI_REL_OFFSET r15,R15
-#if 0
- cmpl $__KERNEL_CS,CS(%rsp)
- je error_kernelspace
-#endif
-error_call_handler:
- movq %rdi, RDI(%rsp)
- movq %rsp,%rdi
- movq ORIG_RAX(%rsp),%rsi # get error code
- movq $-1,ORIG_RAX(%rsp)
- call *%rax
-
-.macro zeroentry sym
-# INTR_FRAME
- movq (%rsp),%rcx
- movq 8(%rsp),%r11
- addq $0x10,%rsp /* skip rcx and r11 */
- pushq $0 /* push error code/oldrax */
-# CFI_ADJUST_CFA_OFFSET 8
- pushq %rax /* push real oldrax to the rdi slot */
-# CFI_ADJUST_CFA_OFFSET 8
- leaq \sym(%rip),%rax
- jmp error_entry
-# CFI_ENDPROC
-.endm
-
-.macro errorentry sym
-# XCPT_FRAME
- movq (%rsp),%rcx
- movq 8(%rsp),%r11
- addq $0x10,%rsp /* rsp points to the error code */
- pushq %rax
-# CFI_ADJUST_CFA_OFFSET 8
- leaq \sym(%rip),%rax
- jmp error_entry
-# CFI_ENDPROC
-.endm
-
-#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
-#define XEN_PUT_VCPU_INFO(reg)
-#define XEN_PUT_VCPU_INFO_fixup
-#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
-#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
-#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
-
-#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
- XEN_LOCKED_BLOCK_EVENTS(reg) ; \
- XEN_PUT_VCPU_INFO(reg)
-
-#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
-                                XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
-                                XEN_PUT_VCPU_INFO(reg)
-
-
-
-ENTRY(hypervisor_callback)
- zeroentry hypervisor_callback2
-
-ENTRY(hypervisor_callback2)
- movq %rdi, %rsp
-11: movq %gs:8,%rax
- incl %gs:0
- cmovzq %rax,%rsp
- pushq %rdi
- call do_hypervisor_callback
- popq %rsp
- decl %gs:0
- jmp error_exit
-
-# ALIGN
-restore_all_enable_events:
- XEN_UNBLOCK_EVENTS(%rsi) # %rsi is already set up...
-
-scrit: /**** START OF CRITICAL REGION ****/
- XEN_TEST_PENDING(%rsi)
- jnz 14f # process more events if necessary...
- XEN_PUT_VCPU_INFO(%rsi)
- RESTORE_ARGS 0,8,0
- HYPERVISOR_IRET 0
-
-14: XEN_LOCKED_BLOCK_EVENTS(%rsi)
- XEN_PUT_VCPU_INFO(%rsi)
- SAVE_REST
- movq %rsp,%rdi # set the argument again
- jmp 11b
-ecrit: /**** END OF CRITICAL REGION ****/
-
-
-retint_kernel:
-retint_restore_args:
- movl EFLAGS-REST_SKIP(%rsp), %eax
- shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
- XEN_GET_VCPU_INFO(%rsi)
- andb evtchn_upcall_mask(%rsi),%al
- andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
- jnz restore_all_enable_events # != 0 => enable event delivery
- XEN_PUT_VCPU_INFO(%rsi)
-
- RESTORE_ARGS 0,8,0
- HYPERVISOR_IRET 0
-
-
-error_exit:
- RESTORE_REST
-/* cli */
- XEN_BLOCK_EVENTS(%rsi)
- jmp retint_kernel
-
-
-
-ENTRY(failsafe_callback)
- popq %rcx
- popq %r11
- iretq
-
-
-ENTRY(coprocessor_error)
- zeroentry do_coprocessor_error
-
-
-ENTRY(simd_coprocessor_error)
- zeroentry do_simd_coprocessor_error
-
-
-ENTRY(device_not_available)
- zeroentry do_device_not_available
-
-
-ENTRY(debug)
-# INTR_FRAME
-# CFI_ADJUST_CFA_OFFSET 8 */
- zeroentry do_debug
-# CFI_ENDPROC
-
-
-ENTRY(int3)
-# INTR_FRAME
-# CFI_ADJUST_CFA_OFFSET 8 */
- zeroentry do_int3
-# CFI_ENDPROC
-
-ENTRY(overflow)
- zeroentry do_overflow
-
-
-ENTRY(bounds)
- zeroentry do_bounds
-
-
-ENTRY(invalid_op)
- zeroentry do_invalid_op
-
-
-ENTRY(coprocessor_segment_overrun)
- zeroentry do_coprocessor_segment_overrun
-
-
-ENTRY(invalid_TSS)
- errorentry do_invalid_TSS
-
-
-ENTRY(segment_not_present)
- errorentry do_segment_not_present
-
-
-/* runs on exception stack */
-ENTRY(stack_segment)
-# XCPT_FRAME
- errorentry do_stack_segment
-# CFI_ENDPROC
-
-
-ENTRY(general_protection)
- errorentry do_general_protection
-
-
-ENTRY(alignment_check)
- errorentry do_alignment_check
-
-
-ENTRY(divide_error)
- zeroentry do_divide_error
-
-
-ENTRY(spurious_interrupt_bug)
- zeroentry do_spurious_interrupt_bug
-
-
-ENTRY(page_fault)
- errorentry do_page_fault
-
-
-
-
-
-ENTRY(thread_starter)
- popq %rdi
- popq %rbx
- call *%rbx
- call exit_thread
-
-
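A rough C model (illustrative only) of the retint_kernel test above:
event delivery is re-enabled on return exactly when the frame being
returned to had EFLAGS.IF set and the vcpu's upcall mask is currently
blocking events:

    #include <stdbool.h>
    #include <stdint.h>

    static bool must_reenable_events(uint32_t iret_eflags, uint8_t upcall_mask)
    {
        /* shr $9 ; andb mask ; andb $1 -- EFLAGS.IF is bit 9 */
        return ((iret_eflags >> 9) & upcall_mask & 1) != 0;
    }

    int main(void)
    {
        return must_reenable_events(1u << 9, 1) ? 0 : 1;  /* IF set, masked */
    }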