# HG changeset patch
# User awilliam@xxxxxxxxxxxx
# Date 1169094682 25200
# Node ID 26c75e0e48edf8382e613dcdf6400dbceb8e4db8
# Parent 7e9077dd4010a64d2161d10b688412c998ec7eff
# Parent dd0989523d1700825a9feea3895811cec3c41bfa
merge with xen-unstable.hg
---
extras/mini-os/minios-x86_32.lds | 45 --
extras/mini-os/minios-x86_64.lds | 54 --
extras/mini-os/x86_32.S | 287 -------------
extras/mini-os/x86_64.S | 385 ------------------
Makefile | 9
docs/man/xm.pod.1 | 3
docs/man/xmdomain.cfg.pod.5 | 49 ++
extras/mini-os/Makefile | 168 +++----
extras/mini-os/arch/x86/Makefile | 29 +
extras/mini-os/arch/x86/arch.mk | 28 +
extras/mini-os/arch/x86/minios-x86_32.lds | 45 ++
extras/mini-os/arch/x86/minios-x86_64.lds | 54 ++
extras/mini-os/arch/x86/x86_32.S | 287 +++++++++++++
extras/mini-os/arch/x86/x86_64.S | 385 ++++++++++++++++++
extras/mini-os/minios.mk | 62 ++
linux-2.6-xen-sparse/arch/i386/kernel/microcode-xen.c | 2
linux-2.6-xen-sparse/arch/i386/mm/fault-xen.c | 16
tools/libfsimage/common/Makefile | 2
tools/libfsimage/common/fsimage.c | 2
tools/libfsimage/common/fsimage_grub.c | 2
tools/libfsimage/common/fsimage_plugin.c | 8
tools/libfsimage/common/fsimage_plugin.h | 5
tools/libfsimage/common/mapfile-GNU | 3
tools/libfsimage/common/mapfile-SunOS | 3
tools/libfsimage/ext2fs-lib/ext2fs-lib.c | 2
tools/libxc/xc_linux_build.c | 2
tools/libxc/xc_linux_restore.c | 207 ++++++---
tools/libxc/xc_linux_save.c | 7
tools/pygrub/src/pygrub | 3
tools/python/xen/xend/XendCheckpoint.py | 8
tools/python/xen/xend/XendConfig.py | 1
tools/python/xen/xend/XendNode.py | 10
tools/tests/test_x86_emulator.c | 3
xen/arch/x86/hvm/i8254.c | 14
xen/arch/x86/hvm/svm/svm.c | 4
xen/arch/x86/hvm/vmx/vmx.c | 7
xen/arch/x86/microcode.c | 9
xen/arch/x86/mm.c | 16
xen/arch/x86/mm/shadow/common.c | 22 -
xen/arch/x86/mm/shadow/multi.c | 4
xen/arch/x86/mm/shadow/private.h | 5
xen/arch/x86/x86_emulate.c | 78 ++-
xen/include/asm-powerpc/byteorder.h | 80 +++
xen/include/asm-x86/byteorder.h | 36 +
xen/include/asm-x86/x86_emulate.h | 7
xen/include/xen/byteorder/big_endian.h | 106 ++++
xen/include/xen/byteorder/generic.h | 68 +++
xen/include/xen/byteorder/little_endian.h | 106 ++++
xen/include/xen/byteorder/swab.h | 185 ++++++++
xen/include/xen/config.h | 2
xen/include/xen/types.h | 7
51 files changed, 1883 insertions(+), 1049 deletions(-)
diff -r 7e9077dd4010 -r 26c75e0e48ed Makefile
--- a/Makefile Wed Jan 17 19:55:48 2007 -0700
+++ b/Makefile Wed Jan 17 21:31:22 2007 -0700
@@ -2,18 +2,15 @@
# Grand Unified Makefile for Xen.
#
-# Export target architecture overrides to Xen and Linux sub-trees.
-ifneq ($(XEN_TARGET_ARCH),)
-SUBARCH := $(subst x86_32,i386,$(XEN_TARGET_ARCH))
-export XEN_TARGET_ARCH SUBARCH XEN_SYSTYPE
-endif
-
# Default target must appear before any include lines
.PHONY: all
all: dist
export XEN_ROOT=$(CURDIR)
include Config.mk
+
+SUBARCH := $(subst x86_32,i386,$(XEN_TARGET_ARCH))
+export XEN_TARGET_ARCH SUBARCH XEN_SYSTYPE
include buildconfigs/Rules.mk
ifeq ($(XEN_TARGET_X86_PAE),y)
diff -r 7e9077dd4010 -r 26c75e0e48ed docs/man/xm.pod.1
--- a/docs/man/xm.pod.1 Wed Jan 17 19:55:48 2007 -0700
+++ b/docs/man/xm.pod.1 Wed Jan 17 21:31:22 2007 -0700
@@ -451,6 +451,7 @@ make the man page more readable):
xen_minor : 0
xen_extra : -devel
xen_caps : xen-3.0-x86_32
+ xen_scheduler : credit
xen_pagesize : 4096
platform_params : virt_start=0xfc000000
xen_changeset : Mon Nov 14 18:13:38 2005 +0100
@@ -460,7 +461,7 @@ make the man page more readable):
cc_compile_by : sdague
cc_compile_domain : (none)
cc_compile_date : Mon Nov 14 14:16:48 EST 2005
- xend_config_format : 2
+ xend_config_format : 3
B<FIELDS>
diff -r 7e9077dd4010 -r 26c75e0e48ed docs/man/xmdomain.cfg.pod.5
--- a/docs/man/xmdomain.cfg.pod.5 Wed Jan 17 19:55:48 2007 -0700
+++ b/docs/man/xmdomain.cfg.pod.5 Wed Jan 17 21:31:22 2007 -0700
@@ -135,6 +135,55 @@ one will be randomly chosen by xen with
=back
+=item B<vfb>
+
+A virtual frame buffer stanza in the form:
+
+ vfb = [ "stanza" ]
+
+The stanza specifies a set of I<name = value> options separated by
+commas, in the form: "name1=value1, name2=value2, ..."
+
+B<OPTIONS>
+
+=over 4
+
+=item I<type>
+
+There are currently two valid options: I<vnc> starts a VNC server that
+lets you connect an external VNC viewer, and I<sdl> starts an internal
+viewer.
+
+=item I<vncdisplay>
+
+The VNC display number to use, defaults to the domain ID. The
+VNC server listens on port 5900 + display number.
+
+=item I<vnclisten>
+
+The listening address for the VNC server, default 127.0.0.1.
+
+=item I<vncunused>
+
+If non-zero, the VNC server listens on the first unused port above
+5900.
+
+=item I<vncpasswd>
+
+Overrides the XenD configured default password.
+
+=item I<display>
+
+Display to use for the internal viewer, defaults to environment
+variable I<DISPLAY>.
+
+=item I<xauthority>
+
+Authority file to use for the internal viewer, defaults to environment
+variable I<XAUTHORITY>.
+
+=back
+
=back
=head1 ADDITIONAL OPTIONS
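A note on the port arithmetic in the vfb options above: the I<vncunused>
behaviour (listen on the first unused port above 5900) can be modelled with a
small stand-alone C probe. This is only a sketch under the assumption that
"unused" means "bindable"; first_unused_vnc_port() is a hypothetical helper,
not code from the Xen tools.

    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Find the first port above 5900 that we can bind on the loopback
     * address -- the port a VNC server scanning for a free display would
     * pick.  Illustrative only; the real scan happens in the VNC server. */
    static int first_unused_vnc_port(void)
    {
        int port;

        for (port = 5901; port <= 5999; port++) {
            struct sockaddr_in sa;
            int s = socket(AF_INET, SOCK_STREAM, 0);

            if (s < 0)
                return -1;
            memset(&sa, 0, sizeof(sa));
            sa.sin_family = AF_INET;
            sa.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
            sa.sin_port = htons((unsigned short)port);
            if (bind(s, (struct sockaddr *)&sa, sizeof(sa)) == 0) {
                close(s);               /* free: this is our display */
                return port;
            }
            close(s);                   /* busy: try the next one */
        }
        return -1;
    }

With I<vncdisplay>=N the server instead binds port 5900 + N directly.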
diff -r 7e9077dd4010 -r 26c75e0e48ed extras/mini-os/Makefile
--- a/extras/mini-os/Makefile Wed Jan 17 19:55:48 2007 -0700
+++ b/extras/mini-os/Makefile Wed Jan 17 21:31:22 2007 -0700
@@ -1,112 +1,88 @@ debug ?= y
-debug ?= y
+# Common Makefile for mini-os.
+#
+# Every architecture directory below mini-os/arch has to have a
+# Makefile and an arch.mk.
+#
+
pae ?= n
XEN_ROOT = ../..
include $(XEN_ROOT)/Config.mk
+XEN_INTERFACE_VERSION := 0x00030204
+export XEN_INTERFACE_VERSION
+
# Set TARGET_ARCH
-override TARGET_ARCH := $(XEN_TARGET_ARCH)
+override TARGET_ARCH := $(XEN_TARGET_ARCH)
-XEN_INTERFACE_VERSION := 0x00030204
+# Set mini-os root path, used in minios.mk.
+MINI-OS_ROOT=$(PWD)
+export MINI-OS_ROOT
-# NB. '-Wcast-qual' is nasty, so I omitted it.
-CFLAGS := -fno-builtin -Wall -Werror -Wredundant-decls -Wno-format
-CFLAGS += -Wstrict-prototypes -Wnested-externs -Wpointer-arith -Winline
-CFLAGS += -D__XEN_INTERFACE_VERSION__=$(XEN_INTERFACE_VERSION)
+# Determine the architecture family TARGET_ARCH_FAM.
+# First check whether the name contains x86_ (covers x86_32, x86_32y, x86_64);
+# otherwise use $(TARGET_ARCH) itself (ia64, ...).
+ifeq ($(findstring x86_,$(TARGET_ARCH)),x86_)
+TARGET_ARCH_FAM = x86
+else
+TARGET_ARCH_FAM = $(TARGET_ARCH)
+endif
-ASFLAGS = -D__ASSEMBLY__
+# The architecture family directory below mini-os.
+TARGET_ARCH_DIR := arch/$(TARGET_ARCH_FAM)
-LDLIBS = -L. -lminios
-LDFLAGS_FINAL := -N -T minios-$(TARGET_ARCH).lds
-LDFLAGS :=
+# Export these variables for possible use in architecture dependent makefiles.
+export TARGET_ARCH
+export TARGET_ARCH_DIR
+export TARGET_ARCH_FAM
+
+# This is used for architecture specific links.
+# This can be overridden by arch-specific rules.
+ARCH_LINKS =
+
+# For possible special header directories.
+# This can be overridden by arch-specific rules.
+EXTRA_INC =
+
+# Special build dependencies.
+# Build all after touching this/these file(s) (see minios.mk)
+SPEC_DEPENDS = minios.mk
+
+# Include the architecture family's special makerules.
+# This must be before include minios.mk!
+include $(TARGET_ARCH_DIR)/arch.mk
+
+# Include common mini-os makerules.
+include minios.mk
+
+# Define some default flags for linking.
+LDLIBS :=
+LDFLAGS :=
+LDARCHLIB := -L$(TARGET_ARCH_DIR) -l$(ARCH_LIB_NAME)
+LDFLAGS_FINAL := -N -T $(TARGET_ARCH_DIR)/minios-$(TARGET_ARCH).lds
# Prefix for global API names. All other symbols are localised before
# linking with EXTRA_OBJS.
GLOBAL_PREFIX := xenos_
EXTRA_OBJS =
-# For possible special source directories.
-EXTRA_SRC =
-# For possible special header directories.
-EXTRA_INC =
-
-# Standard name for architecture specific subdirectories.
-TARGET_ARCH_DIR = $(TARGET_ARCH)
-# This is used for architecture specific links.
-ARCH_LINKS =
-
-ifeq ($(TARGET_ARCH),x86_32)
-CFLAGS += -m32 -march=i686
-LDFLAGS += -m elf_i386
-TARGET_ARCH_DIR = x86
-EXTRA_INC += $(TARGET_ARCH_DIR)/$(TARGET_ARCH)
-EXTRA_SRC += arch/$(EXTRA_INC)
-endif
-
-ifeq ($(TARGET_ARCH)$(pae),x86_32y)
-CFLAGS += -DCONFIG_X86_PAE=1
-ASFLAGS += -DCONFIG_X86_PAE=1
-TARGET_ARCH_DIR = x86
-EXTRA_INC += $(TARGET_ARCH_DIR)/$(TARGET_ARCH)
-EXTRA_SRC += arch/$(EXTRA_INC)
-endif
-
-ifeq ($(TARGET_ARCH),x86_64)
-CFLAGS += -m64 -mno-red-zone -fpic -fno-reorder-blocks
-CFLAGS += -fno-asynchronous-unwind-tables
-LDFLAGS += -m elf_x86_64
-TARGET_ARCH_DIR = x86
-EXTRA_INC += $(TARGET_ARCH_DIR)/$(TARGET_ARCH)
-EXTRA_SRC += arch/$(EXTRA_INC)
-endif
-
-ifeq ($(TARGET_ARCH),ia64)
-CFLAGS += -mfixed-range=f2-f5,f12-f15,f32-f127 -mconstant-gp
-ASFLAGS += -x assembler-with-cpp -Wall
-ASFLAGS += -mfixed-range=f2-f5,f12-f15,f32-f127 -fomit-frame-pointer
-ASFLAGS += -fno-builtin -fno-common -fno-strict-aliasing -mconstant-gp
-ARCH_LINKS = IA64_LINKS # Special link on ia64 needed
-define arch_links
-[ -e include/ia64/asm-xsi-offsets.h ] || ln -sf ../../../../xen/include/asm-ia64/asm-xsi-offsets.h include/ia64/asm-xsi-offsets.h
-endef
-endif
-
-ifeq ($(debug),y)
-CFLAGS += -g
-else
-CFLAGS += -O3
-endif
-
-# Add the special header directories to the include paths.
-extra_incl := $(foreach dir,$(EXTRA_INC),-Iinclude/$(dir))
-override CPPFLAGS := -Iinclude $(CPPFLAGS) -Iinclude/$(TARGET_ARCH_DIR) $(extra_incl)
-
TARGET := mini-os
-HEAD := $(TARGET_ARCH).o
+# Subdirectories common to mini-os
+SUBDIRS := lib xenbus console
+
+# The common mini-os objects to build.
OBJS := $(patsubst %.c,%.o,$(wildcard *.c))
OBJS += $(patsubst %.c,%.o,$(wildcard lib/*.c))
OBJS += $(patsubst %.c,%.o,$(wildcard xenbus/*.c))
OBJS += $(patsubst %.c,%.o,$(wildcard console/*.c))
-OBJS += $(patsubst %.S,%.o,$(wildcard arch/$(TARGET_ARCH_DIR)/*.S))
-OBJS += $(patsubst %.c,%.o,$(wildcard arch/$(TARGET_ARCH_DIR)/*.c))
-# For special wanted source directories.
-extra_objs := $(foreach dir,$(EXTRA_SRC),$(patsubst %.c,%.o,$(wildcard $(dir)/*.c)))
-OBJS += $(extra_objs)
-extra_objs := $(foreach dir,$(EXTRA_SRC),$(patsubst %.S,%.o,$(wildcard $(dir)/*.S)))
-OBJS += $(extra_objs)
-HDRS := $(wildcard include/*.h)
-HDRS += $(wildcard include/xen/*.h)
-HDRS += $(wildcard include/$(TARGET_ARCH_DIR)/*.h)
-# For special wanted header directories.
-extra_heads := $(foreach dir,$(EXTRA_INC),$(wildcard $(dir)/*.h))
-HDRS += $(extra_heads)
.PHONY: default
default: $(TARGET)
-# Create special architecture specific links.
+# Create special architecture specific links. The function arch_links
+# has to be defined in arch.mk (see include above).
ifneq ($(ARCH_LINKS),)
$(ARCH_LINKS):
$(arch_links)
@@ -116,24 +92,29 @@ links: $(ARCH_LINKS)
links: $(ARCH_LINKS)
[ -e include/xen ] || ln -sf ../../../xen/include/public include/xen
-$(TARGET): links $(OBJS) $(HEAD)
- $(LD) -r $(LDFLAGS) $(HEAD) $(OBJS) -o $@.o
+.PHONY: arch_lib
+arch_lib:
+ $(MAKE) --directory=$(TARGET_ARCH_DIR) || exit 1;
+
+$(TARGET): links $(OBJS) arch_lib
+ $(LD) -r $(LDFLAGS) $(HEAD_OBJ) $(OBJS) $(LDARCHLIB) -o $@.o
$(OBJCOPY) -w -G $(GLOBAL_PREFIX)* -G _start $@.o $@.o
$(LD) $(LDFLAGS) $(LDFLAGS_FINAL) $@.o $(EXTRA_OBJS) -o $@
gzip -f -9 -c $@ >$@.gz
-.PHONY: clean
-clean:
- find . -type f -name '*.o' | xargs rm -f
- rm -f *.o *~ core $(TARGET) $(TARGET).gz
+.PHONY: clean arch_clean
+
+arch_clean:
+ $(MAKE) --directory=$(TARGET_ARCH_DIR) clean || exit 1;
+
+clean: arch_clean
+ for dir in $(SUBDIRS); do \
+ rm -f $$dir/*.o; \
+ done
+ rm -f *.o *~ core $(TARGET).elf $(TARGET).raw $(TARGET) $(TARGET).gz
find . -type l | xargs rm -f
rm -f tags TAGS
-%.o: %.c $(HDRS) Makefile
- $(CC) $(CFLAGS) $(CPPFLAGS) -c $< -o $@
-
-%.o: %.S $(HDRS) Makefile
- $(CC) $(ASFLAGS) $(CPPFLAGS) -c $< -o $@
define all_sources
( find . -follow -name SCCS -prune -o -name '*.[chS]' -print )
@@ -147,3 +128,4 @@ cscope:
.PHONY: tags
tags:
$(all_sources) | xargs ctags
+
diff -r 7e9077dd4010 -r 26c75e0e48ed extras/mini-os/arch/x86/Makefile
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/arch/x86/Makefile Wed Jan 17 21:31:22 2007 -0700
@@ -0,0 +1,29 @@
+#
+# x86 architecture specific Makefile.
+# It is used for x86_32, x86_32y and x86_64.
+#
+
+# Rebuild all after touching this/these extra file(s) (see minios.mk)
+SPEC_DEPENDS = arch.mk
+
+# The include of arch.mk has to come before the include of minios.mk!
+include arch.mk
+include ../../minios.mk
+
+# Sources here are all *.c and *.S files except $(TARGET_ARCH).S,
+# which is handled via $(HEAD_ARCH_OBJ).
+ARCH_SRCS := $(wildcard *.c)
+
+# The objects built from the sources.
+ARCH_OBJS := $(patsubst %.c,%.o,$(ARCH_SRCS))
+
+all: $(ARCH_LIB)
+
+# $(HEAD_ARCH_OBJ) is only built here; it is needed when linking
+# in ../../Makefile.
+$(ARCH_LIB): $(ARCH_OBJS) $(HEAD_ARCH_OBJ)
+ $(AR) rv $(ARCH_LIB) $(ARCH_OBJS)
+
+clean:
+ rm -f $(ARCH_LIB) $(ARCH_OBJS)
+
diff -r 7e9077dd4010 -r 26c75e0e48ed extras/mini-os/arch/x86/arch.mk
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/arch/x86/arch.mk Wed Jan 17 21:31:22 2007 -0700
@@ -0,0 +1,28 @@
+#
+# Architecture-specific make rules for the x86 family
+# (including x86_32, x86_32y and x86_64).
+#
+
+ifeq ($(TARGET_ARCH),x86_32)
+ARCH_CFLAGS := -m32 -march=i686
+ARCH_LDFLAGS := -m elf_i386
+EXTRA_INC += $(TARGET_ARCH_FAM)/$(TARGET_ARCH)
+EXTRA_SRC += arch/$(EXTRA_INC)
+endif
+
+ifeq ($(TARGET_ARCH)$(pae),x86_32y)
+ARCH_CFLAGS := -DCONFIG_X86_PAE=1
+ARCH_ASFLAGS := -DCONFIG_X86_PAE=1
+EXTRA_INC += $(TARGET_ARCH_FAM)/$(TARGET_ARCH)
+EXTRA_SRC += arch/$(EXTRA_INC)
+endif
+
+ifeq ($(TARGET_ARCH),x86_64)
+ARCH_CFLAGS := -m64 -mno-red-zone -fpic -fno-reorder-blocks
+ARCH_CFLAGS += -fno-asynchronous-unwind-tables
+ARCH_LDFLAGS := -m elf_x86_64
+EXTRA_INC += $(TARGET_ARCH_FAM)/$(TARGET_ARCH)
+EXTRA_SRC += arch/$(EXTRA_INC)
+endif
+
+
diff -r 7e9077dd4010 -r 26c75e0e48ed extras/mini-os/arch/x86/minios-x86_32.lds
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/arch/x86/minios-x86_32.lds Wed Jan 17 21:31:22 2007 -0700
@@ -0,0 +1,45 @@
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+SECTIONS
+{
+ . = 0x0;
+ _text = .; /* Text and read-only data */
+ .text : {
+ *(.text)
+ *(.gnu.warning)
+ } = 0x9090
+
+ _etext = .; /* End of text section */
+
+ .rodata : { *(.rodata) *(.rodata.*) }
+
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+ }
+
+ _edata = .; /* End of data section */
+
+ __bss_start = .; /* BSS */
+ .bss : {
+ *(.bss)
+ }
+ _end = . ;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.text.exit)
+ *(.data.exit)
+ *(.exitcall.exit)
+ }
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+}
diff -r 7e9077dd4010 -r 26c75e0e48ed extras/mini-os/arch/x86/minios-x86_64.lds
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/arch/x86/minios-x86_64.lds Wed Jan 17 21:31:22 2007 -0700
@@ -0,0 +1,54 @@
+OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
+OUTPUT_ARCH(i386:x86-64)
+ENTRY(_start)
+SECTIONS
+{
+ . = 0x0;
+ _text = .; /* Text and read-only data */
+ .text : {
+ *(.text)
+ *(.gnu.warning)
+ } = 0x9090
+
+ _etext = .; /* End of text section */
+
+ .rodata : { *(.rodata) *(.rodata.*) }
+
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+ }
+
+ _edata = .; /* End of data section */
+
+ . = ALIGN(8192); /* init_task */
+ .data.init_task : { *(.data.init_task) }
+
+ . = ALIGN(4096);
+ .data.page_aligned : { *(.data.idt) }
+
+ . = ALIGN(32);
+ .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+ __bss_start = .; /* BSS */
+ .bss : {
+ *(.bss)
+ }
+ _end = . ;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.text.exit)
+ *(.data.exit)
+ *(.exitcall.exit)
+ }
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+}
diff -r 7e9077dd4010 -r 26c75e0e48ed extras/mini-os/arch/x86/x86_32.S
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/arch/x86/x86_32.S Wed Jan 17 21:31:22 2007 -0700
@@ -0,0 +1,287 @@
+#include <os.h>
+#include <xen/arch-x86_32.h>
+
+.section __xen_guest
+ .ascii "GUEST_OS=Mini-OS"
+ .ascii ",XEN_VER=xen-3.0"
+ .ascii ",VIRT_BASE=0x0" /* &_text from minios_x86_32.lds */
+ .ascii ",ELF_PADDR_OFFSET=0x0"
+ .ascii ",HYPERCALL_PAGE=0x2"
+#ifdef CONFIG_X86_PAE
+ .ascii ",PAE=yes"
+#else
+ .ascii ",PAE=no"
+#endif
+ .ascii ",LOADER=generic"
+ .byte 0
+.text
+
+.globl _start, shared_info, hypercall_page
+
+_start:
+ cld
+ lss stack_start,%esp
+ push %esi
+ call start_kernel
+
+stack_start:
+ .long stack+8192, __KERNEL_SS
+
+ /* Unpleasant -- the PTE that maps this page is actually overwritten */
+ /* to map the real shared-info page! :-) */
+ .org 0x1000
+shared_info:
+ .org 0x2000
+
+hypercall_page:
+ .org 0x3000
+
+ES = 0x20
+ORIG_EAX = 0x24
+EIP = 0x28
+CS = 0x2C
+
+#define ENTRY(X) .globl X ; X :
+
+#define SAVE_ALL \
+ cld; \
+ pushl %es; \
+ pushl %ds; \
+ pushl %eax; \
+ pushl %ebp; \
+ pushl %edi; \
+ pushl %esi; \
+ pushl %edx; \
+ pushl %ecx; \
+ pushl %ebx; \
+ movl $(__KERNEL_DS),%edx; \
+ movl %edx,%ds; \
+ movl %edx,%es;
+
+#define RESTORE_ALL \
+ popl %ebx; \
+ popl %ecx; \
+ popl %edx; \
+ popl %esi; \
+ popl %edi; \
+ popl %ebp; \
+ popl %eax; \
+ popl %ds; \
+ popl %es; \
+ addl $4,%esp; \
+ iret; \
+
+ENTRY(divide_error)
+ pushl $0 # no error code
+ pushl $do_divide_error
+do_exception:
+ pushl %ds
+ pushl %eax
+ xorl %eax, %eax
+ pushl %ebp
+ pushl %edi
+ pushl %esi
+ pushl %edx
+ decl %eax # eax = -1
+ pushl %ecx
+ pushl %ebx
+ cld
+ movl %es, %ecx
+ movl ES(%esp), %edi # get the function address
+ movl ORIG_EAX(%esp), %edx # get the error code
+ movl %eax, ORIG_EAX(%esp)
+ movl %ecx, ES(%esp)
+ movl $(__KERNEL_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
+ movl %esp,%eax # pt_regs pointer
+ pushl %edx
+ pushl %eax
+ call *%edi
+ jmp ret_from_exception
+
+ret_from_exception:
+ movb CS(%esp),%cl
+ test $2,%cl # slow return to ring 2 or 3
+ jne safesti
+ RESTORE_ALL
+
+# A note on the "critical region" in our callback handler.
+# We want to avoid stacking callback handlers due to events occurring
+# during handling of the last event. To do this, we keep events disabled
+# until we've done all processing. HOWEVER, we must enable events before
+# popping the stack frame (can't be done atomically) and so it would still
+# be possible to get enough handler activations to overflow the stack.
+# Although unlikely, bugs of that kind are hard to track down, so we'd
+# like to avoid the possibility.
+# So, on entry to the handler we detect whether we interrupted an
+# existing activation in its critical region -- if so, we pop the current
+# activation and restart the handler using the previous one.
+ENTRY(hypervisor_callback)
+ pushl %eax
+ SAVE_ALL
+ movl EIP(%esp),%eax
+ cmpl $scrit,%eax
+ jb 11f
+ cmpl $ecrit,%eax
+ jb critical_region_fixup
+11: push %esp
+ call do_hypervisor_callback
+ add $4,%esp
+ movl HYPERVISOR_shared_info,%esi
+ xorl %eax,%eax
+ movb CS(%esp),%cl
+ test $2,%cl # slow return to ring 2 or 3
+ jne safesti
+safesti:movb $0,1(%esi) # reenable event callbacks
+scrit: /**** START OF CRITICAL REGION ****/
+ testb $0xFF,(%esi)
+ jnz 14f # process more events if necessary...
+ RESTORE_ALL
+14: movb $1,1(%esi)
+ jmp 11b
+ecrit: /**** END OF CRITICAL REGION ****/
+# [How we do the fixup]. We want to merge the current stack frame with the
+# just-interrupted frame. How we do this depends on where in the critical
+# region the interrupted handler was executing, and so how many saved
+# registers are in each frame. We do this quickly using the lookup table
+# 'critical_fixup_table'. For each byte offset in the critical region, it
+# provides the number of bytes which have already been popped from the
+# interrupted stack frame.
+critical_region_fixup:
+ addl $critical_fixup_table-scrit,%eax
+ movzbl (%eax),%eax # %eax contains num bytes popped
+ mov %esp,%esi
+ add %eax,%esi # %esi points at end of src region
+ mov %esp,%edi
+ add $0x34,%edi # %edi points at end of dst region
+ mov %eax,%ecx
+ shr $2,%ecx # convert byte count to dwords
+ je 16f # skip loop if nothing to copy
+15: subl $4,%esi # pre-decrementing copy loop
+ subl $4,%edi
+ movl (%esi),%eax
+ movl %eax,(%edi)
+ loop 15b
+16: movl %edi,%esp # final %edi is top of merged stack
+ jmp 11b
+
+critical_fixup_table:
+ .byte 0x00,0x00,0x00 # testb $0xff,(%esi)
+ .byte 0x00,0x00 # jne 14f
+ .byte 0x00 # pop %ebx
+ .byte 0x04 # pop %ecx
+ .byte 0x08 # pop %edx
+ .byte 0x0c # pop %esi
+ .byte 0x10 # pop %edi
+ .byte 0x14 # pop %ebp
+ .byte 0x18 # pop %eax
+ .byte 0x1c # pop %ds
+ .byte 0x20 # pop %es
+ .byte 0x24,0x24,0x24 # add $4,%esp
+ .byte 0x28 # iret
+ .byte 0x00,0x00,0x00,0x00 # movb $1,1(%esi)
+ .byte 0x00,0x00 # jmp 11b
+
+# Hypervisor uses this for application faults while it executes.
+ENTRY(failsafe_callback)
+ pop %ds
+ pop %es
+ pop %fs
+ pop %gs
+ iret
+
+ENTRY(coprocessor_error)
+ pushl $0
+ pushl $do_coprocessor_error
+ jmp do_exception
+
+ENTRY(simd_coprocessor_error)
+ pushl $0
+ pushl $do_simd_coprocessor_error
+ jmp do_exception
+
+ENTRY(device_not_available)
+ iret
+
+ENTRY(debug)
+ pushl $0
+ pushl $do_debug
+ jmp do_exception
+
+ENTRY(int3)
+ pushl $0
+ pushl $do_int3
+ jmp do_exception
+
+ENTRY(overflow)
+ pushl $0
+ pushl $do_overflow
+ jmp do_exception
+
+ENTRY(bounds)
+ pushl $0
+ pushl $do_bounds
+ jmp do_exception
+
+ENTRY(invalid_op)
+ pushl $0
+ pushl $do_invalid_op
+ jmp do_exception
+
+
+ENTRY(coprocessor_segment_overrun)
+ pushl $0
+ pushl $do_coprocessor_segment_overrun
+ jmp do_exception
+
+
+ENTRY(invalid_TSS)
+ pushl $do_invalid_TSS
+ jmp do_exception
+
+
+ENTRY(segment_not_present)
+ pushl $do_segment_not_present
+ jmp do_exception
+
+
+ENTRY(stack_segment)
+ pushl $do_stack_segment
+ jmp do_exception
+
+
+ENTRY(general_protection)
+ pushl $do_general_protection
+ jmp do_exception
+
+
+ENTRY(alignment_check)
+ pushl $do_alignment_check
+ jmp do_exception
+
+
+ENTRY(page_fault)
+ pushl $do_page_fault
+ jmp do_exception
+
+ENTRY(machine_check)
+ pushl $0
+ pushl $do_machine_check
+ jmp do_exception
+
+
+ENTRY(spurious_interrupt_bug)
+ pushl $0
+ pushl $do_spurious_interrupt_bug
+ jmp do_exception
+
+
+
+ENTRY(thread_starter)
+ popl %eax
+ popl %ebx
+ pushl %eax
+ call *%ebx
+ call exit_thread
+
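The critical-region fixup above is dense assembly; as an aid, here is a hedged
C model of its copy loop. merge_frames() is hypothetical: 'popped' is the byte
count read from critical_fixup_table, and 0x34 matches the $0x34 added to %edi
(the size of a fully saved frame including the iret slot).

    #include <stdint.h>
    #include <string.h>

    #define FRAME_BYTES 0x34    /* add $0x34,%edi in the assembly */

    /* Merge the current frame with the just-interrupted one: the 'popped'
     * bytes at the bottom of the current frame are copied up so they overlay
     * the slots the interrupted frame had already popped, leaving one
     * coherent frame whose top is returned (final %edi -> new %esp). */
    static uint32_t *merge_frames(uint32_t *esp, unsigned int popped)
    {
        uint8_t *src = (uint8_t *)esp + popped;      /* end of src region */
        uint8_t *dst = (uint8_t *)esp + FRAME_BYTES; /* end of dst region */
        unsigned int dwords = popped / 4;            /* shr $2,%ecx */

        while (dwords--) {                           /* pre-decrementing copy */
            src -= 4;
            dst -= 4;
            memcpy(dst, src, 4);
        }
        return (uint32_t *)dst;                      /* top of merged stack */
    }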
diff -r 7e9077dd4010 -r 26c75e0e48ed extras/mini-os/arch/x86/x86_64.S
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/arch/x86/x86_64.S Wed Jan 17 21:31:22 2007 -0700
@@ -0,0 +1,385 @@
+#include <os.h>
+#include <xen/features.h>
+
+.section __xen_guest
+ .ascii "GUEST_OS=Mini-OS"
+ .ascii ",XEN_VER=xen-3.0"
+ .ascii ",VIRT_BASE=0x0" /* &_text from minios_x86_64.lds */
+ .ascii ",ELF_PADDR_OFFSET=0x0"
+ .ascii ",HYPERCALL_PAGE=0x2"
+ .ascii ",LOADER=generic"
+ .byte 0
+.text
+
+#define ENTRY(X) .globl X ; X :
+.globl _start, shared_info, hypercall_page
+
+
+_start:
+ cld
+ movq stack_start(%rip),%rsp
+ movq %rsi,%rdi
+ call start_kernel
+
+stack_start:
+ .quad stack+8192
+
+ /* Unpleasant -- the PTE that maps this page is actually overwritten */
+ /* to map the real shared-info page! :-) */
+ .org 0x1000
+shared_info:
+ .org 0x2000
+
+hypercall_page:
+ .org 0x3000
+
+
+/* Offsets into shared_info_t. */
+#define evtchn_upcall_pending /* 0 */
+#define evtchn_upcall_mask 1
+
+NMI_MASK = 0x80000000
+
+#define RDI 112
+#define ORIG_RAX 120 /* + error_code */
+#define EFLAGS 144
+
+#define REST_SKIP 6*8
+.macro SAVE_REST
+ subq $REST_SKIP,%rsp
+# CFI_ADJUST_CFA_OFFSET REST_SKIP
+ movq %rbx,5*8(%rsp)
+# CFI_REL_OFFSET rbx,5*8
+ movq %rbp,4*8(%rsp)
+# CFI_REL_OFFSET rbp,4*8
+ movq %r12,3*8(%rsp)
+# CFI_REL_OFFSET r12,3*8
+ movq %r13,2*8(%rsp)
+# CFI_REL_OFFSET r13,2*8
+ movq %r14,1*8(%rsp)
+# CFI_REL_OFFSET r14,1*8
+ movq %r15,(%rsp)
+# CFI_REL_OFFSET r15,0*8
+.endm
+
+
+.macro RESTORE_REST
+ movq (%rsp),%r15
+# CFI_RESTORE r15
+ movq 1*8(%rsp),%r14
+# CFI_RESTORE r14
+ movq 2*8(%rsp),%r13
+# CFI_RESTORE r13
+ movq 3*8(%rsp),%r12
+# CFI_RESTORE r12
+ movq 4*8(%rsp),%rbp
+# CFI_RESTORE rbp
+ movq 5*8(%rsp),%rbx
+# CFI_RESTORE rbx
+ addq $REST_SKIP,%rsp
+# CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
+.endm
+
+
+#define ARG_SKIP 9*8
+.macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0
+ .if \skipr11
+ .else
+ movq (%rsp),%r11
+# CFI_RESTORE r11
+ .endif
+ .if \skipr8910
+ .else
+ movq 1*8(%rsp),%r10
+# CFI_RESTORE r10
+ movq 2*8(%rsp),%r9
+# CFI_RESTORE r9
+ movq 3*8(%rsp),%r8
+# CFI_RESTORE r8
+ .endif
+ .if \skiprax
+ .else
+ movq 4*8(%rsp),%rax
+# CFI_RESTORE rax
+ .endif
+ .if \skiprcx
+ .else
+ movq 5*8(%rsp),%rcx
+# CFI_RESTORE rcx
+ .endif
+ .if \skiprdx
+ .else
+ movq 6*8(%rsp),%rdx
+# CFI_RESTORE rdx
+ .endif
+ movq 7*8(%rsp),%rsi
+# CFI_RESTORE rsi
+ movq 8*8(%rsp),%rdi
+# CFI_RESTORE rdi
+ .if ARG_SKIP+\addskip > 0
+ addq $ARG_SKIP+\addskip,%rsp
+# CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
+ .endif
+.endm
+
+
+.macro HYPERVISOR_IRET flag
+# testb $3,1*8(%rsp) /* Don't need to do that in Mini-os, as */
+# jnz 2f /* there is no userspace? */
+ testl $NMI_MASK,2*8(%rsp)
+ jnz 2f
+
+ testb $1,(xen_features+XENFEAT_supervisor_mode_kernel)
+ jnz 1f
+
+ /* Direct iret to kernel space. Correct CS and SS. */
+ orb $3,1*8(%rsp)
+ orb $3,4*8(%rsp)
+1: iretq
+
+2: /* Slow iret via hypervisor. */
+ andl $~NMI_MASK, 16(%rsp)
+ pushq $\flag
+ jmp hypercall_page + (__HYPERVISOR_iret * 32)
+.endm
+
+/*
+ * Exception entry point. This expects an error code/orig_rax on the stack
+ * and the exception handler in %rax.
+ */
+ENTRY(error_entry)
+# _frame RDI
+ /* rdi slot contains rax, oldrax contains error code */
+ cld
+ subq $14*8,%rsp
+# CFI_ADJUST_CFA_OFFSET (14*8)
+ movq %rsi,13*8(%rsp)
+# CFI_REL_OFFSET rsi,RSI
+ movq 14*8(%rsp),%rsi /* load rax from rdi slot */
+ movq %rdx,12*8(%rsp)
+# CFI_REL_OFFSET rdx,RDX
+ movq %rcx,11*8(%rsp)
+# CFI_REL_OFFSET rcx,RCX
+ movq %rsi,10*8(%rsp) /* store rax */
+# CFI_REL_OFFSET rax,RAX
+ movq %r8, 9*8(%rsp)
+# CFI_REL_OFFSET r8,R8
+ movq %r9, 8*8(%rsp)
+# CFI_REL_OFFSET r9,R9
+ movq %r10,7*8(%rsp)
+# CFI_REL_OFFSET r10,R10
+ movq %r11,6*8(%rsp)
+# CFI_REL_OFFSET r11,R11
+ movq %rbx,5*8(%rsp)
+# CFI_REL_OFFSET rbx,RBX
+ movq %rbp,4*8(%rsp)
+# CFI_REL_OFFSET rbp,RBP
+ movq %r12,3*8(%rsp)
+# CFI_REL_OFFSET r12,R12
+ movq %r13,2*8(%rsp)
+# CFI_REL_OFFSET r13,R13
+ movq %r14,1*8(%rsp)
+# CFI_REL_OFFSET r14,R14
+ movq %r15,(%rsp)
+# CFI_REL_OFFSET r15,R15
+#if 0
+ cmpl $__KERNEL_CS,CS(%rsp)
+ je error_kernelspace
+#endif
+error_call_handler:
+ movq %rdi, RDI(%rsp)
+ movq %rsp,%rdi
+ movq ORIG_RAX(%rsp),%rsi # get error code
+ movq $-1,ORIG_RAX(%rsp)
+ call *%rax
+
+.macro zeroentry sym
+# INTR_FRAME
+ movq (%rsp),%rcx
+ movq 8(%rsp),%r11
+ addq $0x10,%rsp /* skip rcx and r11 */
+ pushq $0 /* push error code/oldrax */
+# CFI_ADJUST_CFA_OFFSET 8
+ pushq %rax /* push real oldrax to the rdi slot */
+# CFI_ADJUST_CFA_OFFSET 8
+ leaq \sym(%rip),%rax
+ jmp error_entry
+# CFI_ENDPROC
+.endm
+
+.macro errorentry sym
+# XCPT_FRAME
+ movq (%rsp),%rcx
+ movq 8(%rsp),%r11
+ addq $0x10,%rsp /* rsp points to the error code */
+ pushq %rax
+# CFI_ADJUST_CFA_OFFSET 8
+ leaq \sym(%rip),%rax
+ jmp error_entry
+# CFI_ENDPROC
+.endm
+
+#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
+#define XEN_PUT_VCPU_INFO(reg)
+#define XEN_PUT_VCPU_INFO_fixup
+#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
+#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
+#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
+
+#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
+ XEN_LOCKED_BLOCK_EVENTS(reg) ; \
+ XEN_PUT_VCPU_INFO(reg)
+
+#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
+        XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
+        XEN_PUT_VCPU_INFO(reg)
+
+
+
+ENTRY(hypervisor_callback)
+ zeroentry hypervisor_callback2
+
+ENTRY(hypervisor_callback2)
+ movq %rdi, %rsp
+11: movq %gs:8,%rax
+ incl %gs:0
+ cmovzq %rax,%rsp
+ pushq %rdi
+ call do_hypervisor_callback
+ popq %rsp
+ decl %gs:0
+ jmp error_exit
+
+# ALIGN
+restore_all_enable_events:
+ XEN_UNBLOCK_EVENTS(%rsi) # %rsi is already set up...
+
+scrit: /**** START OF CRITICAL REGION ****/
+ XEN_TEST_PENDING(%rsi)
+ jnz 14f # process more events if necessary...
+ XEN_PUT_VCPU_INFO(%rsi)
+ RESTORE_ARGS 0,8,0
+ HYPERVISOR_IRET 0
+
+14: XEN_LOCKED_BLOCK_EVENTS(%rsi)
+ XEN_PUT_VCPU_INFO(%rsi)
+ SAVE_REST
+ movq %rsp,%rdi # set the argument again
+ jmp 11b
+ecrit: /**** END OF CRITICAL REGION ****/
+
+
+retint_kernel:
+retint_restore_args:
+ movl EFLAGS-REST_SKIP(%rsp), %eax
+ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
+ XEN_GET_VCPU_INFO(%rsi)
+ andb evtchn_upcall_mask(%rsi),%al
+ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
+ jnz restore_all_enable_events # != 0 => enable event delivery
+ XEN_PUT_VCPU_INFO(%rsi)
+
+ RESTORE_ARGS 0,8,0
+ HYPERVISOR_IRET 0
+
+
+error_exit:
+ RESTORE_REST
+/* cli */
+ XEN_BLOCK_EVENTS(%rsi)
+ jmp retint_kernel
+
+
+
+ENTRY(failsafe_callback)
+ popq %rcx
+ popq %r11
+ iretq
+
+
+ENTRY(coprocessor_error)
+ zeroentry do_coprocessor_error
+
+
+ENTRY(simd_coprocessor_error)
+ zeroentry do_simd_coprocessor_error
+
+
+ENTRY(device_not_available)
+ zeroentry do_device_not_available
+
+
+ENTRY(debug)
+# INTR_FRAME
+# CFI_ADJUST_CFA_OFFSET 8 */
+ zeroentry do_debug
+# CFI_ENDPROC
+
+
+ENTRY(int3)
+# INTR_FRAME
+# CFI_ADJUST_CFA_OFFSET 8 */
+ zeroentry do_int3
+# CFI_ENDPROC
+
+ENTRY(overflow)
+ zeroentry do_overflow
+
+
+ENTRY(bounds)
+ zeroentry do_bounds
+
+
+ENTRY(invalid_op)
+ zeroentry do_invalid_op
+
+
+ENTRY(coprocessor_segment_overrun)
+ zeroentry do_coprocessor_segment_overrun
+
+
+ENTRY(invalid_TSS)
+ errorentry do_invalid_TSS
+
+
+ENTRY(segment_not_present)
+ errorentry do_segment_not_present
+
+
+/* runs on exception stack */
+ENTRY(stack_segment)
+# XCPT_FRAME
+ errorentry do_stack_segment
+# CFI_ENDPROC
+
+
+ENTRY(general_protection)
+ errorentry do_general_protection
+
+
+ENTRY(alignment_check)
+ errorentry do_alignment_check
+
+
+ENTRY(divide_error)
+ zeroentry do_divide_error
+
+
+ENTRY(spurious_interrupt_bug)
+ zeroentry do_spurious_interrupt_bug
+
+
+ENTRY(page_fault)
+ errorentry do_page_fault
+
+
+
+
+
+ENTRY(thread_starter)
+ popq %rdi
+ popq %rbx
+ call *%rbx
+ call exit_thread
+
+
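The retint_kernel path in this file compresses its condition into a shift and
two andb instructions. A C model of the same decision follows; struct
vcpu_info_model is an assumption that mirrors only the two shared-info bytes
the assembly touches.

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_EFLAGS_IF (1u << 9)

    struct vcpu_info_model {
        uint8_t evtchn_upcall_pending;   /* offset 0 in shared info */
        uint8_t evtchn_upcall_mask;      /* offset 1: non-zero = blocked */
    };

    /* Mirrors "shr $9 / andb mask / andb $1 / jnz": take the slow path that
     * re-enables event delivery only when the interrupted context ran with
     * EFLAGS.IF set while events are currently blocked by the mask. */
    static bool take_enable_events_path(uint64_t iret_eflags,
                                        const struct vcpu_info_model *v)
    {
        bool saved_if = (iret_eflags & X86_EFLAGS_IF) != 0;
        return saved_if && (v->evtchn_upcall_mask & 1);
    }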
diff -r 7e9077dd4010 -r 26c75e0e48ed extras/mini-os/minios-x86_32.lds
--- a/extras/mini-os/minios-x86_32.lds Wed Jan 17 19:55:48 2007 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
-OUTPUT_ARCH(i386)
-ENTRY(_start)
-SECTIONS
-{
- . = 0x0;
- _text = .; /* Text and read-only data */
- .text : {
- *(.text)
- *(.gnu.warning)
- } = 0x9090
-
- _etext = .; /* End of text section */
-
- .rodata : { *(.rodata) *(.rodata.*) }
-
- .data : { /* Data */
- *(.data)
- CONSTRUCTORS
- }
-
- _edata = .; /* End of data section */
-
- __bss_start = .; /* BSS */
- .bss : {
- *(.bss)
- }
- _end = . ;
-
- /* Sections to be discarded */
- /DISCARD/ : {
- *(.text.exit)
- *(.data.exit)
- *(.exitcall.exit)
- }
-
- /* Stabs debugging sections. */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- .comment 0 : { *(.comment) }
-}
diff -r 7e9077dd4010 -r 26c75e0e48ed extras/mini-os/minios-x86_64.lds
--- a/extras/mini-os/minios-x86_64.lds Wed Jan 17 19:55:48 2007 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
-OUTPUT_ARCH(i386:x86-64)
-ENTRY(_start)
-SECTIONS
-{
- . = 0x0;
- _text = .; /* Text and read-only data */
- .text : {
- *(.text)
- *(.gnu.warning)
- } = 0x9090
-
- _etext = .; /* End of text section */
-
- .rodata : { *(.rodata) *(.rodata.*) }
-
- .data : { /* Data */
- *(.data)
- CONSTRUCTORS
- }
-
- _edata = .; /* End of data section */
-
- . = ALIGN(8192); /* init_task */
- .data.init_task : { *(.data.init_task) }
-
- . = ALIGN(4096);
- .data.page_aligned : { *(.data.idt) }
-
- . = ALIGN(32);
- .data.cacheline_aligned : { *(.data.cacheline_aligned) }
-
- __bss_start = .; /* BSS */
- .bss : {
- *(.bss)
- }
- _end = . ;
-
- /* Sections to be discarded */
- /DISCARD/ : {
- *(.text.exit)
- *(.data.exit)
- *(.exitcall.exit)
- }
-
- /* Stabs debugging sections. */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- .comment 0 : { *(.comment) }
-}
diff -r 7e9077dd4010 -r 26c75e0e48ed extras/mini-os/minios.mk
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extras/mini-os/minios.mk Wed Jan 17 21:31:22 2007 -0700
@@ -0,0 +1,62 @@
+#
+# This file contains the common make rules for building mini-os.
+#
+
+debug = y
+
+# Define some default flags.
+# NB. '-Wcast-qual' is nasty, so I omitted it.
+DEF_CFLAGS := -fno-builtin -Wall -Werror -Wredundant-decls -Wno-format
+DEF_CFLAGS += -Wstrict-prototypes -Wnested-externs -Wpointer-arith -Winline
+DEF_CFLAGS += -D__XEN_INTERFACE_VERSION__=$(XEN_INTERFACE_VERSION)
+
+DEF_ASFLAGS = -D__ASSEMBLY__
+
+ifeq ($(debug),y)
+DEF_CFLAGS += -g
+else
+DEF_CFLAGS += -O3
+endif
+
+# Build the CFLAGS and ASFLAGS for compiling and assembling.
+# DEF_... flags are the common mini-os flags,
+# ARCH_... flags may be defined in arch/$(TARGET_ARCH_FAM)/arch.mk
+CFLAGS := $(DEF_CFLAGS) $(ARCH_CFLAGS)
+ASFLAGS := $(DEF_ASFLAGS) $(ARCH_ASFLAGS)
+
+# The path pointing to the architecture specific header files.
+ARCH_SPEC_INC := $(MINI-OS_ROOT)/include/$(TARGET_ARCH_FAM)
+
+# Find all header files for checking dependencies.
+HDRS := $(wildcard $(MINI-OS_ROOT)/include/*.h)
+HDRS += $(wildcard $(MINI-OS_ROOT)/include/xen/*.h)
+HDRS += $(wildcard $(ARCH_SPEC_INC)/*.h)
+# For special wanted header directories.
+extra_heads := $(foreach dir,$(EXTRA_INC),$(wildcard $(dir)/*.h))
+HDRS += $(extra_heads)
+
+# Add the special header directories to the include paths.
+extra_incl := $(foreach dir,$(EXTRA_INC),-I$(MINI-OS_ROOT)/include/$(dir))
+override CPPFLAGS := -I$(MINI-OS_ROOT)/include $(CPPFLAGS) -I$(ARCH_SPEC_INC) $(extra_incl)
+
+# The name of the architecture specific library.
+# On x86_32 this is libx86_32.a.
+# $(ARCH_LIB) has to be built in the architecture specific directory.
+ARCH_LIB_NAME = $(TARGET_ARCH)
+ARCH_LIB := lib$(ARCH_LIB_NAME).a
+
+# This object contains the entrypoint for startup from Xen.
+# $(HEAD_ARCH_OBJ) has to be built in the architecture specific directory.
+HEAD_ARCH_OBJ := $(TARGET_ARCH).o
+HEAD_OBJ := $(TARGET_ARCH_DIR)/$(HEAD_ARCH_OBJ)
+
+
+%.o: %.c $(HDRS) Makefile $(SPEC_DEPENDS)
+ $(CC) $(CFLAGS) $(CPPFLAGS) -c $< -o $@
+
+%.o: %.S $(HDRS) Makefile $(SPEC_DEPENDS)
+ $(CC) $(ASFLAGS) $(CPPFLAGS) -c $< -o $@
+
+
+
+
diff -r 7e9077dd4010 -r 26c75e0e48ed extras/mini-os/x86_32.S
--- a/extras/mini-os/x86_32.S Wed Jan 17 19:55:48 2007 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,287 +0,0 @@
-#include <os.h>
-#include <xen/arch-x86_32.h>
-
-.section __xen_guest
- .ascii "GUEST_OS=Mini-OS"
- .ascii ",XEN_VER=xen-3.0"
- .ascii ",VIRT_BASE=0x0" /* &_text from minios_x86_32.lds */
- .ascii ",ELF_PADDR_OFFSET=0x0"
- .ascii ",HYPERCALL_PAGE=0x2"
-#ifdef CONFIG_X86_PAE
- .ascii ",PAE=yes"
-#else
- .ascii ",PAE=no"
-#endif
- .ascii ",LOADER=generic"
- .byte 0
-.text
-
-.globl _start, shared_info, hypercall_page
-
-_start:
- cld
- lss stack_start,%esp
- push %esi
- call start_kernel
-
-stack_start:
- .long stack+8192, __KERNEL_SS
-
- /* Unpleasant -- the PTE that maps this page is actually overwritten */
- /* to map the real shared-info page! :-) */
- .org 0x1000
-shared_info:
- .org 0x2000
-
-hypercall_page:
- .org 0x3000
-
-ES = 0x20
-ORIG_EAX = 0x24
-EIP = 0x28
-CS = 0x2C
-
-#define ENTRY(X) .globl X ; X :
-
-#define SAVE_ALL \
- cld; \
- pushl %es; \
- pushl %ds; \
- pushl %eax; \
- pushl %ebp; \
- pushl %edi; \
- pushl %esi; \
- pushl %edx; \
- pushl %ecx; \
- pushl %ebx; \
- movl $(__KERNEL_DS),%edx; \
- movl %edx,%ds; \
- movl %edx,%es;
-
-#define RESTORE_ALL \
- popl %ebx; \
- popl %ecx; \
- popl %edx; \
- popl %esi; \
- popl %edi; \
- popl %ebp; \
- popl %eax; \
- popl %ds; \
- popl %es; \
- addl $4,%esp; \
- iret; \
-
-ENTRY(divide_error)
- pushl $0 # no error code
- pushl $do_divide_error
-do_exception:
- pushl %ds
- pushl %eax
- xorl %eax, %eax
- pushl %ebp
- pushl %edi
- pushl %esi
- pushl %edx
- decl %eax # eax = -1
- pushl %ecx
- pushl %ebx
- cld
- movl %es, %ecx
- movl ES(%esp), %edi # get the function address
- movl ORIG_EAX(%esp), %edx # get the error code
- movl %eax, ORIG_EAX(%esp)
- movl %ecx, ES(%esp)
- movl $(__KERNEL_DS), %ecx
- movl %ecx, %ds
- movl %ecx, %es
- movl %esp,%eax # pt_regs pointer
- pushl %edx
- pushl %eax
- call *%edi
- jmp ret_from_exception
-
-ret_from_exception:
- movb CS(%esp),%cl
- test $2,%cl # slow return to ring 2 or 3
- jne safesti
- RESTORE_ALL
-
-# A note on the "critical region" in our callback handler.
-# We want to avoid stacking callback handlers due to events occurring
-# during handling of the last event. To do this, we keep events disabled
-# until we've done all processing. HOWEVER, we must enable events before
-# popping the stack frame (can't be done atomically) and so it would still
-# be possible to get enough handler activations to overflow the stack.
-# Although unlikely, bugs of that kind are hard to track down, so we'd
-# like to avoid the possibility.
-# So, on entry to the handler we detect whether we interrupted an
-# existing activation in its critical region -- if so, we pop the current
-# activation and restart the handler using the previous one.
-ENTRY(hypervisor_callback)
- pushl %eax
- SAVE_ALL
- movl EIP(%esp),%eax
- cmpl $scrit,%eax
- jb 11f
- cmpl $ecrit,%eax
- jb critical_region_fixup
-11: push %esp
- call do_hypervisor_callback
- add $4,%esp
- movl HYPERVISOR_shared_info,%esi
- xorl %eax,%eax
- movb CS(%esp),%cl
- test $2,%cl # slow return to ring 2 or 3
- jne safesti
-safesti:movb $0,1(%esi) # reenable event callbacks
-scrit: /**** START OF CRITICAL REGION ****/
- testb $0xFF,(%esi)
- jnz 14f # process more events if necessary...
- RESTORE_ALL
-14: movb $1,1(%esi)
- jmp 11b
-ecrit: /**** END OF CRITICAL REGION ****/
-# [How we do the fixup]. We want to merge the current stack frame with the
-# just-interrupted frame. How we do this depends on where in the critical
-# region the interrupted handler was executing, and so how many saved
-# registers are in each frame. We do this quickly using the lookup table
-# 'critical_fixup_table'. For each byte offset in the critical region, it
-# provides the number of bytes which have already been popped from the
-# interrupted stack frame.
-critical_region_fixup:
- addl $critical_fixup_table-scrit,%eax
- movzbl (%eax),%eax # %eax contains num bytes popped
- mov %esp,%esi
- add %eax,%esi # %esi points at end of src region
- mov %esp,%edi
- add $0x34,%edi # %edi points at end of dst region
- mov %eax,%ecx
- shr $2,%ecx # convert byte count to dwords
- je 16f # skip loop if nothing to copy
-15: subl $4,%esi # pre-decrementing copy loop
- subl $4,%edi
- movl (%esi),%eax
- movl %eax,(%edi)
- loop 15b
-16: movl %edi,%esp # final %edi is top of merged stack
- jmp 11b
-
-critical_fixup_table:
- .byte 0x00,0x00,0x00 # testb $0xff,(%esi)
- .byte 0x00,0x00 # jne 14f
- .byte 0x00 # pop %ebx
- .byte 0x04 # pop %ecx
- .byte 0x08 # pop %edx
- .byte 0x0c # pop %esi
- .byte 0x10 # pop %edi
- .byte 0x14 # pop %ebp
- .byte 0x18 # pop %eax
- .byte 0x1c # pop %ds
- .byte 0x20 # pop %es
- .byte 0x24,0x24,0x24 # add $4,%esp
- .byte 0x28 # iret
- .byte 0x00,0x00,0x00,0x00 # movb $1,1(%esi)
- .byte 0x00,0x00 # jmp 11b
-
-# Hypervisor uses this for application faults while it executes.
-ENTRY(failsafe_callback)
- pop %ds
- pop %es
- pop %fs
- pop %gs
- iret
-
-ENTRY(coprocessor_error)
- pushl $0
- pushl $do_coprocessor_error
- jmp do_exception
-
-ENTRY(simd_coprocessor_error)
- pushl $0
- pushl $do_simd_coprocessor_error
- jmp do_exception
-
-ENTRY(device_not_available)
- iret
-
-ENTRY(debug)
- pushl $0
- pushl $do_debug
- jmp do_exception
-
-ENTRY(int3)
- pushl $0
- pushl $do_int3
- jmp do_exception
-
-ENTRY(overflow)
- pushl $0
- pushl $do_overflow
- jmp do_exception
-
-ENTRY(bounds)
- pushl $0
- pushl $do_bounds
- jmp do_exception
-
-ENTRY(invalid_op)
- pushl $0
- pushl $do_invalid_op
- jmp do_exception
-
-
-ENTRY(coprocessor_segment_overrun)
- pushl $0
- pushl $do_coprocessor_segment_overrun
- jmp do_exception
-
-
-ENTRY(invalid_TSS)
- pushl $do_invalid_TSS
- jmp do_exception
-
-
-ENTRY(segment_not_present)
- pushl $do_segment_not_present
- jmp do_exception
-
-
-ENTRY(stack_segment)
- pushl $do_stack_segment
- jmp do_exception
-
-
-ENTRY(general_protection)
- pushl $do_general_protection
- jmp do_exception
-
-
-ENTRY(alignment_check)
- pushl $do_alignment_check
- jmp do_exception
-
-
-ENTRY(page_fault)
- pushl $do_page_fault
- jmp do_exception
-
-ENTRY(machine_check)
- pushl $0
- pushl $do_machine_check
- jmp do_exception
-
-
-ENTRY(spurious_interrupt_bug)
- pushl $0
- pushl $do_spurious_interrupt_bug
- jmp do_exception
-
-
-
-ENTRY(thread_starter)
- popl %eax
- popl %ebx
- pushl %eax
- call *%ebx
- call exit_thread
-
diff -r 7e9077dd4010 -r 26c75e0e48ed extras/mini-os/x86_64.S
--- a/extras/mini-os/x86_64.S Wed Jan 17 19:55:48 2007 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,385 +0,0 @@
-#include <os.h>
-#include <xen/features.h>
-
-.section __xen_guest
- .ascii "GUEST_OS=Mini-OS"
- .ascii ",XEN_VER=xen-3.0"
- .ascii ",VIRT_BASE=0x0" /* &_text from minios_x86_64.lds */
- .ascii ",ELF_PADDR_OFFSET=0x0"
- .ascii ",HYPERCALL_PAGE=0x2"
- .ascii ",LOADER=generic"
- .byte 0
-.text
-
-#define ENTRY(X) .globl X ; X :
-.globl _start, shared_info, hypercall_page
-
-
-_start:
- cld
- movq stack_start(%rip),%rsp
- movq %rsi,%rdi
- call start_kernel
-
-stack_start:
- .quad stack+8192
-
- /* Unpleasant -- the PTE that maps this page is actually overwritten */
- /* to map the real shared-info page! :-) */
- .org 0x1000
-shared_info:
- .org 0x2000
-
-hypercall_page:
- .org 0x3000
-
-
-/* Offsets into shared_info_t. */
-#define evtchn_upcall_pending /* 0 */
-#define evtchn_upcall_mask 1
-
-NMI_MASK = 0x80000000
-
-#define RDI 112
-#define ORIG_RAX 120 /* + error_code */
-#define EFLAGS 144
-
-#define REST_SKIP 6*8
-.macro SAVE_REST
- subq $REST_SKIP,%rsp
-# CFI_ADJUST_CFA_OFFSET REST_SKIP
- movq %rbx,5*8(%rsp)
-# CFI_REL_OFFSET rbx,5*8
- movq %rbp,4*8(%rsp)
-# CFI_REL_OFFSET rbp,4*8
- movq %r12,3*8(%rsp)
-# CFI_REL_OFFSET r12,3*8
- movq %r13,2*8(%rsp)
-# CFI_REL_OFFSET r13,2*8
- movq %r14,1*8(%rsp)
-# CFI_REL_OFFSET r14,1*8
- movq %r15,(%rsp)
-# CFI_REL_OFFSET r15,0*8
-.endm
-
-
-.macro RESTORE_REST
- movq (%rsp),%r15
-# CFI_RESTORE r15
- movq 1*8(%rsp),%r14
-# CFI_RESTORE r14
- movq 2*8(%rsp),%r13
-# CFI_RESTORE r13
- movq 3*8(%rsp),%r12
-# CFI_RESTORE r12
- movq 4*8(%rsp),%rbp
-# CFI_RESTORE rbp
- movq 5*8(%rsp),%rbx
-# CFI_RESTORE rbx
- addq $REST_SKIP,%rsp
-# CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
-.endm
-
-
-#define ARG_SKIP 9*8
-.macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0
- .if \skipr11
- .else
- movq (%rsp),%r11
-# CFI_RESTORE r11
- .endif
- .if \skipr8910
- .else
- movq 1*8(%rsp),%r10
-# CFI_RESTORE r10
- movq 2*8(%rsp),%r9
-# CFI_RESTORE r9
- movq 3*8(%rsp),%r8
-# CFI_RESTORE r8
- .endif
- .if \skiprax
- .else
- movq 4*8(%rsp),%rax
-# CFI_RESTORE rax
- .endif
- .if \skiprcx
- .else
- movq 5*8(%rsp),%rcx
-# CFI_RESTORE rcx
- .endif
- .if \skiprdx
- .else
- movq 6*8(%rsp),%rdx
-# CFI_RESTORE rdx
- .endif
- movq 7*8(%rsp),%rsi
-# CFI_RESTORE rsi
- movq 8*8(%rsp),%rdi
-# CFI_RESTORE rdi
- .if ARG_SKIP+\addskip > 0
- addq $ARG_SKIP+\addskip,%rsp
-# CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
- .endif
-.endm
-
-
-.macro HYPERVISOR_IRET flag
-# testb $3,1*8(%rsp) /* Don't need to do that in Mini-os, as */
-# jnz 2f /* there is no userspace? */
- testl $NMI_MASK,2*8(%rsp)
- jnz 2f
-
- testb $1,(xen_features+XENFEAT_supervisor_mode_kernel)
- jnz 1f
-
- /* Direct iret to kernel space. Correct CS and SS. */
- orb $3,1*8(%rsp)
- orb $3,4*8(%rsp)
-1: iretq
-
-2: /* Slow iret via hypervisor. */
- andl $~NMI_MASK, 16(%rsp)
- pushq $\flag
- jmp hypercall_page + (__HYPERVISOR_iret * 32)
-.endm
-
-/*
- * Exception entry point. This expects an error code/orig_rax on the stack
- * and the exception handler in %rax.
- */
-ENTRY(error_entry)
-# _frame RDI
- /* rdi slot contains rax, oldrax contains error code */
- cld
- subq $14*8,%rsp
-# CFI_ADJUST_CFA_OFFSET (14*8)
- movq %rsi,13*8(%rsp)
-# CFI_REL_OFFSET rsi,RSI
- movq 14*8(%rsp),%rsi /* load rax from rdi slot */
- movq %rdx,12*8(%rsp)
-# CFI_REL_OFFSET rdx,RDX
- movq %rcx,11*8(%rsp)
-# CFI_REL_OFFSET rcx,RCX
- movq %rsi,10*8(%rsp) /* store rax */
-# CFI_REL_OFFSET rax,RAX
- movq %r8, 9*8(%rsp)
-# CFI_REL_OFFSET r8,R8
- movq %r9, 8*8(%rsp)
-# CFI_REL_OFFSET r9,R9
- movq %r10,7*8(%rsp)
-# CFI_REL_OFFSET r10,R10
- movq %r11,6*8(%rsp)
-# CFI_REL_OFFSET r11,R11
- movq %rbx,5*8(%rsp)
-# CFI_REL_OFFSET rbx,RBX
- movq %rbp,4*8(%rsp)
-# CFI_REL_OFFSET rbp,RBP
- movq %r12,3*8(%rsp)
-# CFI_REL_OFFSET r12,R12
- movq %r13,2*8(%rsp)
-# CFI_REL_OFFSET r13,R13
- movq %r14,1*8(%rsp)
-# CFI_REL_OFFSET r14,R14
- movq %r15,(%rsp)
-# CFI_REL_OFFSET r15,R15
-#if 0
- cmpl $__KERNEL_CS,CS(%rsp)
- je error_kernelspace
-#endif
-error_call_handler:
- movq %rdi, RDI(%rsp)
- movq %rsp,%rdi
- movq ORIG_RAX(%rsp),%rsi # get error code
- movq $-1,ORIG_RAX(%rsp)
- call *%rax
-
-.macro zeroentry sym
-# INTR_FRAME
- movq (%rsp),%rcx
- movq 8(%rsp),%r11
- addq $0x10,%rsp /* skip rcx and r11 */
- pushq $0 /* push error code/oldrax */
-# CFI_ADJUST_CFA_OFFSET 8
- pushq %rax /* push real oldrax to the rdi slot */
-# CFI_ADJUST_CFA_OFFSET 8
- leaq \sym(%rip),%rax
- jmp error_entry
-# CFI_ENDPROC
-.endm
-
-.macro errorentry sym
-# XCPT_FRAME
- movq (%rsp),%rcx
- movq 8(%rsp),%r11
- addq $0x10,%rsp /* rsp points to the error code */
- pushq %rax
-# CFI_ADJUST_CFA_OFFSET 8
- leaq \sym(%rip),%rax
- jmp error_entry
-# CFI_ENDPROC
-.endm
-
-#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
-#define XEN_PUT_VCPU_INFO(reg)
-#define XEN_PUT_VCPU_INFO_fixup
-#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
-#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
-#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
-
-#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
- XEN_LOCKED_BLOCK_EVENTS(reg) ; \
- XEN_PUT_VCPU_INFO(reg)
-
-#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
-        XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
-        XEN_PUT_VCPU_INFO(reg)
-
-
-
-ENTRY(hypervisor_callback)
- zeroentry hypervisor_callback2
-
-ENTRY(hypervisor_callback2)
- movq %rdi, %rsp
-11: movq %gs:8,%rax
- incl %gs:0
- cmovzq %rax,%rsp
- pushq %rdi
- call do_hypervisor_callback
- popq %rsp
- decl %gs:0
- jmp error_exit
-
-# ALIGN
-restore_all_enable_events:
- XEN_UNBLOCK_EVENTS(%rsi) # %rsi is already set up...
-
-scrit: /**** START OF CRITICAL REGION ****/
- XEN_TEST_PENDING(%rsi)
- jnz 14f # process more events if necessary...
- XEN_PUT_VCPU_INFO(%rsi)
- RESTORE_ARGS 0,8,0
- HYPERVISOR_IRET 0
-
-14: XEN_LOCKED_BLOCK_EVENTS(%rsi)
- XEN_PUT_VCPU_INFO(%rsi)
- SAVE_REST
- movq %rsp,%rdi # set the argument again
- jmp 11b
-ecrit: /**** END OF CRITICAL REGION ****/
-
-
-retint_kernel:
-retint_restore_args:
- movl EFLAGS-REST_SKIP(%rsp), %eax
- shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
- XEN_GET_VCPU_INFO(%rsi)
- andb evtchn_upcall_mask(%rsi),%al
- andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
- jnz restore_all_enable_events # != 0 => enable event delivery
- XEN_PUT_VCPU_INFO(%rsi)
-
- RESTORE_ARGS 0,8,0
- HYPERVISOR_IRET 0
-
-
-error_exit:
- RESTORE_REST
-/* cli */
- XEN_BLOCK_EVENTS(%rsi)
- jmp retint_kernel
-
-
-
-ENTRY(failsafe_callback)
- popq %rcx
- popq %r11
- iretq
-
-
-ENTRY(coprocessor_error)
- zeroentry do_coprocessor_error
-
-
-ENTRY(simd_coprocessor_error)
- zeroentry do_simd_coprocessor_error
-
-
-ENTRY(device_not_available)
- zeroentry do_device_not_available
-
-
-ENTRY(debug)
-# INTR_FRAME
-# CFI_ADJUST_CFA_OFFSET 8 */
- zeroentry do_debug
-# CFI_ENDPROC
-
-
-ENTRY(int3)
-# INTR_FRAME
-# CFI_ADJUST_CFA_OFFSET 8 */
- zeroentry do_int3
-# CFI_ENDPROC
-
-ENTRY(overflow)
- zeroentry do_overflow
-
-
-ENTRY(bounds)
- zeroentry do_bounds
-
-
-ENTRY(invalid_op)
- zeroentry do_invalid_op
-
-
-ENTRY(coprocessor_segment_overrun)
- zeroentry do_coprocessor_segment_overrun
-
-
-ENTRY(invalid_TSS)
- errorentry do_invalid_TSS
-
-
-ENTRY(segment_not_present)
- errorentry do_segment_not_present
-
-
-/* runs on exception stack */
-ENTRY(stack_segment)
-# XCPT_FRAME
- errorentry do_stack_segment
-# CFI_ENDPROC
-
-
-ENTRY(general_protection)
- errorentry do_general_protection
-
-
-ENTRY(alignment_check)
- errorentry do_alignment_check
-
-
-ENTRY(divide_error)
- zeroentry do_divide_error
-
-
-ENTRY(spurious_interrupt_bug)
- zeroentry do_spurious_interrupt_bug
-
-
-ENTRY(page_fault)
- errorentry do_page_fault
-
-
-
-
-
-ENTRY(thread_starter)
- popq %rdi
- popq %rbx
- call *%rbx
- call exit_thread
-
-
diff -r 7e9077dd4010 -r 26c75e0e48ed linux-2.6-xen-sparse/arch/i386/kernel/microcode-xen.c
--- a/linux-2.6-xen-sparse/arch/i386/kernel/microcode-xen.c Wed Jan 17 19:55:48 2007 -0700
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/microcode-xen.c Wed Jan 17 21:31:22 2007 -0700
@@ -85,7 +85,7 @@ static ssize_t microcode_write (struct f
{
ssize_t ret;
- if (len < DEFAULT_UCODE_TOTALSIZE) {
+ if (len < MC_HEADER_SIZE) {
printk(KERN_ERR "microcode: not enough data\n");
return -EINVAL;
}
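The old check demanded a full default-sized update (DEFAULT_UCODE_TOTALSIZE)
and so rejected legitimately smaller images; the new one only insists on a
complete header before further parsing. A sketch of the resulting shape, with
an assumed header size since the real MC_HEADER_SIZE lives in the driver:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/types.h>

    #define MC_HEADER_SIZE_MODEL 48   /* assumed stand-in for MC_HEADER_SIZE */

    static ssize_t microcode_write_model(const void *buf, size_t len)
    {
        (void)buf;
        if (len < MC_HEADER_SIZE_MODEL) {
            fprintf(stderr, "microcode: not enough data\n");
            return -EINVAL;
        }
        /* ... pass the buffer on; the hypervisor does the real checks ... */
        return (ssize_t)len;
    }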
diff -r 7e9077dd4010 -r 26c75e0e48ed linux-2.6-xen-sparse/arch/i386/mm/fault-xen.c
--- a/linux-2.6-xen-sparse/arch/i386/mm/fault-xen.c Wed Jan 17 19:55:48 2007 -0700
+++ b/linux-2.6-xen-sparse/arch/i386/mm/fault-xen.c Wed Jan 17 21:31:22 2007 -0700
@@ -232,9 +232,12 @@ static void dump_fault_path(unsigned lon
p += (address >> 21) * 2;
printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n",
page, p[1], p[0]);
-#ifndef CONFIG_HIGHPTE
+ mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
+#ifdef CONFIG_HIGHPTE
+ if (mfn_to_pfn(mfn) >= highstart_pfn)
+ return;
+#endif
if (p[0] & 1) {
- mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
page = mfn_to_pfn(mfn) << PAGE_SHIFT;
p = (unsigned long *) __va(page);
address &= 0x001fffff;
@@ -242,7 +245,6 @@ static void dump_fault_path(unsigned lon
printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
page, p[1], p[0]);
}
-#endif
}
}
#else
@@ -254,13 +256,16 @@ static void dump_fault_path(unsigned lon
page = ((unsigned long *) __va(page))[address >> 22];
printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
machine_to_phys(page));
+#ifdef CONFIG_HIGHPTE
/*
* We must not directly access the pte in the highpte
- * case, the page table might be allocated in highmem.
+ * case if the page table is located in highmem.
 * And let's rather not kmap-atomic the pte, just in case
* it's allocated already.
*/
-#ifndef CONFIG_HIGHPTE
+ if ((page >> PAGE_SHIFT) >= highstart_pfn)
+ return;
+#endif
if (page & 1) {
page &= PAGE_MASK;
address &= 0x003ff000;
@@ -269,7 +274,6 @@ static void dump_fault_path(unsigned lon
printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
machine_to_phys(page));
}
-#endif
}
#endif
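Both hunks apply the same guard: compute the MFN of the page-table page first,
then bail out before dereferencing it if CONFIG_HIGHPTE may have placed it in
highmem, where __va() is invalid. A stand-alone sketch of the pattern;
mfn_to_pfn_model() and the pfn threshold are assumptions standing in for the
kernel's M2P lookup and highstart_pfn:

    #include <stdio.h>

    static unsigned long highstart_pfn_model = 0x38000; /* assumed lowmem top */

    static unsigned long mfn_to_pfn_model(unsigned long mfn)
    {
        return mfn;   /* identity map stands in for the real M2P table */
    }

    static void dump_pte_model(unsigned long mfn, int highpte)
    {
        if (highpte && mfn_to_pfn_model(mfn) >= highstart_pfn_model)
            return;   /* PTE page in highmem: __va() would be bogus */
        printf("pte page pfn=%#lx is in lowmem, safe to map\n",
               mfn_to_pfn_model(mfn));
    }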
diff -r 7e9077dd4010 -r 26c75e0e48ed tools/libfsimage/common/Makefile
--- a/tools/libfsimage/common/Makefile Wed Jan 17 19:55:48 2007 -0700
+++ b/tools/libfsimage/common/Makefile Wed Jan 17 21:31:22 2007 -0700
@@ -1,7 +1,7 @@ XEN_ROOT = ../../..
XEN_ROOT = ../../..
include $(XEN_ROOT)/tools/Rules.mk
-MAJOR = 1.0
+MAJOR = 1.1
MINOR = 0
CFLAGS += -Werror -Wp,-MD,.$(@F).d
diff -r 7e9077dd4010 -r 26c75e0e48ed tools/libfsimage/common/fsimage.c
--- a/tools/libfsimage/common/fsimage.c Wed Jan 17 19:55:48 2007 -0700
+++ b/tools/libfsimage/common/fsimage.c Wed Jan 17 21:31:22 2007 -0700
@@ -74,7 +74,7 @@ void fsi_close_fsimage(fsi_t *fsi)
pthread_mutex_lock(&fsi_lock);
fsi->f_plugin->fp_ops->fpo_umount(fsi);
(void) close(fsi->f_fd);
- fsip_fs_free(fsi);
+ free(fsi);
pthread_mutex_unlock(&fsi_lock);
}
diff -r 7e9077dd4010 -r 26c75e0e48ed tools/libfsimage/common/fsimage_grub.c
--- a/tools/libfsimage/common/fsimage_grub.c Wed Jan 17 19:55:48 2007 -0700
+++ b/tools/libfsimage/common/fsimage_grub.c Wed Jan 17 21:31:22 2007 -0700
@@ -193,6 +193,7 @@ static int
static int
fsig_umount(fsi_t *fsi)
{
+ free(fsi->f_data);
return (0);
}
@@ -250,6 +251,7 @@ static int
static int
fsig_close(fsi_file_t *ffi)
{
+ free(ffi->ff_data);
fsip_file_free(ffi);
return (0);
}
diff -r 7e9077dd4010 -r 26c75e0e48ed tools/libfsimage/common/fsimage_plugin.c
--- a/tools/libfsimage/common/fsimage_plugin.c Wed Jan 17 19:55:48 2007 -0700
+++ b/tools/libfsimage/common/fsimage_plugin.c Wed Jan 17 21:31:22 2007 -0700
@@ -40,13 +40,6 @@ fsip_fs_set_data(fsi_t *fsi, void *data)
fsi->f_data = data;
}
-void
-fsip_fs_free(fsi_t *fsi)
-{
- free(fsi->f_data);
- free(fsi);
-}
-
fsi_file_t *
fsip_file_alloc(fsi_t *fsi, void *data)
{
@@ -64,7 +57,6 @@ void
void
fsip_file_free(fsi_file_t *ffi)
{
- free(ffi->ff_data);
free(ffi);
}
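Together with the fsimage.c and fsimage_grub.c hunks above, this removes
fsip_fs_free() from the plugin interface: each plugin now frees the data it
allocated (in its umount and close callbacks), and the common layer frees only
its own containers, hence the MAJOR bump. A minimal model of the new ownership
split, with simplified stand-in types:

    #include <stdlib.h>

    struct fsi_model {
        void *f_data;                          /* owned by the plugin */
        void (*fpo_umount)(struct fsi_model *);
    };

    static void plugin_umount_model(struct fsi_model *fsi)
    {
        free(fsi->f_data);                     /* plugin frees what it made */
        fsi->f_data = NULL;
    }

    static void fsi_close_model(struct fsi_model *fsi)
    {
        fsi->fpo_umount(fsi);                  /* plugin cleans up f_data... */
        free(fsi);                             /* ...common layer frees handle */
    }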
diff -r 7e9077dd4010 -r 26c75e0e48ed tools/libfsimage/common/fsimage_plugin.h
--- a/tools/libfsimage/common/fsimage_plugin.h Wed Jan 17 19:55:48 2007 -0700
+++ b/tools/libfsimage/common/fsimage_plugin.h Wed Jan 17 21:31:22 2007 -0700
@@ -50,11 +50,10 @@ typedef fsi_plugin_ops_t *
(*fsi_plugin_init_t)(int, fsi_plugin_t *, const char **);
void fsip_fs_set_data(fsi_t *, void *);
-void fsip_fs_free(fsi_t *);
fsi_file_t *fsip_file_alloc(fsi_t *, void *);
void fsip_file_free(fsi_file_t *);
-fsi_t * fsip_fs(fsi_file_t *ffi);
-uint64_t fsip_fs_offset(fsi_t *fsi);
+fsi_t *fsip_fs(fsi_file_t *);
+uint64_t fsip_fs_offset(fsi_t *);
void *fsip_fs_data(fsi_t *);
void *fsip_file_data(fsi_file_t *);
diff -r 7e9077dd4010 -r 26c75e0e48ed tools/libfsimage/common/mapfile-GNU
--- a/tools/libfsimage/common/mapfile-GNU Wed Jan 17 19:55:48 2007 -0700
+++ b/tools/libfsimage/common/mapfile-GNU Wed Jan 17 21:31:22 2007 -0700
@@ -1,5 +1,5 @@ VERSION {
VERSION {
- libfsimage.so.1.0 {
+ libfsimage.so.1.1 {
global:
fsi_open_fsimage;
fsi_close_fsimage;
@@ -10,7 +10,6 @@ VERSION {
fsi_pread_file;
fsip_fs_set_data;
- fsip_fs_free;
fsip_file_alloc;
fsip_file_free;
fsip_fs;
diff -r 7e9077dd4010 -r 26c75e0e48ed tools/libfsimage/common/mapfile-SunOS
--- a/tools/libfsimage/common/mapfile-SunOS Wed Jan 17 19:55:48 2007 -0700
+++ b/tools/libfsimage/common/mapfile-SunOS Wed Jan 17 21:31:22 2007 -0700
@@ -1,4 +1,4 @@ libfsimage.so.1.0 {
-libfsimage.so.1.0 {
+libfsimage.so.1.1 {
global:
fsi_open_fsimage;
fsi_close_fsimage;
@@ -9,7 +9,6 @@ libfsimage.so.1.1 {
fsi_pread_file;
fsip_fs_set_data;
- fsip_fs_free;
fsip_file_alloc;
fsip_file_free;
fsip_fs;
diff -r 7e9077dd4010 -r 26c75e0e48ed tools/libfsimage/ext2fs-lib/ext2fs-lib.c
--- a/tools/libfsimage/ext2fs-lib/ext2fs-lib.c Wed Jan 17 19:55:48 2007 -0700
+++ b/tools/libfsimage/ext2fs-lib/ext2fs-lib.c Wed Jan 17 21:31:22 2007 -0700
@@ -58,9 +58,11 @@ ext2lib_umount(fsi_t *fsi)
{
ext2_filsys *fs = fsip_fs_data(fsi);
if (ext2fs_close(*fs) != 0) {
+ free(fs);
errno = EINVAL;
return (-1);
}
+ free(fs);
return (0);
}
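The same ownership discipline, plus a leak fix: the handle container is now
released on the failure path as well as on success. A sketch, with
ext2_close_model() as an assumed stand-in for ext2fs_close():

    #include <errno.h>
    #include <stdlib.h>

    struct ext2_fs_model { int dummy; };

    extern int ext2_close_model(struct ext2_fs_model *fs); /* assumed */

    static int umount_model(struct ext2_fs_model **fs)
    {
        if (ext2_close_model(*fs) != 0) {
            free(fs);            /* don't leak the container on failure */
            errno = EINVAL;
            return -1;
        }
        free(fs);
        return 0;
    }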
diff -r 7e9077dd4010 -r 26c75e0e48ed tools/libxc/xc_linux_build.c
--- a/tools/libxc/xc_linux_build.c Wed Jan 17 19:55:48 2007 -0700
+++ b/tools/libxc/xc_linux_build.c Wed Jan 17 21:31:22 2007 -0700
@@ -741,7 +741,7 @@ static int setup_guest(int xc_handle,
/*
* Enable shadow translate mode. This must happen after
* populate physmap because the p2m reservation is based on
- * the domains current memory allocation.
+ * the domain's current memory allocation.
*/
if ( xc_shadow_control(xc_handle, dom,
XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE,
diff -r 7e9077dd4010 -r 26c75e0e48ed tools/libxc/xc_linux_restore.c
--- a/tools/libxc/xc_linux_restore.c Wed Jan 17 19:55:48 2007 -0700
+++ b/tools/libxc/xc_linux_restore.c Wed Jan 17 21:31:22 2007 -0700
@@ -12,7 +12,7 @@
#include "xg_private.h"
#include "xg_save_restore.h"
-/* max mfn of the whole machine */
+/* max mfn of the current host machine */
static unsigned long max_mfn;
/* virtual starting address of the hypervisor */
@@ -29,6 +29,9 @@ static xen_pfn_t *live_p2m = NULL;
/* A table mapping each PFN to its new MFN. */
static xen_pfn_t *p2m = NULL;
+
+/* A table of P2M mappings in the current region */
+static xen_pfn_t *p2m_batch = NULL;
static ssize_t
@@ -57,46 +60,78 @@ read_exact(int fd, void *buf, size_t cou
** This function inverts that operation, replacing the pfn values with
** the (now known) appropriate mfn values.
*/
-static int uncanonicalize_pagetable(unsigned long type, void *page)
+static int uncanonicalize_pagetable(int xc_handle, uint32_t dom,
+ unsigned long type, void *page)
{
int i, pte_last;
unsigned long pfn;
uint64_t pte;
+ int nr_mfns = 0;
pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8);
- /* Now iterate through the page table, uncanonicalizing each PTE */
+ /* First pass: work out how many (if any) MFNs we need to alloc */
for(i = 0; i < pte_last; i++) {
-
+
if(pt_levels == 2)
pte = ((uint32_t *)page)[i];
else
pte = ((uint64_t *)page)[i];
-
- if(pte & _PAGE_PRESENT) {
-
- pfn = (pte >> PAGE_SHIFT) & 0xffffffff;
-
- if(pfn >= max_pfn) {
- /* This "page table page" is probably not one; bail. */
- ERROR("Frame number in type %lu page table is out of range: "
- "i=%d pfn=0x%lx max_pfn=%lu",
- type >> 28, i, pfn, max_pfn);
- return 0;
- }
-
-
- pte &= 0xffffff0000000fffULL;
- pte |= (uint64_t)p2m[pfn] << PAGE_SHIFT;
-
- if(pt_levels == 2)
- ((uint32_t *)page)[i] = (uint32_t)pte;
- else
- ((uint64_t *)page)[i] = (uint64_t)pte;
-
-
-
- }
+
+ /* XXX SMH: below needs fixing for PROT_NONE etc */
+ if(!(pte & _PAGE_PRESENT))
+ continue;
+
+ pfn = (pte >> PAGE_SHIFT) & 0xffffffff;
+
+ if(pfn >= max_pfn) {
+ /* This "page table page" is probably not one; bail. */
+ ERROR("Frame number in type %lu page table is out of range: "
+ "i=%d pfn=0x%lx max_pfn=%lu",
+ type >> 28, i, pfn, max_pfn);
+ return 0;
+ }
+
+ if(p2m[pfn] == INVALID_P2M_ENTRY) {
+ /* Have a 'valid' PFN without a matching MFN - need to alloc */
+ p2m_batch[nr_mfns++] = pfn;
+ }
+ }
+
+
+    /* Allocate the requisite number of mfns */
+ if (nr_mfns && xc_domain_memory_populate_physmap(
+ xc_handle, dom, nr_mfns, 0, 0, p2m_batch) != 0) {
+ ERROR("Failed to allocate memory for batch.!\n");
+ errno = ENOMEM;
+ return 0;
+ }
+
+ /* Second pass: uncanonicalize each present PTE */
+ nr_mfns = 0;
+ for(i = 0; i < pte_last; i++) {
+
+ if(pt_levels == 2)
+ pte = ((uint32_t *)page)[i];
+ else
+ pte = ((uint64_t *)page)[i];
+
+ /* XXX SMH: below needs fixing for PROT_NONE etc */
+ if(!(pte & _PAGE_PRESENT))
+ continue;
+
+ pfn = (pte >> PAGE_SHIFT) & 0xffffffff;
+
+ if(p2m[pfn] == INVALID_P2M_ENTRY)
+ p2m[pfn] = p2m_batch[nr_mfns++];
+
+ pte &= 0xffffff0000000fffULL;
+ pte |= (uint64_t)p2m[pfn] << PAGE_SHIFT;
+
+ if(pt_levels == 2)
+ ((uint32_t *)page)[i] = (uint32_t)pte;
+ else
+ ((uint64_t *)page)[i] = (uint64_t)pte;
}
return 1;
@@ -140,6 +175,7 @@ int xc_linux_restore(int xc_handle, int
/* A temporary mapping of the guest's start_info page. */
start_info_t *start_info;
+ /* Our mapping of the current region (batch) */
char *region_base;
xc_mmu_t *mmu = NULL;
@@ -244,8 +280,10 @@ int xc_linux_restore(int xc_handle, int
p2m = calloc(max_pfn, sizeof(xen_pfn_t));
pfn_type = calloc(max_pfn, sizeof(unsigned long));
region_mfn = calloc(MAX_BATCH_SIZE, sizeof(xen_pfn_t));
-
- if ((p2m == NULL) || (pfn_type == NULL) || (region_mfn == NULL)) {
+ p2m_batch = calloc(MAX_BATCH_SIZE, sizeof(xen_pfn_t));
+
+ if ((p2m == NULL) || (pfn_type == NULL) ||
+ (region_mfn == NULL) || (p2m_batch == NULL)) {
ERROR("memory alloc failed");
errno = ENOMEM;
goto out;
@@ -253,6 +291,11 @@ int xc_linux_restore(int xc_handle, int
if (lock_pages(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE)) {
ERROR("Could not lock region_mfn");
+ goto out;
+ }
+
+ if (lock_pages(p2m_batch, sizeof(xen_pfn_t) * MAX_BATCH_SIZE)) {
+ ERROR("Could not lock p2m_batch");
goto out;
}
@@ -270,17 +313,9 @@ int xc_linux_restore(int xc_handle, int
goto out;
}
+ /* Mark all PFNs as invalid; we allocate on demand */
for ( pfn = 0; pfn < max_pfn; pfn++ )
- p2m[pfn] = pfn;
-
- if (xc_domain_memory_populate_physmap(xc_handle, dom, max_pfn,
- 0, 0, p2m) != 0) {
- ERROR("Failed to increase reservation by %lx KB", PFN_TO_KB(max_pfn));
- errno = ENOMEM;
- goto out;
- }
-
- DPRINTF("Increased domain reservation by %lx KB\n", PFN_TO_KB(max_pfn));
+ p2m[pfn] = INVALID_P2M_ENTRY;
if(!(mmu = xc_init_mmu_updates(xc_handle, dom))) {
ERROR("Could not initialise for MMU updates");
@@ -298,7 +333,7 @@ int xc_linux_restore(int xc_handle, int
n = 0;
while (1) {
- int j;
+ int j, nr_mfns = 0;
this_pc = (n * 100) / max_pfn;
if ( (this_pc - prev_pc) >= 5 )
@@ -333,20 +368,57 @@ int xc_linux_restore(int xc_handle, int
goto out;
}
+ /* First pass for this batch: work out how much memory to alloc */
+ nr_mfns = 0;
for ( i = 0; i < j; i++ )
{
unsigned long pfn, pagetype;
pfn = region_pfn_type[i] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
pagetype = region_pfn_type[i] & XEN_DOMCTL_PFINFO_LTAB_MASK;
+ if ( (pagetype != XEN_DOMCTL_PFINFO_XTAB) &&
+ (p2m[pfn] == INVALID_P2M_ENTRY) )
+ {
+ /* Have a live PFN which hasn't had an MFN allocated */
+ p2m_batch[nr_mfns++] = pfn;
+ }
+ }
+
+
+ /* Now allocate a bunch of mfns for this batch */
+ if (nr_mfns && xc_domain_memory_populate_physmap(
+ xc_handle, dom, nr_mfns, 0, 0, p2m_batch) != 0) {
+ ERROR("Failed to allocate memory for batch.!\n");
+ errno = ENOMEM;
+ goto out;
+ }
+
+ /* Second pass for this batch: update p2m[] and region_mfn[] */
+ nr_mfns = 0;
+ for ( i = 0; i < j; i++ )
+ {
+ unsigned long pfn, pagetype;
+ pfn = region_pfn_type[i] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
+ pagetype = region_pfn_type[i] & XEN_DOMCTL_PFINFO_LTAB_MASK;
+
if ( pagetype == XEN_DOMCTL_PFINFO_XTAB)
- region_mfn[i] = 0; /* we know map will fail, but don't care */
- else
- region_mfn[i] = p2m[pfn];
- }
-
+ region_mfn[i] = ~0UL; /* map will fail but we don't care */
+ else
+ {
+ if (p2m[pfn] == INVALID_P2M_ENTRY) {
+ /* We just allocated a new mfn above; update p2m */
+ p2m[pfn] = p2m_batch[nr_mfns++];
+ }
+
+ /* setup region_mfn[] for batch map */
+ region_mfn[i] = p2m[pfn];
+ }
+ }
+
+ /* Map relevant mfns */
region_base = xc_map_foreign_batch(
xc_handle, dom, PROT_WRITE, region_mfn, j);
+
if ( region_base == NULL )
{
ERROR("map batch failed");
@@ -401,7 +473,8 @@ int xc_linux_restore(int xc_handle, int
pae_extended_cr3 ||
(pagetype != XEN_DOMCTL_PFINFO_L1TAB)) {
- if (!uncanonicalize_pagetable(pagetype, page)) {
+ if (!uncanonicalize_pagetable(xc_handle, dom,
+ pagetype, page)) {
/*
** Failing to uncanonicalize a page table can be ok
** under live migration since the pages type may have
@@ -411,10 +484,8 @@ int xc_linux_restore(int xc_handle, int
pagetype >> 28, pfn, mfn);
nraces++;
continue;
- }
-
- }
-
+ }
+ }
}
else if ( pagetype != XEN_DOMCTL_PFINFO_NOTAB )
{
@@ -486,7 +557,7 @@ int xc_linux_restore(int xc_handle, int
*/
int j, k;
-
+
    /* First pass: find all L3TABs currently in > 4G mfns and get new mfns */
for ( i = 0; i < max_pfn; i++ )
{
@@ -555,7 +626,8 @@ int xc_linux_restore(int xc_handle, int
}
for(k = 0; k < j; k++) {
- if(!uncanonicalize_pagetable(XEN_DOMCTL_PFINFO_L1TAB,
+ if(!uncanonicalize_pagetable(xc_handle, dom,
+ XEN_DOMCTL_PFINFO_L1TAB,
region_base + k*PAGE_SIZE)) {
ERROR("failed uncanonicalize pt!");
goto out;
@@ -631,7 +703,7 @@ int xc_linux_restore(int xc_handle, int
{
unsigned int count;
unsigned long *pfntab;
- int rc;
+ int nr_frees, rc;
if (!read_exact(io_fd, &count, sizeof(count))) {
ERROR("Error when reading pfn count");
@@ -648,29 +720,30 @@ int xc_linux_restore(int xc_handle, int
goto out;
}
+ nr_frees = 0;
for (i = 0; i < count; i++) {
unsigned long pfn = pfntab[i];
- if(pfn > max_pfn)
- /* shouldn't happen - continue optimistically */
- continue;
-
- pfntab[i] = p2m[pfn];
- p2m[pfn] = INVALID_P2M_ENTRY; // not in pseudo-physical map
- }
-
- if (count > 0) {
+ if(p2m[pfn] != INVALID_P2M_ENTRY) {
+ /* pfn is not in physmap now, but was at some point during
+ the save/migration process - need to free it */
+ pfntab[nr_frees++] = p2m[pfn];
+ p2m[pfn] = INVALID_P2M_ENTRY; // not in pseudo-physical map
+ }
+ }
+
+ if (nr_frees > 0) {
struct xen_memory_reservation reservation = {
- .nr_extents = count,
+ .nr_extents = nr_frees,
.extent_order = 0,
.domid = dom
};
set_xen_guest_handle(reservation.extent_start, pfntab);
if ((rc = xc_memory_op(xc_handle, XENMEM_decrease_reservation,
- &reservation)) != count) {
+ &reservation)) != nr_frees) {
ERROR("Could not decrease reservation : %d", rc);
goto out;
} else
@@ -791,6 +864,6 @@ int xc_linux_restore(int xc_handle, int
free(pfn_type);
DPRINTF("Restore exit with rc=%d\n", rc);
-
+
return rc;
}
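
The restore changes above replace the old populate-everything-up-front
scheme with on-demand allocation, done in two passes over each batch:
pass one collects every PFN whose p2m[] slot is still INVALID_P2M_ENTRY,
one batched hypercall (xc_domain_memory_populate_physmap) allocates MFNs
for exactly those, and pass two writes the results back into p2m[] in the
same order. A simplified sketch of that pattern, with a hypothetical
stubbed allocator standing in for the hypercall:

    #include <stddef.h>

    #define INVALID_ENTRY (~0UL)

    /* Stand-in for xc_domain_memory_populate_physmap(): fills
     * batch[0..n) with freshly allocated MFNs; returns 0 on success. */
    extern int alloc_mfns(unsigned long *batch, size_t n);

    static int map_batch(unsigned long *p2m, const unsigned long *pfns,
                         size_t count, unsigned long *batch)
    {
        size_t i, nr = 0;

        /* Pass 1: which PFNs still need an MFN? */
        for (i = 0; i < count; i++)
            if (p2m[pfns[i]] == INVALID_ENTRY)
                batch[nr++] = pfns[i];

        /* One batched allocation covers exactly the missing entries. */
        if (nr && alloc_mfns(batch, nr) != 0)
            return -1;

        /* Pass 2: install the new MFNs in the same order. */
        nr = 0;
        for (i = 0; i < count; i++)
            if (p2m[pfns[i]] == INVALID_ENTRY)
                p2m[pfns[i]] = batch[nr++];
        return 0;
    }
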
diff -r 7e9077dd4010 -r 26c75e0e48ed tools/libxc/xc_linux_save.c
--- a/tools/libxc/xc_linux_save.c Wed Jan 17 19:55:48 2007 -0700
+++ b/tools/libxc/xc_linux_save.c Wed Jan 17 21:31:22 2007 -0700
@@ -660,13 +660,6 @@ int xc_linux_save(int xc_handle, int io_
goto out;
}
- /* cheesy sanity check */
- if ((info.max_memkb >> (PAGE_SHIFT - 10)) > max_mfn) {
- ERROR("Invalid state record -- pfn count out of range: %lu",
- (info.max_memkb >> (PAGE_SHIFT - 10)));
- goto out;
- }
-
/* Map the shared info frame */
if(!(live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
PROT_READ, shared_info_frame))) {
diff -r 7e9077dd4010 -r 26c75e0e48ed tools/pygrub/src/pygrub
--- a/tools/pygrub/src/pygrub Wed Jan 17 19:55:48 2007 -0700
+++ b/tools/pygrub/src/pygrub Wed Jan 17 21:31:22 2007 -0700
@@ -405,6 +405,9 @@ class Grub:
c = self.screen.getch()
if mytime != -1:
mytime += 1
+ if mytime >= int(timeout):
+ self.isdone = True
+ break
# handle keypresses
if c == ord('c'):
diff -r 7e9077dd4010 -r 26c75e0e48ed tools/python/xen/xend/XendCheckpoint.py
--- a/tools/python/xen/xend/XendCheckpoint.py Wed Jan 17 19:55:48 2007 -0700
+++ b/tools/python/xen/xend/XendCheckpoint.py Wed Jan 17 21:31:22 2007 -0700
@@ -147,18 +147,20 @@ def restore(xd, fd, dominfo = None, paus
assert store_port
assert console_port
+ nr_pfns = (dominfo.getMemoryTarget() + 3) / 4
+
try:
l = read_exact(fd, sizeof_unsigned_long,
"not a valid guest state file: pfn count read")
- nr_pfns = unpack("L", l)[0] # native sizeof long
- if nr_pfns > 16*1024*1024: # XXX
+ max_pfn = unpack("L", l)[0] # native sizeof long
+ if max_pfn > 16*1024*1024: # XXX
raise XendError(
"not a valid guest state file: pfn count out of range")
balloon.free(xc.pages_to_kib(nr_pfns))
cmd = map(str, [xen.util.auxbin.pathTo(XC_RESTORE),
- fd, dominfo.getDomid(), nr_pfns,
+ fd, dominfo.getDomid(), max_pfn,
store_port, console_port])
log.debug("[xc_restore]: %s", string.join(cmd))
diff -r 7e9077dd4010 -r 26c75e0e48ed tools/python/xen/xend/XendConfig.py
--- a/tools/python/xen/xend/XendConfig.py Wed Jan 17 19:55:48 2007 -0700
+++ b/tools/python/xen/xend/XendConfig.py Wed Jan 17 21:31:22 2007 -0700
@@ -126,6 +126,7 @@ XENAPI_CFG_TYPES = {
'memory_dynamic_min': int,
'memory_dynamic_max': int,
'memory_actual': int,
+ 'cpus': list,
'vcpus_policy': str,
'vcpus_params': str,
'vcpus_number': int,
diff -r 7e9077dd4010 -r 26c75e0e48ed tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py Wed Jan 17 19:55:48 2007 -0700
+++ b/tools/python/xen/xend/XendNode.py Wed Jan 17 21:31:22 2007 -0700
@@ -365,14 +365,24 @@ class XendNode:
return [[k, info[k]] for k in ITEM_ORDER]
+ def xenschedinfo(self):
+ sched_id = self.xc.sched_id_get()
+ if sched_id == xen.lowlevel.xc.XEN_SCHEDULER_SEDF:
+ return 'sedf'
+ elif sched_id == xen.lowlevel.xc.XEN_SCHEDULER_CREDIT:
+ return 'credit'
+ else:
+ return 'unknown'
def xeninfo(self):
info = self.xc.xeninfo()
+ info['xen_scheduler'] = self.xenschedinfo()
ITEM_ORDER = ['xen_major',
'xen_minor',
'xen_extra',
'xen_caps',
+ 'xen_scheduler',
'xen_pagesize',
'platform_params',
'xen_changeset',
diff -r 7e9077dd4010 -r 26c75e0e48ed tools/tests/test_x86_emulator.c
--- a/tools/tests/test_x86_emulator.c Wed Jan 17 19:55:48 2007 -0700
+++ b/tools/tests/test_x86_emulator.c Wed Jan 17 21:31:22 2007 -0700
@@ -118,7 +118,8 @@ int main(int argc, char **argv)
#endif
    ctxt.regs = &regs;
- ctxt.address_bytes = 4;
+ ctxt.addr_size = 32;
+ ctxt.sp_size = 32;
res = mmap((void *)0x100000, MMAP_SZ, PROT_READ|PROT_WRITE,
MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
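
The emulator context now carries widths in bits instead of a byte count,
and the stack-pointer width is tracked separately from the address width
(the struct change appears in the x86_emulate.h hunk later in this
patch). A minimal sketch of the new initialization, mirroring the test
harness above; the helper name is hypothetical:

    #include <string.h>

    /* Set up an emulation context for a flat 32-bit guest. */
    static void init_ctxt32(struct x86_emulate_ctxt *ctxt,
                            struct cpu_user_regs *regs)
    {
        memset(ctxt, 0, sizeof(*ctxt));
        ctxt->regs      = regs;
        ctxt->addr_size = 32;    /* was: address_bytes = 4 */
        ctxt->sp_size   = 32;    /* new: stack width, in bits */
    }
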
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/arch/x86/hvm/i8254.c
--- a/xen/arch/x86/hvm/i8254.c Wed Jan 17 19:55:48 2007 -0700
+++ b/xen/arch/x86/hvm/i8254.c Wed Jan 17 21:31:22 2007 -0700
@@ -182,11 +182,9 @@ void pit_time_fired(struct vcpu *v, void
s->count_load_time = hvm_get_guest_time(v);
}
-static inline void pit_load_count(PITChannelState *s, int val)
+static inline void pit_load_count(PITChannelState *s, int channel, int val)
{
u32 period;
- PITChannelState *ch0 =
-        &current->domain->arch.hvm_domain.pl_time.vpit.channels[0];
if (val == 0)
val = 0x10000;
@@ -194,7 +192,7 @@ static inline void pit_load_count(PITCha
s->count = val;
period = DIV_ROUND((val * 1000000000ULL), PIT_FREQ);
- if (s != ch0)
+ if (channel != 0)
return;
#ifdef DEBUG_PIT
@@ -282,17 +280,17 @@ static void pit_ioport_write(void *opaqu
switch(s->write_state) {
default:
case RW_STATE_LSB:
- pit_load_count(s, val);
+ pit_load_count(s, addr, val);
break;
case RW_STATE_MSB:
- pit_load_count(s, val << 8);
+ pit_load_count(s, addr, val << 8);
break;
case RW_STATE_WORD0:
s->write_latch = val;
s->write_state = RW_STATE_WORD1;
break;
case RW_STATE_WORD1:
- pit_load_count(s, s->write_latch | (val << 8));
+ pit_load_count(s, addr, s->write_latch | (val << 8));
s->write_state = RW_STATE_WORD0;
break;
}
@@ -369,7 +367,7 @@ static void pit_reset(void *opaque)
destroy_periodic_time(&s->pt);
s->mode = 0xff; /* the init mode */
s->gate = (i != 2);
- pit_load_count(s, 0);
+ pit_load_count(s, i, 0);
}
}
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Wed Jan 17 19:55:48 2007 -0700
+++ b/xen/arch/x86/hvm/svm/svm.c Wed Jan 17 21:31:22 2007 -0700
@@ -482,8 +482,8 @@ static int svm_guest_x86_mode(struct vcp
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- if ( vmcb->efer & EFER_LMA )
- return (vmcb->cs.attr.fields.l ? 8 : 4);
+ if ( (vmcb->efer & EFER_LMA) && vmcb->cs.attr.fields.l )
+ return 8;
if ( svm_realmode(v) )
return 2;
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Wed Jan 17 19:55:48 2007 -0700
+++ b/xen/arch/x86/hvm/vmx/vmx.c Wed Jan 17 21:31:22 2007 -0700
@@ -491,8 +491,7 @@ static unsigned long vmx_get_segment_bas
ASSERT(v == current);
#ifdef __x86_64__
- if ( vmx_long_mode_enabled(v) &&
- (__vmread(GUEST_CS_AR_BYTES) & (1u<<13)) )
+ if ( vmx_long_mode_enabled(v) && (__vmread(GUEST_CS_AR_BYTES) & (1u<<13)) )
long_mode = 1;
#endif
@@ -667,8 +666,8 @@ static int vmx_guest_x86_mode(struct vcp
cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
- if ( vmx_long_mode_enabled(v) )
- return ((cs_ar_bytes & (1u<<13)) ? 8 : 4);
+ if ( vmx_long_mode_enabled(v) && (cs_ar_bytes & (1u<<13)) )
+ return 8;
if ( vmx_realmode(v) )
return 2;
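
The SVM and VMX hunks above fix the same bug: EFER.LMA alone does not
mean the guest is executing 64-bit code, because a long-mode guest can
run 32-bit compatibility-mode segments; only LMA together with CS.L
implies a 64-bit code segment. A condensed sketch of the corrected
decision, with hypothetical flag parameters in place of the VMCB/VMCS
reads:

    /* Guest operand/address width in bytes.
     * lma = EFER.LMA, cs_l = CS.L, cs_db = CS.D/B; realmode as tested
     * by svm_realmode()/vmx_realmode() in the real code. */
    static int guest_x86_mode(int lma, int cs_l, int cs_db, int realmode)
    {
        if (lma && cs_l)
            return 8;              /* true 64-bit mode */
        if (realmode)
            return 2;              /* real mode is 16-bit */
        return cs_db ? 4 : 2;      /* protected/compat: CS.D/B decides */
    }
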
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/arch/x86/microcode.c
--- a/xen/arch/x86/microcode.c Wed Jan 17 19:55:48 2007 -0700
+++ b/xen/arch/x86/microcode.c Wed Jan 17 21:31:22 2007 -0700
@@ -249,14 +249,14 @@ static int find_matching_ucodes (void)
}
total_size = get_totalsize(&mc_header);
-    if ((cursor + total_size > user_buffer_size) || (total_size < DEFAULT_UCODE_TOTALSIZE)) {
+ if (cursor + total_size > user_buffer_size) {
        printk(KERN_ERR "microcode: error! Bad data in microcode data file\n");
error = -EINVAL;
goto out;
}
data_size = get_datasize(&mc_header);
-    if ((data_size + MC_HEADER_SIZE > total_size) || (data_size < DEFAULT_UCODE_DATASIZE)) {
+ if (data_size + MC_HEADER_SIZE > total_size) {
        printk(KERN_ERR "microcode: error! Bad data in microcode data file\n");
error = -EINVAL;
goto out;
@@ -459,11 +459,6 @@ int microcode_update(XEN_GUEST_HANDLE(vo
{
int ret;
- if (len < DEFAULT_UCODE_TOTALSIZE) {
- printk(KERN_ERR "microcode: not enough data\n");
- return -EINVAL;
- }
-
if (len != (typeof(user_buffer_size))len) {
printk(KERN_ERR "microcode: too much data\n");
return -E2BIG;
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Jan 17 19:55:48 2007 -0700
+++ b/xen/arch/x86/mm.c Wed Jan 17 21:31:22 2007 -0700
@@ -3236,15 +3236,14 @@ static int ptwr_emulated_update(
if ( unlikely(!get_page_from_l1e(gl1e_to_ml1e(d, nl1e), d)) )
{
if ( (CONFIG_PAGING_LEVELS == 3 || IS_COMPAT(d)) &&
- (bytes == 4) &&
- !do_cmpxchg &&
+ (bytes == 4) && (addr & 4) && !do_cmpxchg &&
(l1e_get_flags(nl1e) & _PAGE_PRESENT) )
{
/*
- * If this is a half-write to a PAE PTE then we assume that the
- * guest has simply got the two writes the wrong way round. We
- * zap the PRESENT bit on the assumption the bottom half will be
- * written immediately after we return to the guest.
+ * If this is an upper-half write to a PAE PTE then we assume that
+ * the guest has simply got the two writes the wrong way round. We
+ * zap the PRESENT bit on the assumption that the bottom half will
+ * be written immediately after we return to the guest.
*/
MEM_LOG("ptwr_emulate: fixing up invalid PAE PTE %"PRIpte,
l1e_get_intpte(nl1e));
@@ -3375,8 +3374,9 @@ int ptwr_do_page_fault(struct vcpu *v, u
(page_get_owner(page) != d) )
goto bail;
- ptwr_ctxt.ctxt.regs = guest_cpu_user_regs();
- ptwr_ctxt.ctxt.address_bytes = IS_COMPAT(d) ? 4 : sizeof(long);
+ ptwr_ctxt.ctxt.regs = regs;
+ ptwr_ctxt.ctxt.addr_size = ptwr_ctxt.ctxt.sp_size =
+ IS_COMPAT(d) ? 32 : BITS_PER_LONG;
ptwr_ctxt.cr2 = addr;
ptwr_ctxt.pte = pte;
if ( x86_emulate(&ptwr_ctxt.ctxt, &ptwr_emulate_ops) )
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c Wed Jan 17 19:55:48 2007 -0700
+++ b/xen/arch/x86/mm/shadow/common.c Wed Jan 17 21:31:22 2007 -0700
@@ -110,7 +110,7 @@ static int hvm_translate_linear_addr(
unsigned long limit, addr = offset;
uint32_t last_byte;
- if ( sh_ctxt->ctxt.address_bytes != 8 )
+ if ( sh_ctxt->ctxt.addr_size != 64 )
{
/*
* COMPATIBILITY MODE: Apply segment checks and add base.
@@ -399,7 +399,7 @@ struct x86_emulate_ops *shadow_init_emul
struct x86_emulate_ops *shadow_init_emulation(
struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs)
{
- struct segment_register *creg;
+ struct segment_register *creg, *sreg;
struct vcpu *v = current;
unsigned long addr;
@@ -407,7 +407,7 @@ struct x86_emulate_ops *shadow_init_emul
if ( !is_hvm_vcpu(v) )
{
- sh_ctxt->ctxt.address_bytes = sizeof(long);
+ sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = BITS_PER_LONG;
return &pv_shadow_emulator_ops;
}
@@ -416,12 +416,20 @@ struct x86_emulate_ops *shadow_init_emul
creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
/* Work out the emulation mode. */
- if ( hvm_long_mode_enabled(v) )
- sh_ctxt->ctxt.address_bytes = creg->attr.fields.l ? 8 : 4;
+ if ( hvm_long_mode_enabled(v) && creg->attr.fields.l )
+ {
+ sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = 64;
+ }
else if ( regs->eflags & X86_EFLAGS_VM )
- sh_ctxt->ctxt.address_bytes = 2;
+ {
+ sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = 16;
+ }
else
- sh_ctxt->ctxt.address_bytes = creg->attr.fields.db ? 4 : 2;
+ {
+ sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
+ sh_ctxt->ctxt.addr_size = creg->attr.fields.db ? 32 : 16;
+ sh_ctxt->ctxt.sp_size = sreg->attr.fields.db ? 32 : 16;
+ }
/* Attempt to prefetch whole instruction. */
sh_ctxt->insn_buf_bytes =
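
shadow_init_emulation() now derives two widths instead of one: the
default address size still comes from CS (CS.L in long mode, otherwise
CS.D/B), but the stack-pointer width comes from SS.B, and outside 64-bit
mode the two can legitimately differ. A sketch of the mapping, with the
segment attribute bits passed in as plain flags:

    /* Widths picked by shadow_init_emulation (sketch):
     *   long mode + CS.L -> 64/64
     *   virtual-8086     -> 16/16
     *   otherwise        -> addr from CS.D/B, sp from SS.B */
    static void pick_sizes(int long_cs, int vm86, int cs_db, int ss_b,
                           unsigned int *addr_size, unsigned int *sp_size)
    {
        if (long_cs)
            *addr_size = *sp_size = 64;
        else if (vm86)
            *addr_size = *sp_size = 16;
        else {
            *addr_size = cs_db ? 32 : 16;
            *sp_size   = ss_b  ? 32 : 16;  /* may differ from addr_size */
        }
    }
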
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Wed Jan 17 19:55:48 2007 -0700
+++ b/xen/arch/x86/mm/shadow/multi.c Wed Jan 17 21:31:22 2007 -0700
@@ -3944,7 +3944,7 @@ sh_x86_emulate_write(struct vcpu *v, uns
if ( !skip ) sh_validate_guest_pt_write(v, mfn, addr, bytes);
/* If we are writing zeros to this page, might want to unshadow */
- if ( likely(bytes >= 4) && (*(u32 *)addr == 0) )
+ if ( likely(bytes >= 4) && (*(u32 *)addr == 0) && is_lo_pte(vaddr) )
check_for_early_unshadow(v, mfn);
sh_unmap_domain_page(addr);
@@ -3996,7 +3996,7 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
vaddr, prev, old, new, *(unsigned long *)addr, bytes);
/* If we are writing zeros to this page, might want to unshadow */
- if ( likely(bytes >= 4) && (*(u32 *)addr == 0) )
+ if ( likely(bytes >= 4) && (*(u32 *)addr == 0) && is_lo_pte(vaddr) )
check_for_early_unshadow(v, mfn);
sh_unmap_domain_page(addr);
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h Wed Jan 17 19:55:48 2007 -0700
+++ b/xen/arch/x86/mm/shadow/private.h Wed Jan 17 21:31:22 2007 -0700
@@ -427,6 +427,11 @@ extern int sh_remove_write_access(struct
#undef mfn_valid
#define mfn_valid(_mfn) (mfn_x(_mfn) < max_page)
+#if GUEST_PAGING_LEVELS >= 3
+# define is_lo_pte(_vaddr) (((_vaddr)&0x4)==0)
+#else
+# define is_lo_pte(_vaddr) (1)
+#endif
static inline int
sh_mfn_is_a_page_table(mfn_t gmfn)
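
The is_lo_pte() test added above narrows the early-unshadow heuristic:
with 3-level (PAE) or 4-level guests a PTE is 8 bytes and is often
cleared with two 4-byte writes, low half first, so only a zero written
to the low word (bit 2 of the address clear) is taken as evidence that
the guest is tearing the page table down. The macro restated as a
function, purely for illustration:

    /* For >= 3-level guests, a 4-byte write whose address has bit 2 set
     * touches only the upper half of a 64-bit PTE; only low-half writes
     * can clear the PRESENT bit. */
    static inline int is_lo_pte_fn(unsigned long vaddr, int levels)
    {
        if (levels >= 3)
            return (vaddr & 0x4) == 0;
        return 1;   /* 2-level: every aligned 4-byte write is a whole PTE */
    }
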
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/arch/x86/x86_emulate.c
--- a/xen/arch/x86/x86_emulate.c Wed Jan 17 19:55:48 2007 -0700
+++ b/xen/arch/x86/x86_emulate.c Wed Jan 17 21:31:22 2007 -0700
@@ -443,10 +443,11 @@ do{ __asm__ __volatile__ (
})
#define insn_fetch_type(_type) ((_type)insn_fetch_bytes(sizeof(_type)))
-#define _truncate_ea(ea, byte_width) \
-({ unsigned long __ea = (ea); \
- (((byte_width) == sizeof(unsigned long)) ? __ea : \
- (__ea & ((1UL << ((byte_width) << 3)) - 1))); \
+#define _truncate_ea(ea, byte_width) \
+({ unsigned long __ea = (ea); \
+ unsigned int _width = (byte_width); \
+ ((_width == sizeof(unsigned long)) ? __ea : \
+ (__ea & ((1UL << (_width << 3)) - 1))); \
})
#define truncate_ea(ea) _truncate_ea((ea), ad_bytes)
@@ -473,16 +474,27 @@ static int even_parity(uint8_t v)
#define _register_address_increment(reg, inc, byte_width) \
do { \
int _inc = (inc); /* signed type ensures sign extension to long */ \
- if ( (byte_width) == sizeof(unsigned long) ) \
+ unsigned int _width = (byte_width); \
+ if ( _width == sizeof(unsigned long) ) \
(reg) += _inc; \
else if ( mode_64bit() ) \
- (reg) = ((reg) + _inc) & ((1UL << ((byte_width) << 3)) - 1); \
+ (reg) = ((reg) + _inc) & ((1UL << (_width << 3)) - 1); \
else \
- (reg) = ((reg) & ~((1UL << ((byte_width) << 3)) - 1)) | \
- (((reg) + _inc) & ((1UL << ((byte_width) << 3)) - 1)); \
+ (reg) = ((reg) & ~((1UL << (_width << 3)) - 1)) | \
+ (((reg) + _inc) & ((1UL << (_width << 3)) - 1)); \
} while (0)
#define register_address_increment(reg, inc) \
_register_address_increment((reg), (inc), ad_bytes)
+
+#define sp_pre_dec(dec) ({ \
+ _register_address_increment(_regs.esp, -(dec), ctxt->sp_size/8); \
+ _truncate_ea(_regs.esp, ctxt->sp_size/8); \
+})
+#define sp_post_inc(inc) ({ \
+ unsigned long __esp = _truncate_ea(_regs.esp, ctxt->sp_size/8); \
+ _register_address_increment(_regs.esp, (inc), ctxt->sp_size/8); \
+ __esp; \
+})
#define jmp_rel(rel) \
do { \
@@ -679,7 +691,7 @@ x86_emulate(
ea.mem.seg = x86_seg_ds;
ea.mem.off = 0;
- op_bytes = def_op_bytes = ad_bytes = def_ad_bytes = ctxt->address_bytes;
+ op_bytes = def_op_bytes = ad_bytes = def_ad_bytes = ctxt->addr_size/8;
if ( op_bytes == 8 )
{
op_bytes = def_op_bytes = 4;
@@ -1144,7 +1156,9 @@ x86_emulate(
break;
}
- case 0x80 ... 0x83: /* Grp1 */
+ case 0x82: /* Grp1 (x86/32 only) */
+ generate_exception_if(mode_64bit(), EXC_UD);
+ case 0x80: case 0x81: case 0x83: /* Grp1 */
switch ( modrm_reg & 7 )
{
case 0: goto add;
@@ -1194,10 +1208,9 @@ x86_emulate(
/* 64-bit mode: POP defaults to a 64-bit operand. */
if ( mode_64bit() && (dst.bytes == 4) )
dst.bytes = 8;
- if ( (rc = ops->read(x86_seg_ss, truncate_ea(_regs.esp),
+ if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes),
&dst.val, dst.bytes, ctxt)) != 0 )
goto done;
- register_address_increment(_regs.esp, dst.bytes);
break;
case 0xb0 ... 0xb7: /* mov imm8,r8 */
@@ -1466,7 +1479,7 @@ x86_emulate(
emulate_1op("dec", dst, _regs.eflags);
break;
case 2: /* call (near) */
- case 3: /* jmp (near) */
+ case 4: /* jmp (near) */
if ( ((op_bytes = dst.bytes) != 8) && mode_64bit() )
{
dst.bytes = op_bytes = 8;
@@ -1488,8 +1501,7 @@ x86_emulate(
&dst.val, 8, ctxt)) != 0 )
goto done;
}
- register_address_increment(_regs.esp, -dst.bytes);
- if ( (rc = ops->write(x86_seg_ss, truncate_ea(_regs.esp),
+ if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
dst.val, dst.bytes, ctxt)) != 0 )
goto done;
dst.type = OP_NONE;
@@ -1644,10 +1656,9 @@ x86_emulate(
dst.bytes = op_bytes;
if ( mode_64bit() && (dst.bytes == 4) )
dst.bytes = 8;
- if ( (rc = ops->read(x86_seg_ss, truncate_ea(_regs.esp),
+ if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes),
&dst.val, dst.bytes, ctxt)) != 0 )
goto done;
- register_address_increment(_regs.esp, dst.bytes);
break;
case 0x60: /* pusha */ {
@@ -1657,11 +1668,9 @@ x86_emulate(
_regs.esp, _regs.ebp, _regs.esi, _regs.edi };
generate_exception_if(mode_64bit(), EXC_UD);
for ( i = 0; i < 8; i++ )
- if ( (rc = ops->write(x86_seg_ss,
- truncate_ea(_regs.esp-(i+1)*op_bytes),
+ if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
regs[i], op_bytes, ctxt)) != 0 )
goto done;
- register_address_increment(_regs.esp, -8*op_bytes);
break;
}
@@ -1674,11 +1683,9 @@ x86_emulate(
(unsigned long *)&_regs.ecx, (unsigned long *)&_regs.eax };
generate_exception_if(mode_64bit(), EXC_UD);
for ( i = 0; i < 8; i++ )
- if ( (rc = ops->read(x86_seg_ss,
- truncate_ea(_regs.esp+i*op_bytes),
+ if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
regs[i], op_bytes, ctxt)) != 0 )
goto done;
- register_address_increment(_regs.esp, 8*op_bytes);
break;
}
@@ -1697,9 +1704,8 @@ x86_emulate(
if ( mode_64bit() && (dst.bytes == 4) )
dst.bytes = 8;
dst.val = src.val;
- register_address_increment(_regs.esp, -dst.bytes);
dst.mem.seg = x86_seg_ss;
- dst.mem.off = truncate_ea(_regs.esp);
+ dst.mem.off = sp_pre_dec(dst.bytes);
break;
case 0x70 ... 0x7f: /* jcc (short) */ {
@@ -1813,11 +1819,10 @@ x86_emulate(
case 0xc3: /* ret (near) */ {
int offset = (b == 0xc2) ? insn_fetch_type(uint16_t) : 0;
op_bytes = mode_64bit() ? 8 : op_bytes;
- if ( (rc = ops->read(x86_seg_ss, truncate_ea(_regs.esp),
+ if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes + offset),
&dst.val, op_bytes, ctxt)) != 0 )
goto done;
_regs.eip = dst.val;
- register_address_increment(_regs.esp, op_bytes + offset);
break;
}
@@ -2019,7 +2024,7 @@ x86_emulate(
case 0xbc: /* bsf */ {
int zf;
- asm ( "bsf %2,%0; setc %b1"
+ asm ( "bsf %2,%0; setz %b1"
: "=r" (dst.val), "=q" (zf)
: "r" (src.val), "1" (0) );
_regs.eflags &= ~EFLG_ZF;
@@ -2029,7 +2034,7 @@ x86_emulate(
case 0xbd: /* bsr */ {
int zf;
- asm ( "bsr %2,%0; setc %b1"
+ asm ( "bsr %2,%0; setz %b1"
: "=r" (dst.val), "=q" (zf)
: "r" (src.val), "1" (0) );
_regs.eflags &= ~EFLG_ZF;
@@ -2046,12 +2051,13 @@ x86_emulate(
break;
case 0xba: /* Grp8 */
- switch ( modrm_reg & 3 )
- {
- case 0: goto bt;
- case 1: goto bts;
- case 2: goto btr;
- case 3: goto btc;
+ switch ( modrm_reg & 7 )
+ {
+ case 4: goto bt;
+ case 5: goto bts;
+ case 6: goto btr;
+ case 7: goto btc;
+ default: generate_exception_if(1, EXC_UD);
}
break;
@@ -2100,6 +2106,7 @@ x86_emulate(
#if defined(__i386__)
{
unsigned long old_lo, old_hi;
+ generate_exception_if((modrm_reg & 7) != 1, EXC_UD);
if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0, &old_lo, 4, ctxt)) ||
(rc = ops->read(ea.mem.seg, ea.mem.off+4, &old_hi, 4, ctxt)) )
goto done;
@@ -2126,6 +2133,7 @@ x86_emulate(
#elif defined(__x86_64__)
{
unsigned long old, new;
+ generate_exception_if((modrm_reg & 7) != 1, EXC_UD);
if ( (rc = ops->read(ea.mem.seg, ea.mem.off, &old, 8, ctxt)) != 0 )
goto done;
if ( ((uint32_t)(old>>0) != (uint32_t)_regs.eax) ||
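
The sp_pre_dec()/sp_post_inc() macros introduced above fold the old
register_address_increment() + truncate_ea() pairs into single
expressions honouring ctxt->sp_size: a push decrements the stack pointer
first and yields the truncated store address, a pop yields the truncated
load address and then increments. A simplified model of the pair,
assuming a bare unsigned long stack pointer; unlike the real macros it
does not preserve the untouched upper register bits in sub-width modes:

    /* Truncate v to the low 'bits' bits (16, 32, or full word size). */
    static unsigned long trunc(unsigned long v, unsigned int bits)
    {
        return (bits >= sizeof(unsigned long) * 8)
               ? v : (v & ((1UL << bits) - 1));
    }

    /* push: decrement, then use the new (truncated) SP as the address. */
    static unsigned long sp_pre_dec(unsigned long *sp, unsigned int bits,
                                    unsigned long dec)
    {
        *sp = trunc(*sp - dec, bits);
        return *sp;
    }

    /* pop: use the current (truncated) SP, then increment. */
    static unsigned long sp_post_inc(unsigned long *sp, unsigned int bits,
                                     unsigned long inc)
    {
        unsigned long addr = trunc(*sp, bits);
        *sp = trunc(*sp + inc, bits);
        return addr;
    }
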
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/include/asm-powerpc/byteorder.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-powerpc/byteorder.h Wed Jan 17 21:31:22 2007 -0700
@@ -0,0 +1,80 @@
+#ifndef _ASM_POWERPC_BYTEORDER_H
+#define _ASM_POWERPC_BYTEORDER_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/types.h>
+#include <xen/compiler.h>
+
+static inline __u16 ld_le16(const volatile __u16 *addr)
+{
+ __u16 val;
+
+ asm volatile ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+ return val;
+}
+
+static inline void st_le16(volatile __u16 *addr, const __u16 val)
+{
+ asm volatile ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+static inline __u32 ld_le32(const volatile __u32 *addr)
+{
+ __u32 val;
+
+ asm volatile ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+ return val;
+}
+
+static inline void st_le32(volatile __u32 *addr, const __u32 val)
+{
+ asm volatile ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+static inline __attribute_const__ __u16 ___arch__swab16(__u16 value)
+{
+ __u16 result;
+
+ asm("rlwimi %0,%1,8,16,23"
+ : "=r" (result)
+ : "r" (value), "0" (value >> 8));
+ return result;
+}
+
+static inline __attribute_const__ __u32 ___arch__swab32(__u32 value)
+{
+ __u32 result;
+
+ asm("rlwimi %0,%1,24,16,23\n\t"
+ "rlwimi %0,%1,8,8,15\n\t"
+ "rlwimi %0,%1,24,0,7"
+ : "=r" (result)
+ : "r" (value), "0" (value >> 24));
+ return result;
+}
+
+#define __arch__swab16(x) ___arch__swab16(x)
+#define __arch__swab32(x) ___arch__swab32(x)
+
+/* The same, but returns the converted value from the location pointed to by addr. */
+#define __arch__swab16p(addr) ld_le16(addr)
+#define __arch__swab32p(addr) ld_le32(addr)
+
+/* The same, but does the conversion in situ, i.e. puts the value back at addr. */
+#define __arch__swab16s(addr) st_le16(addr,*addr)
+#define __arch__swab32s(addr) st_le32(addr,*addr)
+
+#define __BYTEORDER_HAS_U64__
+#ifndef __powerpc64__
+#define __SWAB_64_THRU_32__
+#endif /* __powerpc64__ */
+
+#include <xen/byteorder/big_endian.h>
+
+#endif /* _ASM_POWERPC_BYTEORDER_H */
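
The PowerPC helpers above use the byte-reversed load/store instructions
(lhbrx/sthbrx, lwbrx/stwbrx) so little-endian data can be accessed
without a separate swap. A portable C model of what ld_le16()/st_le16()
compute, written with plain byte accesses for illustration:

    #include <stdint.h>

    /* Model of ld_le16: assemble a little-endian u16 from its bytes. */
    static uint16_t ld_le16_model(const volatile uint16_t *addr)
    {
        const volatile uint8_t *p = (const volatile uint8_t *)addr;
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    /* Model of st_le16: the low byte is stored first. */
    static void st_le16_model(volatile uint16_t *addr, uint16_t val)
    {
        volatile uint8_t *p = (volatile uint8_t *)addr;
        p[0] = (uint8_t)val;
        p[1] = (uint8_t)(val >> 8);
    }
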
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/include/asm-x86/byteorder.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/byteorder.h Wed Jan 17 21:31:22 2007 -0700
@@ -0,0 +1,36 @@
+#ifndef __ASM_X86_BYTEORDER_H__
+#define __ASM_X86_BYTEORDER_H__
+
+#include <asm/types.h>
+#include <xen/compiler.h>
+
+static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
+{
+ asm("bswap %0" : "=r" (x) : "0" (x));
+ return x;
+}
+
+static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
+{
+ union {
+ struct { __u32 a,b; } s;
+ __u64 u;
+ } v;
+ v.u = val;
+ asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
+ : "=r" (v.s.a), "=r" (v.s.b)
+ : "0" (v.s.a), "1" (v.s.b));
+ return v.u;
+}
+
+/* Do not define swab16.  Gcc is smart enough to recognize the "C" version
+   and convert it into a rotation or an exchange. */
+
+#define __arch__swab64(x) ___arch__swab64(x)
+#define __arch__swab32(x) ___arch__swab32(x)
+
+#define __BYTEORDER_HAS_U64__
+
+#include <xen/byteorder/little_endian.h>
+
+#endif /* __ASM_X86_BYTEORDER_H__ */
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/include/asm-x86/x86_emulate.h
--- a/xen/include/asm-x86/x86_emulate.h Wed Jan 17 19:55:48 2007 -0700
+++ b/xen/include/asm-x86/x86_emulate.h Wed Jan 17 21:31:22 2007 -0700
@@ -150,8 +150,11 @@ struct x86_emulate_ctxt
/* Register state before/after emulation. */
struct cpu_user_regs *regs;
- /* Default address size in current execution mode (2, 4, or 8). */
- int address_bytes;
+ /* Default address size in current execution mode (16, 32, or 64). */
+ unsigned int addr_size;
+
+ /* Stack pointer width in bits (16, 32 or 64). */
+ unsigned int sp_size;
};
/*
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/include/xen/byteorder/big_endian.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/xen/byteorder/big_endian.h Wed Jan 17 21:31:22 2007 -0700
@@ -0,0 +1,106 @@
+#ifndef __XEN_BYTEORDER_BIG_ENDIAN_H__
+#define __XEN_BYTEORDER_BIG_ENDIAN_H__
+
+#ifndef __BIG_ENDIAN
+#define __BIG_ENDIAN 4321
+#endif
+#ifndef __BIG_ENDIAN_BITFIELD
+#define __BIG_ENDIAN_BITFIELD
+#endif
+
+#include <xen/types.h>
+#include <xen/byteorder/swab.h>
+
+#define __constant_htonl(x) ((__force __be32)(__u32)(x))
+#define __constant_ntohl(x) ((__force __u32)(__be32)(x))
+#define __constant_htons(x) ((__force __be16)(__u16)(x))
+#define __constant_ntohs(x) ((__force __u16)(__be16)(x))
+#define __constant_cpu_to_le64(x) ((__force __le64)___constant_swab64((x)))
+#define __constant_le64_to_cpu(x) ___constant_swab64((__force __u64)(__le64)(x))
+#define __constant_cpu_to_le32(x) ((__force __le32)___constant_swab32((x)))
+#define __constant_le32_to_cpu(x) ___constant_swab32((__force __u32)(__le32)(x))
+#define __constant_cpu_to_le16(x) ((__force __le16)___constant_swab16((x)))
+#define __constant_le16_to_cpu(x) ___constant_swab16((__force __u16)(__le16)(x))
+#define __constant_cpu_to_be64(x) ((__force __be64)(__u64)(x))
+#define __constant_be64_to_cpu(x) ((__force __u64)(__be64)(x))
+#define __constant_cpu_to_be32(x) ((__force __be32)(__u32)(x))
+#define __constant_be32_to_cpu(x) ((__force __u32)(__be32)(x))
+#define __constant_cpu_to_be16(x) ((__force __be16)(__u16)(x))
+#define __constant_be16_to_cpu(x) ((__force __u16)(__be16)(x))
+#define __cpu_to_le64(x) ((__force __le64)__swab64((x)))
+#define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x))
+#define __cpu_to_le32(x) ((__force __le32)__swab32((x)))
+#define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
+#define __cpu_to_le16(x) ((__force __le16)__swab16((x)))
+#define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x))
+#define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
+#define __be64_to_cpu(x) ((__force __u64)(__be64)(x))
+#define __cpu_to_be32(x) ((__force __be32)(__u32)(x))
+#define __be32_to_cpu(x) ((__force __u32)(__be32)(x))
+#define __cpu_to_be16(x) ((__force __be16)(__u16)(x))
+#define __be16_to_cpu(x) ((__force __u16)(__be16)(x))
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+ return (__force __le64)__swab64p(p);
+}
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+ return __swab64p((__u64 *)p);
+}
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+ return (__force __le32)__swab32p(p);
+}
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+ return __swab32p((__u32 *)p);
+}
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+ return (__force __le16)__swab16p(p);
+}
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+ return __swab16p((__u16 *)p);
+}
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+ return (__force __be64)*p;
+}
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+ return (__force __u64)*p;
+}
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+ return (__force __be32)*p;
+}
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+ return (__force __u32)*p;
+}
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+ return (__force __be16)*p;
+}
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+ return (__force __u16)*p;
+}
+#define __cpu_to_le64s(x) __swab64s((x))
+#define __le64_to_cpus(x) __swab64s((x))
+#define __cpu_to_le32s(x) __swab32s((x))
+#define __le32_to_cpus(x) __swab32s((x))
+#define __cpu_to_le16s(x) __swab16s((x))
+#define __le16_to_cpus(x) __swab16s((x))
+#define __cpu_to_be64s(x) do {} while (0)
+#define __be64_to_cpus(x) do {} while (0)
+#define __cpu_to_be32s(x) do {} while (0)
+#define __be32_to_cpus(x) do {} while (0)
+#define __cpu_to_be16s(x) do {} while (0)
+#define __be16_to_cpus(x) do {} while (0)
+
+#include <xen/byteorder/generic.h>
+
+#endif /* __XEN_BYTEORDER_BIG_ENDIAN_H__ */
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/include/xen/byteorder/generic.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/xen/byteorder/generic.h Wed Jan 17 21:31:22 2007 -0700
@@ -0,0 +1,68 @@
+#ifndef __XEN_BYTEORDER_GENERIC_H__
+#define __XEN_BYTEORDER_GENERIC_H__
+
+/*
+ * Generic Byte-reordering support
+ *
+ * The "... p" macros, like le64_to_cpup, can be used with pointers
+ * to unaligned data, but there will be a performance penalty on
+ * some architectures. Use get_unaligned for unaligned data.
+ *
+ * The following macros are to be defined by <asm/byteorder.h>:
+ *
+ * Conversion of XX-bit integers (16-, 32-, or 64-bit)
+ * between native CPU format and little/big endian format
+ * 64-bit stuff only defined for proper architectures
+ * cpu_to_[bl]eXX(__uXX x)
+ * [bl]eXX_to_cpu(__uXX x)
+ *
+ * The same, but takes a pointer to the value to convert
+ * cpu_to_[bl]eXXp(__uXX x)
+ * [bl]eXX_to_cpup(__uXX x)
+ *
+ * The same, but change in situ
+ * cpu_to_[bl]eXXs(__uXX x)
+ * [bl]eXX_to_cpus(__uXX x)
+ *
+ * See asm-foo/byteorder.h for examples of how to provide
+ * architecture-optimized versions
+ */
+
+#define cpu_to_le64 __cpu_to_le64
+#define le64_to_cpu __le64_to_cpu
+#define cpu_to_le32 __cpu_to_le32
+#define le32_to_cpu __le32_to_cpu
+#define cpu_to_le16 __cpu_to_le16
+#define le16_to_cpu __le16_to_cpu
+#define cpu_to_be64 __cpu_to_be64
+#define be64_to_cpu __be64_to_cpu
+#define cpu_to_be32 __cpu_to_be32
+#define be32_to_cpu __be32_to_cpu
+#define cpu_to_be16 __cpu_to_be16
+#define be16_to_cpu __be16_to_cpu
+#define cpu_to_le64p __cpu_to_le64p
+#define le64_to_cpup __le64_to_cpup
+#define cpu_to_le32p __cpu_to_le32p
+#define le32_to_cpup __le32_to_cpup
+#define cpu_to_le16p __cpu_to_le16p
+#define le16_to_cpup __le16_to_cpup
+#define cpu_to_be64p __cpu_to_be64p
+#define be64_to_cpup __be64_to_cpup
+#define cpu_to_be32p __cpu_to_be32p
+#define be32_to_cpup __be32_to_cpup
+#define cpu_to_be16p __cpu_to_be16p
+#define be16_to_cpup __be16_to_cpup
+#define cpu_to_le64s __cpu_to_le64s
+#define le64_to_cpus __le64_to_cpus
+#define cpu_to_le32s __cpu_to_le32s
+#define le32_to_cpus __le32_to_cpus
+#define cpu_to_le16s __cpu_to_le16s
+#define le16_to_cpus __le16_to_cpus
+#define cpu_to_be64s __cpu_to_be64s
+#define be64_to_cpus __be64_to_cpus
+#define cpu_to_be32s __cpu_to_be32s
+#define be32_to_cpus __be32_to_cpus
+#define cpu_to_be16s __cpu_to_be16s
+#define be16_to_cpus __be16_to_cpus
+
+#endif /* __XEN_BYTEORDER_GENERIC_H__ */
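
Each asm-*/byteorder.h header picks one of the two endian flavours,
which in turn pulls in the generic names above; those are what the rest
of the hypervisor is expected to use. A small usage sketch, assuming a
little-endian (x86) build, so cpu_to_le32 is the identity and
cpu_to_be32 swaps:

    #include <xen/types.h>
    #include <asm/byteorder.h>

    /* Serialize a value into fixed-endian wire formats and back. */
    static void wire_demo(void)
    {
        __u32  host = 0x11223344;
        __le32 wire = cpu_to_le32(host);    /* no-op on x86 */
        __be32 net  = cpu_to_be32(host);    /* byte-swapped on x86 */

        /* The round trips recover host order on any CPU. */
        __u32 a = le32_to_cpu(wire);
        __u32 b = be32_to_cpu(net);
        (void)a; (void)b;
    }
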
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/include/xen/byteorder/little_endian.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/xen/byteorder/little_endian.h Wed Jan 17 21:31:22 2007 -0700
@@ -0,0 +1,106 @@
+#ifndef __XEN_BYTEORDER_LITTLE_ENDIAN_H__
+#define __XEN_BYTEORDER_LITTLE_ENDIAN_H__
+
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN 1234
+#endif
+#ifndef __LITTLE_ENDIAN_BITFIELD
+#define __LITTLE_ENDIAN_BITFIELD
+#endif
+
+#include <xen/types.h>
+#include <xen/byteorder/swab.h>
+
+#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
+#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
+#define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
+#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
+#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
+#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
+#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
+#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
+#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
+#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
+#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x))
+#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
+#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x))
+#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
+#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
+#define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
+#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
+#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
+#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
+#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
+#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
+#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+ return (__force __le64)*p;
+}
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+ return (__force __u64)*p;
+}
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+ return (__force __le32)*p;
+}
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+ return (__force __u32)*p;
+}
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+ return (__force __le16)*p;
+}
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+ return (__force __u16)*p;
+}
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+ return (__force __be64)__swab64p(p);
+}
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+ return __swab64p((__u64 *)p);
+}
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+ return (__force __be32)__swab32p(p);
+}
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+ return __swab32p((__u32 *)p);
+}
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+ return (__force __be16)__swab16p(p);
+}
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+ return __swab16p((__u16 *)p);
+}
+#define __cpu_to_le64s(x) do {} while (0)
+#define __le64_to_cpus(x) do {} while (0)
+#define __cpu_to_le32s(x) do {} while (0)
+#define __le32_to_cpus(x) do {} while (0)
+#define __cpu_to_le16s(x) do {} while (0)
+#define __le16_to_cpus(x) do {} while (0)
+#define __cpu_to_be64s(x) __swab64s((x))
+#define __be64_to_cpus(x) __swab64s((x))
+#define __cpu_to_be32s(x) __swab32s((x))
+#define __be32_to_cpus(x) __swab32s((x))
+#define __cpu_to_be16s(x) __swab16s((x))
+#define __be16_to_cpus(x) __swab16s((x))
+
+#include <xen/byteorder/generic.h>
+
+#endif /* __XEN_BYTEORDER_LITTLE_ENDIAN_H__ */
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/include/xen/byteorder/swab.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/xen/byteorder/swab.h Wed Jan 17 21:31:22 2007 -0700
@@ -0,0 +1,185 @@
+#ifndef __XEN_BYTEORDER_SWAB_H__
+#define __XEN_BYTEORDER_SWAB_H__
+
+/*
+ * Byte-swapping, independently from CPU endianness
+ * swabXX[ps]?(foo)
+ *
+ * Francois-Rene Rideau <fare@xxxxxxxxx> 19971205
+ * separated swab functions from cpu_to_XX,
+ * to clean up support for bizarre-endian architectures.
+ */
+
+#include <xen/compiler.h>
+
+/* Casts are necessary for constants, because we never know for sure how
+ * U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
+ */
+#define ___swab16(x) \
+({ \
+ __u16 __x = (x); \
+ ((__u16)( \
+ (((__u16)(__x) & (__u16)0x00ffU) << 8) | \
+ (((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
+})
+
+#define ___swab32(x) \
+({ \
+ __u32 __x = (x); \
+ ((__u32)( \
+ (((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
+ (((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \
+ (((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \
+ (((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
+})
+
+#define ___swab64(x) \
+({ \
+ __u64 __x = (x); \
+ ((__u64)( \
+ (__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
+ (__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
+ (__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
+ (__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) << 8) | \
+ (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >> 8) | \
+ (__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
+ (__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
+ (__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
+})
+
+#define ___constant_swab16(x) \
+ ((__u16)( \
+ (((__u16)(x) & (__u16)0x00ffU) << 8) | \
+ (((__u16)(x) & (__u16)0xff00U) >> 8) ))
+#define ___constant_swab32(x) \
+ ((__u32)( \
+ (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
+ (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
+ (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
+ (((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
+#define ___constant_swab64(x) \
+ ((__u64)( \
+ (__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
+ (__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
+ (__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
+ (__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
+ (__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
+ (__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
+ (__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
+ (__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
+
+/*
+ * provide defaults when no architecture-specific optimization is detected
+ */
+#ifndef __arch__swab16
+# define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
+#endif
+#ifndef __arch__swab32
+# define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
+#endif
+#ifndef __arch__swab64
+# define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
+#endif
+
+#ifndef __arch__swab16p
+# define __arch__swab16p(x) __arch__swab16(*(x))
+#endif
+#ifndef __arch__swab32p
+# define __arch__swab32p(x) __arch__swab32(*(x))
+#endif
+#ifndef __arch__swab64p
+# define __arch__swab64p(x) __arch__swab64(*(x))
+#endif
+
+#ifndef __arch__swab16s
+# define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
+#endif
+#ifndef __arch__swab32s
+# define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
+#endif
+#ifndef __arch__swab64s
+# define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
+#endif
+
+
+/*
+ * Allow constant folding
+ */
+#if defined(__GNUC__) && defined(__OPTIMIZE__)
+# define __swab16(x) \
+(__builtin_constant_p((__u16)(x)) ? \
+ ___swab16((x)) : \
+ __fswab16((x)))
+# define __swab32(x) \
+(__builtin_constant_p((__u32)(x)) ? \
+ ___swab32((x)) : \
+ __fswab32((x)))
+# define __swab64(x) \
+(__builtin_constant_p((__u64)(x)) ? \
+ ___swab64((x)) : \
+ __fswab64((x)))
+#else
+# define __swab16(x) __fswab16(x)
+# define __swab32(x) __fswab32(x)
+# define __swab64(x) __fswab64(x)
+#endif /* OPTIMIZE */
+
+
+static inline __attribute_const__ __u16 __fswab16(__u16 x)
+{
+ return __arch__swab16(x);
+}
+static inline __u16 __swab16p(const __u16 *x)
+{
+ return __arch__swab16p(x);
+}
+static inline void __swab16s(__u16 *addr)
+{
+ __arch__swab16s(addr);
+}
+
+static inline __attribute_const__ __u32 __fswab32(__u32 x)
+{
+ return __arch__swab32(x);
+}
+static inline __u32 __swab32p(const __u32 *x)
+{
+ return __arch__swab32p(x);
+}
+static inline void __swab32s(__u32 *addr)
+{
+ __arch__swab32s(addr);
+}
+
+#ifdef __BYTEORDER_HAS_U64__
+static inline __attribute_const__ __u64 __fswab64(__u64 x)
+{
+# ifdef __SWAB_64_THRU_32__
+ __u32 h = x >> 32;
+ __u32 l = x & ((1ULL<<32)-1);
+ return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
+# else
+ return __arch__swab64(x);
+# endif
+}
+static inline __u64 __swab64p(const __u64 *x)
+{
+ return __arch__swab64p(x);
+}
+static inline void __swab64s(__u64 *addr)
+{
+ __arch__swab64s(addr);
+}
+#endif /* __BYTEORDER_HAS_U64__ */
+
+#define swab16 __swab16
+#define swab32 __swab32
+#define swab64 __swab64
+#define swab16p __swab16p
+#define swab32p __swab32p
+#define swab64p __swab64p
+#define swab16s __swab16s
+#define swab32s __swab32s
+#define swab64s __swab64s
+
+#endif /* __XEN_BYTEORDER_SWAB_H__ */
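
The __swab16/32/64 wrappers near the end of swab.h dispatch on
__builtin_constant_p: a compile-time constant goes through the pure
___swabXX macro, which the compiler folds to another constant, while a
runtime value calls the inline __fswabXX and so picks up any
__arch__swabXX override. A short sketch of the effect; the function is
hypothetical:

    #include <xen/byteorder/swab.h>

    static __u16 swab_demo(__u16 runtime_val)
    {
        __u16 folded  = swab16(0x1234);      /* folds to 0x3412 */
        __u16 swapped = swab16(runtime_val); /* goes via __fswab16() */
        return folded ^ swapped;
    }
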
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/include/xen/config.h
--- a/xen/include/xen/config.h Wed Jan 17 19:55:48 2007 -0700
+++ b/xen/include/xen/config.h Wed Jan 17 21:31:22 2007 -0700
@@ -63,6 +63,8 @@
/* Linux 'checker' project. */
#define __iomem
#define __user
+#define __force
+#define __bitwise
#ifndef __ASSEMBLY__
diff -r 7e9077dd4010 -r 26c75e0e48ed xen/include/xen/types.h
--- a/xen/include/xen/types.h Wed Jan 17 19:55:48 2007 -0700
+++ b/xen/include/xen/types.h Wed Jan 17 21:31:22 2007 -0700
@@ -51,4 +51,11 @@ struct domain;
struct domain;
struct vcpu;
+typedef __u16 __le16;
+typedef __u16 __be16;
+typedef __u32 __le32;
+typedef __u32 __be32;
+typedef __u64 __le64;
+typedef __u64 __be64;
+
#endif /* __TYPES_H__ */