[Xen-devel] [PATCH 03/15] xen: Add a new target to qemu: target-xen



From: Anthony PERARD <anthony.perard@xxxxxxxxxx>

This patch adds a new Xen device model target to QEMU, called
target-xen. The new target makes use of the previously introduced
xen_machine_fv. A fully working Xen device model still requires the
functionality introduced by the following patches.

Signed-off-by: Anthony PERARD <anthony.perard@xxxxxxxxxx>
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
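[ Build note, not part of the commit message: given the new
  default-configs/xen-dm-softmmu.mak and the ${target_arch2}-dm-softmmu
  configure case below, the new target would presumably be selected with
  something like:

      ./configure --target-list=xen-dm-softmmu
      make

  (illustrative invocation only). ]
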
 Makefile.target                    |   31 ++-
 arch_init.c                        |    2 +
 arch_init.h                        |    1 +
 configure                          |   11 +-
 default-configs/xen-dm-softmmu.mak |   24 ++
 target-xen/cpu.h                   |  120 ++++++
 target-xen/exec-dm.c               |  791 ++++++++++++++++++++++++++++++++++++
 target-xen/helper.c                |   69 ++++
 target-xen/machine.c               |    0
 target-xen/qemu-xen.h              |   30 ++
 target-xen/stub-functions.c        |   42 ++
 target-xen/xen_mapcache.c          |   14 +
 12 files changed, 1130 insertions(+), 5 deletions(-)
 create mode 100644 default-configs/xen-dm-softmmu.mak
 create mode 100644 target-xen/cpu.h
 create mode 100644 target-xen/exec-dm.c
 create mode 100644 target-xen/helper.c
 create mode 100644 target-xen/machine.c
 create mode 100644 target-xen/qemu-xen.h
 create mode 100644 target-xen/stub-functions.c
 create mode 100644 target-xen/xen_mapcache.c

diff --git a/Makefile.target b/Makefile.target
index 8fdc884..359a984 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -183,9 +183,6 @@ QEMU_CFLAGS += $(VNC_PNG_CFLAGS)
 # xen backend driver support
 obj-$(CONFIG_XEN) += xen_machine_pv.o xen_domainbuild.o
 
-# xen full virtualized machine
-obj-$(CONFIG_XEN) += xen_machine_fv.o
-
 # USB layer
 obj-$(CONFIG_USB_OHCI) += usb-ohci.o
 
@@ -310,6 +307,34 @@ obj-y += $(addprefix $(HWDIR)/, $(hw-obj-y))
 
 endif # CONFIG_SOFTMMU
 
+# Xen Device Model
+# xen full virtualized machine
+
+# Remove some objects that we do not want for a xen target.
+ifeq ($(TARGET_BASE_ARCH), xen)
+bad-libobj-y = exec.o translate-all.o cpu-exec.o translate.o
+bad-libobj-y += tcg%.o fpu/%.o
+bad-libobj-y += disas.o op_helper.o
+libobj-y := $(filter-out $(bad-libobj-y), $(libobj-y))
+endif
+
+obj-xen-y += xen_machine_fv.o
+obj-xen-y += i8259.o
+obj-xen-y += pc.o
+obj-xen-y += piix_pci.o
+obj-xen-y += mc146818rtc.o
+
+obj-xen-y += xen_mapcache.o
+obj-xen-y += stub-functions.o
+
+obj-xen-y += vga.o
+obj-xen-y += hpet.o
+obj-xen-y += cirrus_vga.o
+obj-xen-y += smbios.o
+obj-xen-y += multiboot.o
+obj-xen-y += exec-dm.o
+obj-xen-y += lsi53c895a.o usb-ohci.o
+
 obj-$(CONFIG_GDBSTUB_XML) += gdbstub-xml.o
 
 $(QEMU_PROG): $(obj-y) $(obj-$(TARGET_BASE_ARCH)-y)
diff --git a/arch_init.c b/arch_init.c
index 47bb4b2..ebc5cb6 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -75,6 +75,8 @@ const char arch_config_name[] = CONFIG_QEMU_CONFDIR "/target-" TARGET_ARCH ".con
 #define QEMU_ARCH QEMU_ARCH_SH4
 #elif defined(TARGET_SPARC)
 #define QEMU_ARCH QEMU_ARCH_SPARC
+#elif defined(TARGET_XEN)
+#define QEMU_ARCH QEMU_ARCH_XEN
 #endif
 
 const uint32_t arch_type = QEMU_ARCH;
diff --git a/arch_init.h b/arch_init.h
index 682890c..b5f8eb1 100644
--- a/arch_init.h
+++ b/arch_init.h
@@ -16,6 +16,7 @@ enum {
     QEMU_ARCH_S390X = 256,
     QEMU_ARCH_SH4 = 512,
     QEMU_ARCH_SPARC = 1024,
+    QEMU_ARCH_XEN = 2048,
 };
 
 extern const uint32_t arch_type;
diff --git a/configure b/configure
index 89d9b44..c3f52ce 100755
--- a/configure
+++ b/configure
@@ -2517,6 +2517,9 @@ case "$target" in
   ${target_arch2}-softmmu)
     target_softmmu="yes"
     ;;
+  ${target_arch2}-dm-softmmu)
+    target_softmmu="yes"
+    ;;
   ${target_arch2}-linux-user)
     if test "$linux" != "yes" ; then
       echo "ERROR: Target '$target' is only available on a Linux host"
@@ -2582,6 +2585,10 @@ case "$target_arch2" in
     TARGET_BASE_ARCH=i386
     target_phys_bits=64
   ;;
+  xen)
+    # This is used for the xen mapcache
+    target_phys_bits=64
+  ;;
   alpha)
     target_phys_bits=64
     target_nptl="yes"
@@ -2693,7 +2700,7 @@ if [ "$TARGET_ABI_DIR" = "" ]; then
 fi
 echo "TARGET_ABI_DIR=$TARGET_ABI_DIR" >> $config_target_mak
 case "$target_arch2" in
-  i386|x86_64)
+  i386|x86_64|xen)
     if test "$xen" = "yes" -a "$target_softmmu" = "yes" ; then
       echo "CONFIG_XEN=y" >> $config_target_mak
     fi
@@ -2859,7 +2866,7 @@ if test "$target_softmmu" = "yes" ; then
   arm)
     cflags="-DHAS_AUDIO $cflags"
   ;;
-  i386|mips|ppc)
+  i386|mips|ppc|xen)
     cflags="-DHAS_AUDIO -DHAS_AUDIO_CHOICE $cflags"
   ;;
   esac
diff --git a/default-configs/xen-dm-softmmu.mak b/default-configs/xen-dm-softmmu.mak
new file mode 100644
index 0000000..72fe141
--- /dev/null
+++ b/default-configs/xen-dm-softmmu.mak
@@ -0,0 +1,24 @@
+# Default configuration for xen-dm-softmmu
+
+CONFIG_VGA_PCI=y
+CONFIG_VGA_ISA=y
+CONFIG_VMWARE_VGA=y
+CONFIG_SERIAL=y
+CONFIG_PARALLEL=y
+CONFIG_I8254=y
+CONFIG_PCSPK=y
+CONFIG_PCKBD=y
+CONFIG_USB_UHCI=y
+CONFIG_FDC=y
+CONFIG_ACPI=y
+CONFIG_APM=y
+CONFIG_DMA=y
+CONFIG_IDE_CORE=y
+CONFIG_IDE_QDEV=y
+CONFIG_IDE_PCI=y
+CONFIG_IDE_ISA=y
+CONFIG_IDE_PIIX=y
+CONFIG_NE2000_ISA=y
+CONFIG_PIIX_PCI=y
+CONFIG_SOUND=y
+CONFIG_XEN=y
diff --git a/target-xen/cpu.h b/target-xen/cpu.h
new file mode 100644
index 0000000..5a45d1c
--- /dev/null
+++ b/target-xen/cpu.h
@@ -0,0 +1,120 @@
+/*
+ * xen virtual CPU header
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPU_XEN_H
+#define CPU_XEN_H
+
+#include "config.h"
+
+#ifdef TARGET_X86_64
+#define TARGET_LONG_BITS 64
+#else
+#define TARGET_LONG_BITS 32
+#endif
+
+#ifdef TARGET_X86_64
+#define ELF_MACHINE     EM_X86_64
+#else
+#define ELF_MACHINE     EM_386
+#endif
+
+#define CPUState struct CPUXenState
+#define CPUX86State CPUXenState
+
+#include "cpu-defs.h"
+
+#include "softfloat.h"
+
+/* hidden flags - used internally by qemu to represent additional cpu
+   states. Only CPL and SMM are kept here; we avoid using the IOPL_MASK,
+   TF_MASK and VM_MASK bit positions to ease ORing with eflags. */
+/* current cpl */
+#define HF_CPL_SHIFT         0
+#define HF_SMM_SHIFT        19 /* CPU in SMM mode */
+
+#define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
+#define HF_SMM_MASK          (1 << HF_SMM_SHIFT)
+
+/* cpuid_features bits */
+#define CPUID_APIC (1 << 9)
+
+#define NB_MMU_MODES 2
+
+typedef struct CPUXenState {
+    uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
+                        are known at translation time. */
+    CPU_COMMON
+
+    /* processor features (e.g. for CPUID insn) */
+    uint32_t cpuid_features;
+    uint32_t cpuid_apic_id;
+
+    /* in order to simplify APIC support, we leave this pointer to the
+       user */
+    struct DeviceState *apic_state;
+} CPUXenState;
+
+CPUXenState *cpu_xen_init(const char *cpu_model);
+int cpu_xen_exec(CPUXenState *s);
+
+int cpu_get_pic_interrupt(CPUXenState *s);
+void cpu_set_ferr(CPUX86State *s);
+
+/* helper.c */
+void cpu_x86_set_a20(CPUXenState *env, int a20_state);
+
+/* hw/pc.c */
+void cpu_smm_update(CPUXenState *env);
+uint64_t cpu_get_tsc(CPUX86State *env);
+
+#define TARGET_PAGE_BITS 12
+
+#ifdef TARGET_X86_64
+#define TARGET_PHYS_ADDR_SPACE_BITS 52
+/* ??? This is really 48 bits, sign-extended, but the only thing
+   accessible to userland with bit 48 set is the VSYSCALL, and that
+   is handled via other mechanisms.  */
+#define TARGET_VIRT_ADDR_SPACE_BITS 47
+#else
+#define TARGET_PHYS_ADDR_SPACE_BITS 36
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#endif
+
+#define cpu_init cpu_xen_init
+#define cpu_exec cpu_xen_exec
+
+/* MMU modes definitions */
+static inline int cpu_mmu_index (CPUState *env)
+{
+    return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
+}
+
+#include "cpu-all.h"
+#include "exec-all.h"
+
+static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
+{
+}
+
+static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
+                                        target_ulong *cs_base, int *flags)
+{
+}
+
+#endif /* CPU_XEN_H */
diff --git a/target-xen/exec-dm.c b/target-xen/exec-dm.c
new file mode 100644
index 0000000..3d64695
--- /dev/null
+++ b/target-xen/exec-dm.c
@@ -0,0 +1,791 @@
+/*
+ *  virtual page mapping and translated block handling
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include "config.h"
+
+#include "cpu.h"
+#include "hw/hw.h"
+#include "hw/pc.h"
+#include "disas.h"
+#include "hw/xen_common.h"
+#include "qemu-xen.h"
+#include "hw/xen.h"
+#include "hw/xen_backend.h"
+
+int use_icount = 0;
+int64_t qemu_icount;
+
+RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
+
+CPUState *first_cpu;
+/* current CPU in the current thread. It is only valid inside
+   cpu_exec() */
+CPUState *cpu_single_env;
+
+/* io memory support */
+CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
+CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
+void *io_mem_opaque[IO_MEM_NB_ENTRIES];
+static int io_mem_nb = 1;
+
+/* log support */
+FILE *logfile;
+int loglevel;
+
+void cpu_exec_init_all(unsigned long tb_size)
+{
+}
+
+void cpu_exec_init(CPUState *env)
+{
+    CPUState **penv;
+    int cpu_index;
+
+    env->next_cpu = NULL;
+    penv = &first_cpu;
+    cpu_index = 0;
+    while (*penv != NULL) {
+        penv = (CPUState **)&(*penv)->next_cpu;
+        cpu_index++;
+    }
+    env->cpu_index = cpu_index;
+    *penv = env;
+}
+
+/* enable or disable low levels log */
+void cpu_set_log(int log_flags)
+{
+    loglevel = log_flags;
+    if (!logfile) {
+        logfile = stderr;
+    }
+}
+
+void cpu_set_log_filename(const char *filename)
+{
+    logfile = fopen(filename, "w");
+    if (!logfile) {
+        perror(filename);
+        _exit(1);
+    }
+#if !defined(CONFIG_SOFTMMU)
+    /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
+    {
+        static uint8_t logfile_buf[4096];
+        setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
+    }
+#else
+    setvbuf(logfile, NULL, _IOLBF, 0);
+#endif
+    dup2(fileno(logfile), 1);
+    dup2(fileno(logfile), 2);
+}
+
+/* mask must never be zero, except for A20 change call */
+void cpu_interrupt(CPUState *env, int mask)
+{
+    env->interrupt_request |= mask;
+}
+
+void cpu_reset_interrupt(CPUState *env, int mask)
+{
+    env->interrupt_request &= ~mask;
+}
+
+const CPULogItem cpu_log_items[] = {
+#ifdef DEBUG_IOPORT
+    { CPU_LOG_IOPORT, "ioport",
+      "show all i/o ports accesses" },
+#endif
+    { 0, NULL, NULL },
+};
+
+static int cmp1(const char *s1, int n, const char *s2)
+{
+    if (strlen(s2) != n)
+        return 0;
+    return memcmp(s1, s2, n) == 0;
+}
+
+/* takes a comma separated list of log masks. Return 0 if error. */
+int cpu_str_to_log_mask(const char *str)
+{
+    const CPULogItem *item;
+    int mask;
+    const char *p, *p1;
+
+    p = str;
+    mask = 0;
+    for(;;) {
+        p1 = strchr(p, ',');
+        if (!p1) {
+            p1 = p + strlen(p);
+        }
+        if(cmp1(p,p1-p,"all")) {
+            for(item = cpu_log_items; item->mask != 0; item++) {
+                mask |= item->mask;
+            }
+        } else {
+            for(item = cpu_log_items; item->mask != 0; item++) {
+                if (cmp1(p, p1 - p, item->name))
+                    goto found;
+            }
+            return 0;
+        }
+found:
+        mask |= item->mask;
+        if (*p1 != ',')
+            break;
+        p = p1 + 1;
+    }
+    return mask;
+}
+
+/* XXX: Simple implementation. Fix later */
+#define MAX_MMIO 1024
+static struct mmio_space {
+    target_phys_addr_t start;
+    unsigned long size;
+    unsigned long io_index;
+} mmio[MAX_MMIO];
+static unsigned long mmio_cnt;
+
+/* register physical memory. 'size' must be a multiple of the target
+   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
+   io memory page */
+void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
+                                         ram_addr_t size,
+                                         ram_addr_t phys_offset,
+                                         ram_addr_t region_offset)
+{
+    region_offset &= TARGET_PAGE_MASK;
+    start_addr += region_offset;
+
+    int i;
+
+    for (i = 0; i < mmio_cnt; i++) {
+        if(mmio[i].start == start_addr) {
+            mmio[i].io_index = phys_offset;
+            mmio[i].size = size;
+            return;
+        }
+    }
+
+    if (mmio_cnt == MAX_MMIO) {
+        fprintf(stderr, "too many mmio regions\n");
+        exit(-1);
+    }
+
+    mmio[mmio_cnt].io_index = phys_offset;
+    mmio[mmio_cnt].start = start_addr;
+    mmio[mmio_cnt++].size = size;
+}
+
+/* mem_read and mem_write are arrays of functions containing the
+   function to access byte (index 0), word (index 1) and dword (index
+   2). All functions must be supplied. If io_index is non zero, the
+   corresponding io zone is modified. If it is zero, a new io zone is
+   allocated. The return value can be used with
+   cpu_register_physical_memory(). (-1) is returned if error. */
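+/*
+ * Illustrative usage (a sketch, not part of the original patch): a device
+ * model would typically pair this with cpu_register_physical_memory(),
+ * e.g. with hypothetical my_mmio_read/my_mmio_write handler arrays:
+ *
+ *     int idx = cpu_register_io_memory(my_mmio_read, my_mmio_write, opaque);
+ *     cpu_register_physical_memory(base_addr, region_size, idx);
+ */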
+int cpu_register_io_memory_fixed(int io_index,
+                           CPUReadMemoryFunc * const *mem_read,
+                           CPUWriteMemoryFunc * const *mem_write,
+                           void *opaque)
+{
+    int i;
+
+    if (io_index <= 0) {
+        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
+            return -1;
+        io_index = io_mem_nb++;
+    } else {
+        if (io_index >= IO_MEM_NB_ENTRIES)
+            return -1;
+    }
+
+    for(i = 0;i < 3; i++) {
+        io_mem_read[io_index][i] = mem_read[i];
+        io_mem_write[io_index][i] = mem_write[i];
+    }
+    io_mem_opaque[io_index] = opaque;
+    return io_index << IO_MEM_SHIFT;
+}
+
+int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
+                           CPUWriteMemoryFunc * const *mem_write,
+                           void *opaque)
+{
+    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
+}
+
+void cpu_unregister_io_memory(int io_table_address)
+{
+    int i;
+    int io_index = io_table_address >> IO_MEM_SHIFT;
+
+    for (i = 0; i < mmio_cnt; i++) {
+        if (mmio[i].size && mmio[i].io_index == io_index) {
+            mmio[i].start = mmio[i].size = 0;
+            break;
+        }
+    }
+
+    for (i=0;i < 3; i++) {
+        io_mem_read[io_index][i] = NULL;
+        io_mem_write[io_index][i] = NULL;
+    }
+    io_mem_opaque[io_index] = NULL;
+}
+
+int cpu_physical_memory_set_dirty_tracking(int enable)
+{
+    return 0;
+}
+
+#ifdef __ia64__
+
+#define __ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
+#define ia64_sync_i()   asm volatile (";; sync.i" ::: "memory")
+#define ia64_srlz_i()   asm volatile (";; srlz.i ;;" ::: "memory")
+
+/* IA64 has separate I and D caches, with coherence maintained by the DMA
+ * controller.  To provide the behavior the guest OS expects, we need to
+ * flush the I/D caches here.
+ */
+static void sync_icache(uint8_t *address, int len)
+{
+    unsigned long addr = (unsigned long)address;
+    unsigned long end = addr + len;
+
+    for (addr &= ~(32UL-1); addr < end; addr += 32UL) {
+        __ia64_fc(addr);
+    }
+
+    ia64_sync_i();
+    ia64_srlz_i();
+}
+#endif
+
+static int iomem_index(target_phys_addr_t addr)
+{
+    int i;
+
+    for (i = 0; i < mmio_cnt; i++) {
+        unsigned long start, end;
+
+        start = mmio[i].start;
+        end = mmio[i].start + mmio[i].size;
+
+        if ((addr >= start) && (addr < end)) {
+            return (mmio[i].io_index >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+        }
+    }
+    return 0;
+}
+
+unsigned int xen_logdirty_enable = 0;
+
+/*
+ * Replace the standard byte memcpy with a word memcpy for appropriately sized
+ * memory copy operations.  Some users (USB-UHCI) cannot tolerate the possible
+ * word tearing that can result from a guest concurrently writing a memory
+ * structure while the qemu device model is modifying the same location.
+ * Forcing a word-sized read/write prevents the guest from seeing a partially
+ * written word-sized atom.
+ */
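+/*
+ * Concretely (an illustrative case): a 32-bit UHCI transfer descriptor
+ * status word being updated by the guest must be observed here as either
+ * the old or the new word, never a mix of bytes from both.
+ */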
+#if defined(__x86_64__) || defined(__i386__)
+static void memcpy_words(void *dst, void *src, size_t n)
+{
+    asm volatile (
+        "   movl %%edx,%%ecx \n"
+#ifdef __x86_64__
+        "   shrl $3,%%ecx    \n"
+        "   rep  movsq       \n"
+        "   test $4,%%edx    \n"
+        "   jz   1f          \n"
+        "   movsl            \n"
+#else /* __i386__ */
+        "   shrl $2,%%ecx    \n"
+        "   rep  movsl       \n"
+#endif
+        "1: test $2,%%edx    \n"
+        "   jz   1f          \n"
+        "   movsw            \n"
+        "1: test $1,%%edx    \n"
+        "   jz   1f          \n"
+        "   movsb            \n"
+        "1:                  \n"
+        : "+S" (src), "+D" (dst) : "d" (n) : "ecx", "memory" );
+}
+#else
+static void memcpy_words(void *dst, void *src, size_t n)
+{
+    /* Some architectures do not like unaligned accesses. */
+    if (((unsigned long)dst | (unsigned long)src) & 3) {
+        memcpy(dst, src, n);
+        return;
+    }
+
+    while (n >= sizeof(uint32_t)) {
+        *((uint32_t *)dst) = *((uint32_t *)src);
+        dst = ((uint32_t *)dst) + 1;
+        src = ((uint32_t *)src) + 1;
+        n -= sizeof(uint32_t);
+    }
+
+    if (n & 2) {
+        *((uint16_t *)dst) = *((uint16_t *)src);
+        dst = ((uint16_t *)dst) + 1;
+        src = ((uint16_t *)src) + 1;
+    }
+
+    if (n & 1) {
+        *((uint8_t *)dst) = *((uint8_t *)src);
+        dst = ((uint8_t *)dst) + 1;
+        src = ((uint8_t *)src) + 1;
+    }
+}
+#endif
+
+void cpu_physical_memory_rw(target_phys_addr_t _addr, uint8_t *buf,
+                            int _len, int is_write)
+{
+    target_phys_addr_t addr = _addr;
+    int len = _len;
+    int l, io_index;
+    uint8_t *ptr;
+    uint32_t val;
+
+    mapcache_lock();
+
+    while (len > 0) {
+        /* How much can we copy before the next page boundary? */
+        l = TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK);
+        if (l > len) {
+            l = len;
+        }
+
+        io_index = iomem_index(addr);
+        if (is_write) {
+            if (io_index) {
+                if (l >= 4 && ((addr & 3) == 0)) {
+                    /* 32 bit write access */
+                    val = ldl_raw(buf);
+                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
+                    l = 4;
+                } else if (l >= 2 && ((addr & 1) == 0)) {
+                    /* 16 bit write access */
+                    val = lduw_raw(buf);
+                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
+                    l = 2;
+                } else {
+                    /* 8 bit access */
+                    val = ldub_raw(buf);
+                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
+                    l = 1;
+                }
+            } else if ((ptr = phys_ram_addr(addr)) != NULL) {
+                /* Writing to RAM */
+                memcpy_words(ptr, buf, l);
+
+                if (xen_logdirty_enable) {
+                    xc_hvm_modified_memory(xen_xc,
+                            xen_domid,
+                            addr >> TARGET_PAGE_BITS,
+                            ((addr + l + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
+                            - (addr >> TARGET_PAGE_BITS));
+                }
+#ifdef __ia64__
+                sync_icache(ptr, l);
+#endif
+            }
+        } else {
+            if (io_index) {
+                if (l >= 4 && ((addr & 3) == 0)) {
+                    /* 32 bit read access */
+                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
+                    stl_raw(buf, val);
+                    l = 4;
+                } else if (l >= 2 && ((addr & 1) == 0)) {
+                    /* 16 bit read access */
+                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
+                    stw_raw(buf, val);
+                    l = 2;
+                } else {
+                    /* 8 bit access */
+                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
+                    stb_raw(buf, val);
+                    l = 1;
+                }
+            } else if ((ptr = phys_ram_addr(addr)) != NULL) {
+                /* Reading from RAM */
+                memcpy_words(buf, ptr, l);
+            } else {
+                /* Neither RAM nor known MMIO space */
+                memset(buf, 0xff, l);
+            }
+        }
+        len -= l;
+        buf += l;
+        addr += l;
+    }
+
+    mapcache_unlock();
+}
+
+/* virtual memory access for debug */
+int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
+                        uint8_t *buf, int len, int is_write)
+{
+    int l;
+    target_ulong page, phys_addr;
+
+    while (len > 0) {
+        page = addr & TARGET_PAGE_MASK;
+        phys_addr = cpu_get_phys_page_debug(env, page);
+        /* if no physical page mapped, return an error */
+        if (phys_addr == -1)
+            return -1;
+        l = (page + TARGET_PAGE_SIZE) - addr;
+        if (l > len)
+            l = len;
+        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
+                               buf, l, is_write);
+        len -= l;
+        buf += l;
+        addr += l;
+    }
+    return 0;
+}
+
+void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
+                                     int dirty_flags)
+{
+    unsigned long length;
+    int i, mask, len;
+    uint8_t *p;
+
+    start &= TARGET_PAGE_MASK;
+    end = TARGET_PAGE_ALIGN(end);
+
+    length = end - start;
+    if (length == 0)
+        return;
+    mask = ~dirty_flags;
+    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
+    len = length >> TARGET_PAGE_BITS;
+    for(i = 0; i < len; i++) {
+        p[i] &= mask;
+    }
+
+    return;
+}
+
+
+/* Unoptimised in Xen DM, nicked from git
+ *  aab33094073678d459ccaac5c60ea7533e8d1d8e */
+uint32_t ldub_phys(target_phys_addr_t addr)
+{
+    uint8_t val;
+    cpu_physical_memory_read(addr, &val, 1);
+    return val;
+}
+uint32_t lduw_phys(target_phys_addr_t addr)
+{
+    uint16_t val;
+    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
+    return tswap16(val);
+}
+uint64_t ldq_phys(target_phys_addr_t addr)
+{
+    uint64_t val;
+    cpu_physical_memory_read(addr, (uint8_t *)&val, 8);
+    return tswap64(val);
+}
+void stb_phys(target_phys_addr_t addr, uint32_t val)
+{
+    uint8_t v = val;
+    cpu_physical_memory_write(addr, &v, 1);
+}
+void stw_phys(target_phys_addr_t addr, uint32_t val)
+{
+    uint16_t v = tswap16(val);
+    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
+}
+void stq_phys(target_phys_addr_t addr, uint64_t val)
+{
+    val = tswap64(val);
+    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
+}
+
+/* stubs which we hope (think!) are OK for Xen DM */
+void stl_phys(target_phys_addr_t addr, uint32_t val)
+{
+    val = tswap32(val);
+    cpu_physical_memory_write(addr, (const uint8_t *)&val, 4);
+}
+void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
+{
+    stl_phys(addr, val);
+}
+uint32_t ldl_phys(target_phys_addr_t addr)
+{
+    uint32_t val;
+    cpu_physical_memory_read(addr, (uint8_t *)&val, 4);
+    return tswap32(val);
+}
+
+void cpu_physical_memory_write_rom(target_phys_addr_t addr,
+                                   const uint8_t *buf, int len)
+{
+    return cpu_physical_memory_write(addr,buf,len);
+}
+
+void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
+{
+}
+void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
+{
+}
+
+/* stub out various functions for Xen DM */
+void dump_exec_info(FILE *f,
+                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
+{
+}
+
+void monitor_disas(Monitor *mon, CPUState *env,
+                   target_ulong pc, int nb_insn, int is_physical, int flags)
+{
+}
+
+/*
+ * This next section was clone-and-hacked from the version in exec.c
+ * :-(.  But the exec.c version is full of tcg-specific stuff and
+ * assumptions about phys_ram_base.
+ */
+
+typedef struct MapClient {
+    void *opaque;
+    void (*callback)(void *opaque);
+    QLIST_ENTRY(MapClient) link;
+} MapClient;
+
+static QLIST_HEAD(map_client_list, MapClient) map_client_list
+    = QLIST_HEAD_INITIALIZER(map_client_list);
+
+void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
+{
+    MapClient *client = qemu_malloc(sizeof(*client));
+
+    client->opaque = opaque;
+    client->callback = callback;
+    QLIST_INSERT_HEAD(&map_client_list, client, link);
+    return client;
+}
+
+void cpu_unregister_map_client(void *_client)
+{
+    MapClient *client = (MapClient *)_client;
+
+    QLIST_REMOVE(client, link);
+    qemu_free(client);
+}
+
+static void cpu_notify_map_clients(void)
+{
+    MapClient *client;
+
+    while (!QLIST_EMPTY(&map_client_list)) {
+        client = QLIST_FIRST(&map_client_list);
+        client->callback(client->opaque);
+        cpu_unregister_map_client(client);
+    }
+}
+
+/* Map a physical memory region into a host virtual address.
+ * May map a subset of the requested range, given by and returned in *plen.
+ * May return NULL if resources needed to perform the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ * Use cpu_register_map_client() to know when retrying the map operation is
+ * likely to succeed.
+ */
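+/*
+ * A sketch of the intended calling pattern (illustrative only; names are
+ * hypothetical):
+ *
+ *     target_phys_addr_t maplen = size;
+ *     void *p = cpu_physical_memory_map(addr, &maplen, 1);
+ *     if (p) {
+ *         ... access up to maplen bytes at p ...
+ *         cpu_physical_memory_unmap(p, maplen, 1, maplen);
+ *     } else {
+ *         cpu_register_map_client(opaque, retry_callback);
+ *     }
+ */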
+void *cpu_physical_memory_map(target_phys_addr_t addr,
+                              target_phys_addr_t *plen,
+                              int is_write)
+{
+    unsigned long l = 0;
+#ifdef MAPCACHE
+    l = MCACHE_BUCKET_SIZE - (addr & (MCACHE_BUCKET_SIZE-1));
+    if ((*plen) > l) {
+        *plen = l;
+    }
+#endif
+    if (xen_logdirty_enable) {
+        xc_hvm_modified_memory(xen_xc, xen_domid, addr >> TARGET_PAGE_BITS,
+                ((addr + l + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
+                    - (addr >> TARGET_PAGE_BITS));
+    }
+
+    return qemu_map_cache(addr, 1);
+}
+
+/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
+ * Will also mark the memory as dirty if is_write == 1.  access_len gives
+ * the amount of memory that was actually read or written by the caller.
+ */
+void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
+                               int is_write, target_phys_addr_t access_len)
+{
+    qemu_invalidate_entry(buffer);
+    cpu_notify_map_clients();
+}
+
+
+void cpu_exit(CPUState *env)
+{
+    env->exit_request = 1;
+}
+
+void qemu_flush_coalesced_mmio_buffer(void)
+{
+}
+
+void *qemu_get_ram_ptr(ram_addr_t addr)
+{
+    RAMBlock *block;
+
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
+        if (addr - block->offset < block->length) {
+            QLIST_REMOVE(block, next);
+            QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
+            return block->host + (addr - block->offset);
+        }
+    }
+
+    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
+    abort();
+
+    return NULL;
+}
+
+int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
+                                   target_phys_addr_t end_addr)
+{
+    return 0;
+}
+ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
+{
+    return 0;
+}
+
+static ram_addr_t find_ram_offset(ram_addr_t size)
+{
+    RAMBlock *block;
+    ram_addr_t last = 0;
+
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
+        last = MAX(last, block->offset + block->length);
+    }
+
+    return last;
+}
+
+ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
+{
+    RAMBlock *new_block;
+
+    size = TARGET_PAGE_ALIGN(size);
+    new_block = qemu_malloc(sizeof(*new_block));
+
+    if (mem_path) {
+#if defined (__linux__) && !defined(TARGET_S390X)
+        new_block->host = 0; // file_ram_alloc(size, mem_path);
+        if (!new_block->host) {
+            new_block->host = qemu_vmalloc(size);
+#ifdef MADV_MERGEABLE
+            madvise(new_block->host, size, MADV_MERGEABLE);
+#endif
+        }
+#else
+        fprintf(stderr, "-mem-path option unsupported\n");
+        exit(1);
+#endif
+    } else {
+        new_block->host = qemu_vmalloc(size);
+#ifdef MADV_MERGEABLE
+        madvise(new_block->host, size, MADV_MERGEABLE);
+#endif
+    }
+    new_block->offset = find_ram_offset(size);
+    new_block->length = size;
+
+    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
+
+    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
+        (new_block->offset + size) >> TARGET_PAGE_BITS);
+    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
+           0xff, size >> TARGET_PAGE_BITS);
+
+    return new_block->offset;
+}
+
+void qemu_ram_free(ram_addr_t addr)
+{
+}
+
+void tb_flush(CPUState *env1)
+{
+}
+
+int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
+                          int flags, CPUWatchpoint **watchpoint)
+{
+    return -ENOSYS;
+}
+
+int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
+                          int flags)
+{
+    return -ENOENT;
+}
+
+void cpu_watchpoint_remove_all(CPUState *env, int mask)
+{
+}
+
+int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
+                          CPUBreakpoint **breakpoint)
+{
+    return -ENOSYS;
+}
+
+int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
+{
+    return -ENOSYS;
+}
+
+void cpu_breakpoint_remove_all(CPUState *env, int mask)
+{
+}
+
+void cpu_single_step(CPUState *env, int enabled)
+{
+}
diff --git a/target-xen/helper.c b/target-xen/helper.c
new file mode 100644
index 0000000..d588e64
--- /dev/null
+++ b/target-xen/helper.c
@@ -0,0 +1,69 @@
+/*
+ *  i386 helpers (without register variable usage)
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "cpu.h"
+
+CPUXenState *cpu_xen_init(const char *cpu_model)
+{
+    CPUXenState *env = NULL;
+    static int inited;
+
+    env = qemu_mallocz(sizeof(CPUXenState));
+    if (!env)
+        return NULL;
+    cpu_exec_init(env);
+
+    /* init various static tables */
+    if (!inited) {
+        inited = 1;
+
+        cpu_single_env = env;
+    }
+
+    return env;
+}
+
+int cpu_xen_exec(CPUState *env1)
+{
+    return 0;
+}
+
+void cpu_reset(CPUXenState *env)
+{
+}
+
+void cpu_dump_state(CPUState *env, FILE *f,
+                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
+                    int flags)
+{
+}
+
+/***********************************************************/
+/* x86 mmu */
+/* XXX: add PGE support */
+
+void cpu_x86_set_a20(CPUXenState *env, int a20_state)
+{
+}
+
+target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
+{
+    return addr;
+}
diff --git a/target-xen/machine.c b/target-xen/machine.c
new file mode 100644
index 0000000..e69de29
diff --git a/target-xen/qemu-xen.h b/target-xen/qemu-xen.h
new file mode 100644
index 0000000..d1910d6
--- /dev/null
+++ b/target-xen/qemu-xen.h
@@ -0,0 +1,30 @@
+#ifndef QEMU_XEN_H
+#define QEMU_XEN_H
+
+#include "hw/xen_common.h"
+
+/* vl.c */
+
+#if defined(__i386__) || defined(__x86_64__)
+#define phys_ram_addr(x) (qemu_map_cache(x, 0))
+#elif defined(__ia64__)
+#define phys_ram_addr(x) (((x) < ram_size) ? (phys_ram_base + (x)) : NULL)
+#endif
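+/* On x86 every guest-physical access presumably has to go through the map
+   cache, since the device model cannot keep all of guest RAM mapped at
+   once; on ia64 guest RAM below ram_size is assumed to be mapped
+   contiguously at phys_ram_base. */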
+
+/* xen_mapcache.c */
+
+uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, uint8_t lock);
+void     qemu_invalidate_entry(uint8_t *buffer);
+void     qemu_invalidate_map_cache(void);
+
+#define mapcache_lock()   ((void)0)
+#define mapcache_unlock() ((void)0)
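+/* These are no-ops here; presumably the map cache is only touched from a
+   single thread in this model, so the lock/unlock call sites in exec-dm.c
+   compile away. */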
+
+/* target-xen/exec-dm.c */
+
+int cpu_register_io_memory_fixed(int io_index,
+                           CPUReadMemoryFunc * const *mem_read,
+                           CPUWriteMemoryFunc * const *mem_write,
+                           void *opaque);
+
+#endif /*QEMU_XEN_H*/
diff --git a/target-xen/stub-functions.c b/target-xen/stub-functions.c
new file mode 100644
index 0000000..0db6898
--- /dev/null
+++ b/target-xen/stub-functions.c
@@ -0,0 +1,42 @@
+#include "config.h"
+#include "disas.h"
+#include "hw/apic.h"
+#include "hw/pc.h"
+#include "cpu.h"
+
+/* disas */
+struct syminfo *syminfos = NULL;
+
+/* apic */
+void apic_deliver_pic_intr(DeviceState *d, int level)
+{
+}
+
+int apic_get_interrupt(DeviceState *d)
+{
+    return -1;
+}
+
+int apic_accept_pic_intr(DeviceState *d)
+{
+    return 0;
+}
+
+/* vmmouse */
+void *vmmouse_init(void *m)
+{
+    return NULL;
+}
+
+/* cpu-exec */
+volatile sig_atomic_t exit_request;
+
+CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
+{
+    return NULL;
+}
+
+int qemu_cpu_has_work(CPUState *env)
+{
+    return 0;
+}
diff --git a/target-xen/xen_mapcache.c b/target-xen/xen_mapcache.c
new file mode 100644
index 0000000..39daae2
--- /dev/null
+++ b/target-xen/xen_mapcache.c
@@ -0,0 +1,14 @@
+#include "qemu-xen.h"
+
+uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, uint8_t lock)
+{
+    return phys_ram_addr(phys_addr);
+}
+
+void qemu_invalidate_map_cache(void)
+{
+}
+
+void qemu_invalidate_entry(uint8_t *buffer)
+{
+}
-- 
1.7.0.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel