# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID c4ac21dc3f16d7be84d480f8795e1902da99c230
# Parent ede16886f979dde3991f2a13c7eef71820a60cca
# Parent e8fb817c4c1530b9f763b8a726aa00d805173a56
merge
diff -r ede16886f979 -r c4ac21dc3f16 .hgignore
--- a/.hgignore Mon Mar 6 16:09:18 2006
+++ b/.hgignore Mon Mar 6 17:21:35 2006
@@ -166,6 +166,7 @@
^tools/xenstore/xenstore-read$
^tools/xenstore/xenstore-rm$
^tools/xenstore/xenstore-write$
+^tools/xenstore/xenstore-control$
^tools/xenstore/xenstore-ls$
^tools/xenstore/xenstored$
^tools/xenstore/xenstored_test$
diff -r ede16886f979 -r c4ac21dc3f16 docs/src/interface.tex
--- a/docs/src/interface.tex Mon Mar 6 16:09:18 2006
+++ b/docs/src/interface.tex Mon Mar 6 17:21:35 2006
@@ -1059,6 +1059,21 @@
\item[domain] the name of the frontend
\end{description}
\end{description}
+
+ \item[vtpm/] a directory containing vtpm backends
+ \begin{description}
+ \item[$<$domid$>$/] a directory containing vtpms for domid
+ \begin{description}
+ \item[$<$vtpm number$>$/] a directory for each vtpm
+ \item[frontend-id] the domain id of the frontend
+ \item[frontend] the path to the frontend
+ \item[instance] the instance of the virtual TPM that is used
+ \item[pref{\textunderscore}instance] the instance number as given in the VM configuration file;
+ may be different from {\bf instance}
+ \item[domain] the name of the domain of the frontend
+ \end{description}
+ \end{description}
+
\end{description}
\item[device/] a directory containing the frontend devices for the
@@ -1094,6 +1109,18 @@
\item[event-channel] the event channel used for the two ring queues
\end{description}
\end{description}
+
+ \item[vtpm/] a directory containing the vtpm frontend device for the
+ domain
+ \begin{description}
+ \item[$<$id$>$] a directory for vtpm id frontend device for the domain
+ \begin{description}
+ \item[backend-id] the backend domain id
+ \item[backend] a path to the backend's store entry
+ \item[ring-ref] the grant table reference for the tx/rx ring
+ \item[event-channel] the event channel used for the ring
+ \end{description}
+ \end{description}
\item[device-misc/] miscellanous information for devices
\begin{description}
@@ -1450,6 +1477,76 @@
value of {\tt first\_sect}.
\end{description}
+\section{Virtual TPM}
+
+Virtual TPM (VTPM) support provides TPM functionality to each virtual
+machine that requests this functionality in its configuration file.
+The interface enables domains to access their own private TPM as if it
+were a hardware TPM built into the machine.
+
+The virtual TPM interface is implemented as a split driver,
+similar to the network and block interfaces described above.
+The user domain hosting the frontend exports a character device /dev/tpm0
+to user-level applications for communicating with the virtual TPM.
+This is the same device interface that is also offered if a hardware TPM
+is available in the system. The backend provides a single interface,
+/dev/vtpm, on which the virtual TPM waits for commands from all domains
+that have located their backend in this domain.
+
+\subsection{Data Transfer}
+
+A single shared memory ring is used between the frontend and backend
+drivers. TPM requests and responses are sent in pages; a pointer to
+those pages, together with other information, is placed into the ring
+so that the backend can map the pages into its memory space using the
+grant table mechanism.
+
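+As an illustration only (adapted from the backend driver changes
+elsewhere in this changeset; {\tt tpmif}, {\tt i} and {\tt tx} refer to
+the backend interface, the ring slot index and the ring slot), the
+backend maps a granted page roughly as follows:
+
+\scriptsize
+\begin{verbatim}
+/* Sketch only: map the page granted by the frontend before copying. */
+struct gnttab_map_grant_ref map_op;
+
+map_op.host_addr = MMAP_VADDR(tpmif, i);  /* backend virtual address    */
+map_op.flags     = GNTMAP_host_map;
+map_op.ref       = tx->ref;               /* grant reference from ring  */
+map_op.dom       = tpmif->domid;          /* frontend (granting) domain */
+
+if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &map_op, 1))
+        BUG();
+\end{verbatim}
+\normalsize
+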
+The backend driver has been implemented to accept only well-formed
+TPM requests. To meet this requirement, the length indicator in the
+TPM request must correctly indicate the length of the request;
+otherwise an error message is automatically sent back by the device driver.
+
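+A minimal sketch of this check, assuming the usual TPM header layout of
+a 2-byte tag followed by a 4-byte big-endian length field ({\tt req}
+and its length are illustrative names, not taken from the driver):
+
+\scriptsize
+\begin{verbatim}
+/* Sketch only: reject requests whose embedded length is wrong. */
+uint32_t claimed_len;
+
+memcpy(&claimed_len, req + 2, sizeof(claimed_len)); /* length at offset 2 */
+if (ntohl(claimed_len) != req_len)
+        return -EINVAL;    /* malformed; an error response is sent back */
+\end{verbatim}
+\normalsize
+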
+The virtual TPM implementation listens for TPM requests on /dev/vtpm. Since
+it must be able to apply the TPM request packet to the virtual TPM instance
+associated with the virtual machine, a 4-byte virtual TPM instance
+identifier is prepended to each packet by the backend driver (in network
+byte order) for internal routing of the request.
+
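+For illustration only (the buffer and variable names below are not
+taken from the driver), this framing amounts to:
+
+\scriptsize
+\begin{verbatim}
+/* Sketch only: prepend the routing header before the TPM command. */
+uint32_t instance_no = htonl(tpm_instance);    /* network byte order    */
+
+memcpy(out_buf, &instance_no, 4);              /* 4-byte routing header */
+memcpy(out_buf + 4, tpm_request, request_len); /* unmodified TPM packet */
+\end{verbatim}
+\normalsize
+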
+\subsection{Virtual TPM ring interface}
+
+The TPM protocol is a strict request/response protocol and therefore
+only one ring is used to send requests from the frontend to the backend
+and responses on the reverse path.
+
+The request/response structure is defined as follows:
+
+\scriptsize
+\begin{verbatim}
+typedef struct {
+ unsigned long addr; /* Machine address of packet. */
+ grant_ref_t ref; /* grant table access reference. */
+ uint16_t unused; /* unused */
+ uint16_t size; /* Packet size in bytes. */
+} tpmif_tx_request_t;
+\end{verbatim}
+\normalsize
+
+The fields are as follows:
+
+\begin{description}
+\item[addr] The machine address of the page associated with the TPM
+ request/response; a request/response may span multiple
+ pages
+\item[ref] The grant table reference associated with the address.
+\item[size] The size of the remaining packet; up to
+ PAGE{\textunderscore}SIZE bytes can be found in the
+ page referenced by 'addr'
+\end{description}
+
+The frontend initially allocates several pages whose addresses
+are stored in the ring. Only these pages are used for exchange of
+requests and responses.
+
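+As a sketch (adapted from the frontend driver changes elsewhere in this
+changeset; {\tt tp}, {\tt txb} and {\tt gref} stand for the frontend's
+private state, a transmit buffer and an already-obtained grant
+reference), publishing one such page in ring slot {\tt i} looks roughly
+like:
+
+\scriptsize
+\begin{verbatim}
+/* Sketch only: advertise one pre-allocated page to the backend. */
+tpmif_tx_request_t *tx = &tp->tx->ring[i].req;
+
+tx->addr = virt_to_machine(txb->data);  /* machine address of the page  */
+tx->ref  = gref;                        /* grant ref covering that page */
+tx->size = txb->len;                    /* valid bytes in the page      */
+\end{verbatim}
+\normalsize
+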
\chapter{Further Information}
diff -r ede16886f979 -r c4ac21dc3f16 install.sh
--- a/install.sh Mon Mar 6 16:09:18 2006
+++ b/install.sh Mon Mar 6 17:21:35 2006
@@ -22,19 +22,29 @@
exit 1
fi
+tmp="`mktemp -d`"
+
echo "Installing Xen from '$src' to '$dst'..."
-(cd $src; tar -cf - --exclude etc/init.d --exclude etc/hotplug --exclude etc/udev * ) | tar -C $dst -xf -
-cp -fdRL $src/etc/init.d/* $dst/etc/init.d/
-echo "All done."
+(cd $src; tar -cf - * ) | tar -C "$tmp" -xf -
[ -x "$(which udevinfo)" ] && \
UDEV_VERSION=$(udevinfo -V | sed -e 's/^[^0-9]* \([0-9]\{1,\}\)[^0-9]\{0,\}/\1/')
if [ -n "$UDEV_VERSION" ] && [ $UDEV_VERSION -ge 059 ]; then
- cp -f $src/etc/udev/rules.d/*.rules $dst/etc/udev/rules.d/
+ echo " - installing for udev-based system"
+ rm -rf "$tmp/etc/hotplug"
else
- cp -f $src/etc/hotplug/*.agent $dst/etc/hotplug/
+ echo " - installing for hotplug-based system"
+ rm -rf "$tmp/etc/udev"
fi
+
+echo " - modifying permissions"
+chmod -R a+rX "$tmp"
+
+(cd $tmp; tar -cf - *) | tar --no-same-owner -C "$dst" -xf -
+rm -rf "$tmp"
+
+echo "All done."
echo "Checking to see whether prerequisite tools are installed..."
cd $src/../check
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/arch/i386/kernel/io_apic-xen.c
--- a/linux-2.6-xen-sparse/arch/i386/kernel/io_apic-xen.c Mon Mar 6 16:09:18 2006
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/io_apic-xen.c Mon Mar 6 17:21:35 2006
@@ -61,8 +61,8 @@
int ret;
op.cmd = PHYSDEVOP_APIC_READ;
- op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid;
- op.u.apic_op.offset = reg;
+ op.u.apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
+ op.u.apic_op.reg = reg;
ret = HYPERVISOR_physdev_op(&op);
if (ret)
return ret;
@@ -74,8 +74,8 @@
physdev_op_t op;
op.cmd = PHYSDEVOP_APIC_WRITE;
- op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid;
- op.u.apic_op.offset = reg;
+ op.u.apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
+ op.u.apic_op.reg = reg;
op.u.apic_op.value = value;
HYPERVISOR_physdev_op(&op);
}
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/arch/x86_64/kernel/io_apic-xen.c
--- a/linux-2.6-xen-sparse/arch/x86_64/kernel/io_apic-xen.c Mon Mar 6 16:09:18 2006
+++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/io_apic-xen.c Mon Mar 6 17:21:35 2006
@@ -108,8 +108,8 @@
int ret;
op.cmd = PHYSDEVOP_APIC_READ;
- op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid;
- op.u.apic_op.offset = reg;
+ op.u.apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
+ op.u.apic_op.reg = reg;
ret = HYPERVISOR_physdev_op(&op);
if (ret)
return ret;
@@ -121,8 +121,8 @@
physdev_op_t op;
op.cmd = PHYSDEVOP_APIC_WRITE;
- op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid;
- op.u.apic_op.offset = reg;
+ op.u.apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
+ op.u.apic_op.reg = reg;
op.u.apic_op.value = value;
HYPERVISOR_physdev_op(&op);
}
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/arch/x86_64/kernel/setup-xen.c
--- a/linux-2.6-xen-sparse/arch/x86_64/kernel/setup-xen.c Mon Mar 6 16:09:18 2006
+++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/setup-xen.c Mon Mar 6 17:21:35 2006
@@ -82,6 +82,8 @@
extern unsigned long start_pfn;
extern struct edid_info edid_info;
+extern void machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c);
+
shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
EXPORT_SYMBOL(HYPERVISOR_shared_info);
@@ -1405,6 +1407,8 @@
select_idle_routine(c);
detect_ht(c);
+ machine_specific_modify_cpu_capabilities(c);
+
/*
* On SMP, boot_cpu_data holds the common feature set between
* all CPUs; so make sure that we indicate which features are
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/arch/x86_64/kernel/x8664_ksyms-xen.c
--- a/linux-2.6-xen-sparse/arch/x86_64/kernel/x8664_ksyms-xen.c Mon Mar 6 16:09:18 2006
+++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/x8664_ksyms-xen.c Mon Mar 6 17:21:35 2006
@@ -32,8 +32,6 @@
#include <asm/tlbflush.h>
#include <asm/kdebug.h>
-extern spinlock_t rtc_lock;
-
#ifdef CONFIG_SMP
extern void __write_lock_failed(rwlock_t *rw);
extern void __read_lock_failed(rwlock_t *rw);
@@ -42,9 +40,6 @@
/* platform dependent support */
EXPORT_SYMBOL(boot_cpu_data);
//EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(ioremap_nocache);
-EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(pm_idle);
EXPORT_SYMBOL(pm_power_off);
@@ -101,8 +96,6 @@
#endif
EXPORT_SYMBOL(get_wchan);
-
-EXPORT_SYMBOL(rtc_lock);
#ifdef CONFIG_X86_LOCAL_APIC
EXPORT_SYMBOL_GPL(set_nmi_callback);
@@ -166,7 +159,5 @@
EXPORT_SYMBOL(flush_tlb_page);
#endif
-EXPORT_SYMBOL(cpu_khz);
-
EXPORT_SYMBOL(load_gs_index);
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/drivers/xen/char/mem.c
--- a/linux-2.6-xen-sparse/drivers/xen/char/mem.c Mon Mar 6 16:09:18 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/char/mem.c Mon Mar 6 17:21:35 2006
@@ -43,49 +43,85 @@
static ssize_t read_mem(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
- unsigned long i, p = *ppos;
- ssize_t read = -EFAULT;
+ unsigned long p = *ppos, ignored;
+ ssize_t read = 0, sz;
void __iomem *v;
- if ((v = ioremap(p, count)) == NULL) {
+ while (count > 0) {
/*
- * Some programs (e.g., dmidecode) groove off into weird RAM
- * areas where no table scan possibly exist (because Xen will
- * have stomped on them!). These programs get rather upset if
- * we let them know that Xen failed their access, so we fake
- * out a read of all zeroes. :-)
+ * Handle first page in case it's not aligned
*/
- for (i = 0; i < count; i++)
- if (put_user(0, buf+i))
+ if (-p & (PAGE_SIZE - 1))
+ sz = -p & (PAGE_SIZE - 1);
+ else
+ sz = PAGE_SIZE;
+
+ sz = min_t(unsigned long, sz, count);
+
+ if ((v = ioremap(p, sz)) == NULL) {
+ /*
+ * Some programs (e.g., dmidecode) groove off into weird RAM
+ * areas where no tables can possibly exist (because Xen will
+ * have stomped on them!). These programs get rather upset if
+ * we let them know that Xen failed their access, so we fake
+ * out a read of all zeroes. :-)
+ */
+ if (clear_user(buf, count))
return -EFAULT;
- return count;
+ read += count;
+ break;
+ }
+
+ ignored = copy_to_user(buf, v, sz);
+ iounmap(v);
+ if (ignored)
+ return -EFAULT;
+ buf += sz;
+ p += sz;
+ count -= sz;
+ read += sz;
}
- if (copy_to_user(buf, v, count))
- goto out;
- read = count;
*ppos += read;
-out:
- iounmap(v);
return read;
}
static ssize_t write_mem(struct file * file, const char __user * buf,
size_t count, loff_t *ppos)
{
- unsigned long p = *ppos;
- ssize_t written = -EFAULT;
+ unsigned long p = *ppos, ignored;
+ ssize_t written = 0, sz;
void __iomem *v;
- if ((v = ioremap(p, count)) == NULL)
- return -EFAULT;
- if (copy_from_user(v, buf, count))
- goto out;
+ while (count > 0) {
+ /*
+ * Handle first page in case it's not aligned
+ */
+ if (-p & (PAGE_SIZE - 1))
+ sz = -p & (PAGE_SIZE - 1);
+ else
+ sz = PAGE_SIZE;
- written = count;
+ sz = min_t(unsigned long, sz, count);
+
+ if ((v = ioremap(p, sz)) == NULL)
+ break;
+
+ ignored = copy_from_user(v, buf, sz);
+ iounmap(v);
+ if (ignored) {
+ written += sz - ignored;
+ if (written)
+ break;
+ return -EFAULT;
+ }
+ buf += sz;
+ p += sz;
+ count -= sz;
+ written += sz;
+ }
+
*ppos += written;
-out:
- iounmap(v);
return written;
}
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/drivers/xen/core/gnttab.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c Mon Mar 6 16:09:18 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c Mon Mar 6 17:21:35 2006
@@ -35,7 +35,6 @@
#include <xen/interface/xen.h>
#include <asm/fixmap.h>
#include <asm/uaccess.h>
-#include <xen/public/privcmd.h>
#include <xen/gnttab.h>
#include <asm/synch_bitops.h>
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c Mon Mar 6 16:09:18 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c Mon Mar 6 17:21:35 2006
@@ -19,8 +19,7 @@
LIST_HEAD(tpmif_list);
-tpmif_t *
-alloc_tpmif(domid_t domid, long int instance)
+static tpmif_t *alloc_tpmif(domid_t domid, long int instance)
{
struct page *page;
tpmif_t *tpmif;
@@ -45,16 +44,14 @@
return tpmif;
}
-void
-free_tpmif(tpmif_t * tpmif)
+static void free_tpmif(tpmif_t * tpmif)
{
num_frontends--;
list_del(&tpmif->tpmif_list);
kmem_cache_free(tpmif_cachep, tpmif);
}
-tpmif_t *
-tpmif_find(domid_t domid, long int instance)
+tpmif_t *tpmif_find(domid_t domid, long int instance)
{
tpmif_t *tpmif;
@@ -72,8 +69,7 @@
return alloc_tpmif(domid, instance);
}
-static int
-map_frontend_page(tpmif_t *tpmif, unsigned long shared_page)
+static int map_frontend_page(tpmif_t *tpmif, unsigned long shared_page)
{
int ret;
struct gnttab_map_grant_ref op = {
@@ -99,8 +95,7 @@
return 0;
}
-static void
-unmap_frontend_page(tpmif_t *tpmif)
+static void unmap_frontend_page(tpmif_t *tpmif)
{
struct gnttab_unmap_grant_ref op;
int ret;
@@ -115,14 +110,14 @@
BUG_ON(ret);
}
-int
-tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn)
+int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn)
{
int err;
evtchn_op_t op = {
.cmd = EVTCHNOP_bind_interdomain,
.u.bind_interdomain.remote_dom = tpmif->domid,
- .u.bind_interdomain.remote_port = evtchn };
+ .u.bind_interdomain.remote_port = evtchn,
+ };
if (tpmif->irq) {
return 0;
@@ -156,8 +151,7 @@
return 0;
}
-static void
-__tpmif_disconnect_complete(void *arg)
+static void __tpmif_disconnect_complete(void *arg)
{
tpmif_t *tpmif = (tpmif_t *) arg;
@@ -172,22 +166,19 @@
free_tpmif(tpmif);
}
-void
-tpmif_disconnect_complete(tpmif_t * tpmif)
+void tpmif_disconnect_complete(tpmif_t * tpmif)
{
INIT_WORK(&tpmif->work, __tpmif_disconnect_complete, (void *)tpmif);
schedule_work(&tpmif->work);
}
-void __init
-tpmif_interface_init(void)
+void __init tpmif_interface_init(void)
{
tpmif_cachep = kmem_cache_create("tpmif_cache", sizeof (tpmif_t),
0, 0, NULL, NULL);
}
-void __init
-tpmif_interface_exit(void)
+void __init tpmif_interface_exit(void)
{
kmem_cache_destroy(tpmif_cachep);
}
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c
--- a/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c Mon Mar 6 16:09:18 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c Mon Mar 6 17:21:35 2006
@@ -22,16 +22,22 @@
#include <xen/xenbus.h>
#include <xen/interface/grant_table.h>
-
/* local data structures */
struct data_exchange {
struct list_head pending_pak;
struct list_head current_pak;
unsigned int copied_so_far;
u8 has_opener;
- rwlock_t pak_lock; // protects all of the previous fields
+ rwlock_t pak_lock; // protects all of the previous fields
wait_queue_head_t wait_queue;
};
+
+struct vtpm_resp_hdr {
+ uint32_t instance_no;
+ uint16_t tag_no;
+ uint32_t len_no;
+ uint32_t ordinal_no;
+} __attribute__ ((packed));
struct packet {
struct list_head next;
@@ -50,36 +56,30 @@
PACKET_FLAG_CHECK_RESPONSESTATUS = 2,
};
+/* local variables */
static struct data_exchange dataex;
/* local function prototypes */
+static int _packet_write(struct packet *pak,
+ const char *data, size_t size, int userbuffer);
+static void processing_timeout(unsigned long ptr);
+static int packet_read_shmem(struct packet *pak,
+ tpmif_t * tpmif,
+ u32 offset,
+ char *buffer, int isuserbuffer, u32 left);
static int vtpm_queue_packet(struct packet *pak);
-static int _packet_write(struct packet *pak,
- const char *data, size_t size,
- int userbuffer);
-static void processing_timeout(unsigned long ptr);
-static int packet_read_shmem(struct packet *pak,
- tpmif_t *tpmif,
- u32 offset,
- char *buffer,
- int isuserbuffer,
- u32 left);
-
#define MIN(x,y) (x) < (y) ? (x) : (y)
-
/***************************************************************
- Buffer copying
+ Buffer copying for user and kernel space buffers.
***************************************************************/
-static inline int
-copy_from_buffer(void *to,
- const void *from,
- unsigned long size,
- int userbuffer)
-{
- if (userbuffer) {
- if (copy_from_user(to, from, size))
+static inline int copy_from_buffer(void *to,
+ const void *from, unsigned long size,
+ int isuserbuffer)
+{
+ if (isuserbuffer) {
+ if (copy_from_user(to, (void __user *)from, size))
return -EFAULT;
} else {
memcpy(to, from, size);
@@ -87,21 +87,36 @@
return 0;
}
+static inline int copy_to_buffer(void *to,
+ const void *from, unsigned long size,
+ int isuserbuffer)
+{
+ if (isuserbuffer) {
+ if (copy_to_user((void __user *)to, from, size))
+ return -EFAULT;
+ } else {
+ memcpy(to, from, size);
+ }
+ return 0;
+}
+
/***************************************************************
Packet-related functions
***************************************************************/
-static struct packet *
-packet_find_instance(struct list_head *head, u32 tpm_instance)
+static struct packet *packet_find_instance(struct list_head *head,
+ u32 tpm_instance)
{
struct packet *pak;
struct list_head *p;
+
/*
* traverse the list of packets and return the first
* one with the given instance number
*/
list_for_each(p, head) {
pak = list_entry(p, struct packet, next);
+
if (pak->tpm_instance == tpm_instance) {
return pak;
}
@@ -109,17 +124,18 @@
return NULL;
}
-static struct packet *
-packet_find_packet(struct list_head *head, void *packet)
+static struct packet *packet_find_packet(struct list_head *head, void *packet)
{
struct packet *pak;
struct list_head *p;
+
/*
* traverse the list of packets and return the first
* one with the given instance number
*/
list_for_each(p, head) {
pak = list_entry(p, struct packet, next);
+
if (pak == packet) {
return pak;
}
@@ -127,22 +143,20 @@
return NULL;
}
-static struct packet *
-packet_alloc(tpmif_t *tpmif, u32 size, u8 req_tag, u8 flags)
+static struct packet *packet_alloc(tpmif_t * tpmif,
+ u32 size, u8 req_tag, u8 flags)
{
struct packet *pak = NULL;
- pak = kmalloc(sizeof(struct packet),
- GFP_KERNEL);
+ pak = kzalloc(sizeof (struct packet), GFP_KERNEL);
if (NULL != pak) {
- memset(pak, 0x0, sizeof(*pak));
if (tpmif) {
pak->tpmif = tpmif;
pak->tpm_instance = tpmif->tpm_instance;
}
- pak->data_len = size;
- pak->req_tag = req_tag;
+ pak->data_len = size;
+ pak->req_tag = req_tag;
pak->last_read = 0;
- pak->flags = flags;
+ pak->flags = flags;
/*
* cannot do tpmif_get(tpmif); bad things happen
@@ -155,16 +169,16 @@
return pak;
}
-static void inline
-packet_reset(struct packet *pak)
+static void inline packet_reset(struct packet *pak)
{
pak->last_read = 0;
}
-static void inline
-packet_free(struct packet *pak)
-{
- del_singleshot_timer_sync(&pak->processing_timer);
+static void packet_free(struct packet *pak)
+{
+ if (timer_pending(&pak->processing_timer)) {
+ BUG();
+ }
kfree(pak->data_buffer);
/*
* cannot do tpmif_put(pak->tpmif); bad things happen
@@ -173,13 +187,13 @@
kfree(pak);
}
-static int
-packet_set(struct packet *pak,
- const unsigned char *buffer, u32 size)
+static int packet_set(struct packet *pak,
+ const unsigned char *buffer, u32 size)
{
int rc = 0;
unsigned char *buf = kmalloc(size, GFP_KERNEL);
- if (NULL != buf) {
+
+ if (buf) {
pak->data_buffer = buf;
memcpy(buf, buffer, size);
pak->data_len = size;
@@ -189,27 +203,21 @@
return rc;
}
-
/*
* Write data to the shared memory and send it to the FE.
*/
-static int
-packet_write(struct packet *pak,
- const char *data, size_t size,
- int userbuffer)
+static int packet_write(struct packet *pak,
+ const char *data, size_t size, int isuserbuffer)
{
int rc = 0;
- DPRINTK("Supposed to send %d bytes to front-end!\n",
- size);
-
- if (0 != (pak->flags & PACKET_FLAG_CHECK_RESPONSESTATUS)) {
+ if ((pak->flags & PACKET_FLAG_CHECK_RESPONSESTATUS)) {
#ifdef CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS
u32 res;
+
if (copy_from_buffer(&res,
- &data[2+4],
- sizeof(res),
- userbuffer)) {
+ &data[2 + 4], sizeof (res),
+ isuserbuffer)) {
return -EFAULT;
}
@@ -230,17 +238,14 @@
/* Don't send a respone to this packet. Just acknowledge it. */
rc = size;
} else {
- rc = _packet_write(pak, data, size, userbuffer);
+ rc = _packet_write(pak, data, size, isuserbuffer);
}
return rc;
}
-
-static int
-_packet_write(struct packet *pak,
- const char *data, size_t size,
- int userbuffer)
+int _packet_write(struct packet *pak,
+ const char *data, size_t size, int isuserbuffer)
{
/*
* Write into the shared memory pages directly
@@ -254,7 +259,7 @@
if (tpmif == NULL) {
return -EFAULT;
- }
+ }
if (tpmif->status == DISCONNECTED) {
return size;
@@ -273,16 +278,13 @@
return 0;
}
- map_op.host_addr = MMAP_VADDR(tpmif, i);
- map_op.flags = GNTMAP_host_map;
- map_op.ref = tx->ref;
- map_op.dom = tpmif->domid;
-
- if(unlikely(
- HYPERVISOR_grant_table_op(
- GNTTABOP_map_grant_ref,
- &map_op,
- 1))) {
+ map_op.host_addr = MMAP_VADDR(tpmif, i);
+ map_op.flags = GNTMAP_host_map;
+ map_op.ref = tx->ref;
+ map_op.dom = tpmif->domid;
+
+ if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
+ &map_op, 1))) {
BUG();
}
@@ -292,28 +294,27 @@
DPRINTK(" Grant table operation failure !\n");
return 0;
}
- set_phys_to_machine(__pa(MMAP_VADDR(tpmif,i)) >> PAGE_SHIFT,
- FOREIGN_FRAME(map_op.dev_bus_addr >> PAGE_SHIFT));
+ set_phys_to_machine(__pa(MMAP_VADDR(tpmif, i)) >> PAGE_SHIFT,
+ FOREIGN_FRAME(map_op.
+ dev_bus_addr >> PAGE_SHIFT));
tocopy = MIN(size - offset, PAGE_SIZE);
- if (copy_from_buffer((void *)(MMAP_VADDR(tpmif,i)|
- (tx->addr & ~PAGE_MASK)),
- &data[offset],
- tocopy,
- userbuffer)) {
+ if (copy_from_buffer((void *)(MMAP_VADDR(tpmif, i) |
+ (tx->addr & ~PAGE_MASK)),
+ &data[offset], tocopy, isuserbuffer)) {
tpmif_put(tpmif);
return -EFAULT;
}
tx->size = tocopy;
- unmap_op.host_addr = MMAP_VADDR(tpmif, i);
- unmap_op.handle = handle;
+ unmap_op.host_addr = MMAP_VADDR(tpmif, i);
+ unmap_op.handle = handle;
unmap_op.dev_bus_addr = 0;
- if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
- &unmap_op,
- 1))) {
+ if (unlikely
+ (HYPERVISOR_grant_table_op
+ (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
BUG();
}
@@ -322,8 +323,7 @@
}
rc = offset;
- DPRINTK("Notifying frontend via irq %d\n",
- tpmif->irq);
+ DPRINTK("Notifying frontend via irq %d\n", tpmif->irq);
notify_remote_via_irq(tpmif->irq);
return rc;
@@ -334,26 +334,19 @@
* provided buffer. Advance the read_last indicator which tells
* how many bytes have already been read.
*/
-static int
-packet_read(struct packet *pak, size_t numbytes,
- char *buffer, size_t buffersize,
- int userbuffer)
+static int packet_read(struct packet *pak, size_t numbytes,
+ char *buffer, size_t buffersize, int isuserbuffer)
{
tpmif_t *tpmif = pak->tpmif;
- /*
- * I am supposed to read 'numbytes' of data from the
- * buffer.
- * The first 4 bytes that are read are the instance number in
- * network byte order, after that comes the data from the
- * shared memory buffer.
+
+ /*
+ * Read 'numbytes' of data from the buffer. The first 4
+ * bytes are the instance number in network byte order,
+ * after that come the data from the shared memory buffer.
*/
u32 to_copy;
u32 offset = 0;
u32 room_left = buffersize;
- /*
- * Ensure that we see the request when we copy it.
- */
- mb();
if (pak->last_read < 4) {
/*
@@ -361,18 +354,13 @@
*/
u32 instance_no = htonl(pak->tpm_instance);
u32 last_read = pak->last_read;
+
to_copy = MIN(4 - last_read, numbytes);
- if (userbuffer) {
- if (copy_to_user(&buffer[0],
- &(((u8 *)&instance_no)[last_read]),
- to_copy)) {
- return -EFAULT;
- }
- } else {
- memcpy(&buffer[0],
- &(((u8 *)&instance_no)[last_read]),
- to_copy);
+ if (copy_to_buffer(&buffer[0],
+ &(((u8 *) & instance_no)[last_read]),
+ to_copy, isuserbuffer)) {
+ return -EFAULT;
}
pak->last_read += to_copy;
@@ -388,39 +376,30 @@
if (pak->data_buffer) {
u32 to_copy = MIN(pak->data_len - offset, room_left);
u32 last_read = pak->last_read - 4;
- if (userbuffer) {
- if (copy_to_user(&buffer[offset],
- &pak->data_buffer[last_read],
- to_copy)) {
- return -EFAULT;
- }
- } else {
- memcpy(&buffer[offset],
- &pak->data_buffer[last_read],
- to_copy);
+
+ if (copy_to_buffer(&buffer[offset],
+ &pak->data_buffer[last_read],
+ to_copy, isuserbuffer)) {
+ return -EFAULT;
}
pak->last_read += to_copy;
offset += to_copy;
} else {
offset = packet_read_shmem(pak,
- tpmif,
- offset,
- buffer,
- userbuffer,
- room_left);
+ tpmif,
+ offset,
+ buffer,
+ isuserbuffer, room_left);
}
}
return offset;
}
-
-static int
-packet_read_shmem(struct packet *pak,
- tpmif_t *tpmif,
- u32 offset,
- char *buffer,
- int isuserbuffer,
- u32 room_left) {
+static int packet_read_shmem(struct packet *pak,
+ tpmif_t * tpmif,
+ u32 offset, char *buffer, int isuserbuffer,
+ u32 room_left)
+{
u32 last_read = pak->last_read - 4;
u32 i = (last_read / PAGE_SIZE);
u32 pg_offset = last_read & (PAGE_SIZE - 1);
@@ -428,6 +407,7 @@
grant_handle_t handle;
tpmif_tx_request_t *tx;
+
tx = &tpmif->tx->ring[0].req;
/*
* Start copying data at the page with index 'index'
@@ -443,13 +423,12 @@
tx = &tpmif->tx->ring[i].req;
map_op.host_addr = MMAP_VADDR(tpmif, i);
- map_op.flags = GNTMAP_host_map;
- map_op.ref = tx->ref;
- map_op.dom = tpmif->domid;
-
- if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
- &map_op,
- 1))) {
+ map_op.flags = GNTMAP_host_map;
+ map_op.ref = tx->ref;
+ map_op.dom = tpmif->domid;
+
+ if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
+ &map_op, 1))) {
BUG();
}
@@ -462,41 +441,33 @@
if (to_copy > tx->size) {
/*
- * This is the case when the user wants to read more
- * than what we have. So we just give him what we
- * have.
+ * User requests more than what's available
*/
to_copy = MIN(tx->size, to_copy);
}
DPRINTK("Copying from mapped memory at %08lx\n",
- (unsigned long)(MMAP_VADDR(tpmif,i) |
- (tx->addr & ~PAGE_MASK)));
-
- src = (void *)(MMAP_VADDR(tpmif,i) | ((tx->addr & ~PAGE_MASK) + pg_offset));
- if (isuserbuffer) {
- if (copy_to_user(&buffer[offset],
- src,
- to_copy)) {
- return -EFAULT;
- }
- } else {
- memcpy(&buffer[offset],
- src,
- to_copy);
- }
-
+ (unsigned long)(MMAP_VADDR(tpmif, i) |
+ (tx->addr & ~PAGE_MASK)));
+
+ src = (void *)(MMAP_VADDR(tpmif, i) |
+ ((tx->addr & ~PAGE_MASK) + pg_offset));
+ if (copy_to_buffer(&buffer[offset],
+ src, to_copy, isuserbuffer)) {
+ return -EFAULT;
+ }
DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
- tpmif->domid, buffer[offset], buffer[offset+1],buffer[offset+2],buffer[offset+3]);
-
- unmap_op.host_addr = MMAP_VADDR(tpmif, i);
- unmap_op.handle = handle;
+ tpmif->domid, buffer[offset], buffer[offset + 1],
+ buffer[offset + 2], buffer[offset + 3]);
+
+ unmap_op.host_addr = MMAP_VADDR(tpmif, i);
+ unmap_op.handle = handle;
unmap_op.dev_bus_addr = 0;
- if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
- &unmap_op,
- 1))) {
+ if (unlikely
+ (HYPERVISOR_grant_table_op
+ (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
BUG();
}
@@ -507,21 +478,19 @@
to_copy = MIN(PAGE_SIZE, room_left);
i++;
- } /* while (to_copy > 0) */
+ } /* while (to_copy > 0) */
/*
* Adjust the last_read pointer
*/
pak->last_read = last_read + 4;
return offset;
}
-
/* ============================================================
* The file layer for reading data from this device
* ============================================================
*/
-static int
-vtpm_op_open(struct inode *inode, struct file *f)
+static int vtpm_op_open(struct inode *inode, struct file *f)
{
int rc = 0;
unsigned long flags;
@@ -536,9 +505,8 @@
return rc;
}
-static ssize_t
-vtpm_op_read(struct file *file,
- char __user * data, size_t size, loff_t * offset)
+static ssize_t vtpm_op_read(struct file *file,
+ char __user * data, size_t size, loff_t * offset)
{
int ret_size = -ENODATA;
struct packet *pak = NULL;
@@ -549,7 +517,7 @@
if (list_empty(&dataex.pending_pak)) {
write_unlock_irqrestore(&dataex.pak_lock, flags);
wait_event_interruptible(dataex.wait_queue,
- !list_empty(&dataex.pending_pak));
+ !list_empty(&dataex.pending_pak));
write_lock_irqsave(&dataex.pak_lock, flags);
}
@@ -561,7 +529,7 @@
DPRINTK("size given by app: %d, available: %d\n", size, left);
- ret_size = MIN(size,left);
+ ret_size = MIN(size, left);
ret_size = packet_read(pak, ret_size, data, size, 1);
if (ret_size < 0) {
@@ -574,7 +542,8 @@
DPRINTK("All data from this packet given to
app.\n");
/* All data given to app */
- del_singleshot_timer_sync(&pak->processing_timer);
+ del_singleshot_timer_sync(&pak->
+ processing_timer);
list_del(&pak->next);
list_add_tail(&pak->next, &dataex.current_pak);
/*
@@ -582,7 +551,7 @@
* the more time we give the TPM to process the request.
*/
mod_timer(&pak->processing_timer,
- jiffies + (num_frontends * 60 * HZ));
+ jiffies + (num_frontends * 60 * HZ));
dataex.copied_so_far = 0;
}
}
@@ -597,16 +566,15 @@
/*
* Write operation - only works after a previous read operation!
*/
-static ssize_t
-vtpm_op_write(struct file *file, const char __user * data, size_t size,
- loff_t * offset)
+static ssize_t vtpm_op_write(struct file *file,
+ const char __user * data, size_t size,
+ loff_t * offset)
{
struct packet *pak;
int rc = 0;
unsigned int off = 4;
unsigned long flags;
- u32 instance_no = 0;
- u32 len_no = 0;
+ struct vtpm_resp_hdr vrh;
/*
* Minimum required packet size is:
@@ -616,45 +584,38 @@
* 4 bytes for the ordinal
* sum: 14 bytes
*/
- if ( size < off + 10 ) {
+ if (size < sizeof (vrh))
return -EFAULT;
- }
-
- if (copy_from_user(&instance_no,
- (void __user *)&data[0],
- 4)) {
+
+ if (copy_from_user(&vrh, data, sizeof (vrh)))
return -EFAULT;
- }
-
- if (copy_from_user(&len_no,
- (void __user *)&data[off+2],
- 4) ||
- (off + ntohl(len_no) != size)) {
+
+ /* malformed packet? */
+ if ((off + ntohl(vrh.len_no)) != size)
return -EFAULT;
- }
write_lock_irqsave(&dataex.pak_lock, flags);
- pak = packet_find_instance(&dataex.current_pak, ntohl(instance_no));
+ pak = packet_find_instance(&dataex.current_pak,
+ ntohl(vrh.instance_no));
if (pak == NULL) {
write_unlock_irqrestore(&dataex.pak_lock, flags);
- printk(KERN_ALERT "No associated packet!\n");
+ printk(KERN_ALERT "No associated packet! (inst=%d)\n",
+ ntohl(vrh.instance_no));
return -EFAULT;
- } else {
- del_singleshot_timer_sync(&pak->processing_timer);
- list_del(&pak->next);
- }
+ }
+
+ del_singleshot_timer_sync(&pak->processing_timer);
+ list_del(&pak->next);
write_unlock_irqrestore(&dataex.pak_lock, flags);
/*
- * The first 'offset' bytes must be the instance number.
- * I will just pull that from the packet.
+ * The first 'offset' bytes must be the instance number - skip them.
*/
size -= off;
- data = &data[off];
-
- rc = packet_write(pak, data, size, 1);
+
+ rc = packet_write(pak, &data[off], size, 1);
if (rc > 0) {
/* I neglected the first 4 bytes */
@@ -664,10 +625,10 @@
return rc;
}
-static int
-vtpm_op_release(struct inode *inode, struct file *file)
+static int vtpm_op_release(struct inode *inode, struct file *file)
{
unsigned long flags;
+
vtpm_release_packets(NULL, 1);
write_lock_irqsave(&dataex.pak_lock, flags);
dataex.has_opener = 0;
@@ -675,10 +636,11 @@
return 0;
}
-static unsigned int
-vtpm_op_poll(struct file *file, struct poll_table_struct *pts)
+static unsigned int vtpm_op_poll(struct file *file,
+ struct poll_table_struct *pts)
{
unsigned int flags = POLLOUT | POLLWRNORM;
+
poll_wait(file, &dataex.wait_queue, pts);
if (!list_empty(&dataex.pending_pak)) {
flags |= POLLIN | POLLRDNORM;
@@ -696,54 +658,47 @@
.poll = vtpm_op_poll,
};
-static struct miscdevice ibmvtpms_miscdevice = {
+static struct miscdevice vtpms_miscdevice = {
.minor = 225,
.name = "vtpm",
.fops = &vtpm_ops,
};
-
/***************************************************************
Virtual TPM functions and data stuctures
***************************************************************/
static u8 create_cmd[] = {
- 1,193, /* 0: TPM_TAG_RQU_COMMAMD */
- 0,0,0,19, /* 2: length */
- 0,0,0,0x1, /* 6: VTPM_ORD_OPEN */
- 0, /* 10: VTPM type */
- 0,0,0,0, /* 11: domain id */
- 0,0,0,0 /* 15: instance id */
+ 1, 193, /* 0: TPM_TAG_RQU_COMMAMD */
+ 0, 0, 0, 19, /* 2: length */
+ 0, 0, 0, 0x1, /* 6: VTPM_ORD_OPEN */
+ 0, /* 10: VTPM type */
+ 0, 0, 0, 0, /* 11: domain id */
+ 0, 0, 0, 0 /* 15: instance id */
};
-static u8 destroy_cmd[] = {
- 1,193, /* 0: TPM_TAG_RQU_COMMAMD */
- 0,0,0,14, /* 2: length */
- 0,0,0,0x2, /* 6: VTPM_ORD_CLOSE */
- 0,0,0,0 /* 10: instance id */
-};
-
-int tpmif_vtpm_open(tpmif_t *tpmif, domid_t domid, u32 instance)
+int tpmif_vtpm_open(tpmif_t * tpmif, domid_t domid, u32 instance)
{
int rc = 0;
struct packet *pak;
pak = packet_alloc(tpmif,
- sizeof(create_cmd),
- create_cmd[0],
- PACKET_FLAG_DISCARD_RESPONSE|
- PACKET_FLAG_CHECK_RESPONSESTATUS);
+ sizeof (create_cmd),
+ create_cmd[1],
+ PACKET_FLAG_DISCARD_RESPONSE |
+ PACKET_FLAG_CHECK_RESPONSESTATUS);
if (pak) {
- u8 buf[sizeof(create_cmd)];
- u32 domid_no = htonl((u32)domid);
+ u8 buf[sizeof (create_cmd)];
+ u32 domid_no = htonl((u32) domid);
u32 instance_no = htonl(instance);
- memcpy(buf, create_cmd, sizeof(create_cmd));
-
- memcpy(&buf[11], &domid_no, sizeof(u32));
- memcpy(&buf[15], &instance_no, sizeof(u32));
+
+ memcpy(buf, create_cmd, sizeof (create_cmd));
+
+ memcpy(&buf[11], &domid_no, sizeof (u32));
+ memcpy(&buf[15], &instance_no, sizeof (u32));
/* copy the buffer into the packet */
- rc = packet_set(pak, buf, sizeof(buf));
+ rc = packet_set(pak, buf, sizeof (buf));
if (rc == 0) {
pak->tpm_instance = 0;
@@ -759,23 +714,30 @@
return rc;
}
+static u8 destroy_cmd[] = {
+ 1, 193, /* 0: TPM_TAG_RQU_COMMAMD */
+ 0, 0, 0, 14, /* 2: length */
+ 0, 0, 0, 0x2, /* 6: VTPM_ORD_CLOSE */
+ 0, 0, 0, 0 /* 10: instance id */
+};
+
int tpmif_vtpm_close(u32 instid)
{
int rc = 0;
struct packet *pak;
pak = packet_alloc(NULL,
- sizeof(create_cmd),
- create_cmd[0],
- PACKET_FLAG_DISCARD_RESPONSE);
+ sizeof (destroy_cmd),
+ destroy_cmd[1], PACKET_FLAG_DISCARD_RESPONSE);
if (pak) {
- u8 buf[sizeof(destroy_cmd)];
+ u8 buf[sizeof (destroy_cmd)];
u32 instid_no = htonl(instid);
- memcpy(buf, destroy_cmd, sizeof(destroy_cmd));
- memcpy(&buf[10], &instid_no, sizeof(u32));
+
+ memcpy(buf, destroy_cmd, sizeof (destroy_cmd));
+ memcpy(&buf[10], &instid_no, sizeof (u32));
/* copy the buffer into the packet */
- rc = packet_set(pak, buf, sizeof(buf));
+ rc = packet_set(pak, buf, sizeof (buf));
if (rc == 0) {
pak->tpm_instance = 0;
@@ -791,23 +753,22 @@
return rc;
}
-
/***************************************************************
Utility functions
***************************************************************/
-static int
-tpm_send_fail_message(struct packet *pak, u8 req_tag)
+static int tpm_send_fail_message(struct packet *pak, u8 req_tag)
{
int rc;
static const unsigned char tpm_error_message_fail[] = {
0x00, 0x00,
0x00, 0x00, 0x00, 0x0a,
- 0x00, 0x00, 0x00, 0x09 /* TPM_FAIL */
+ 0x00, 0x00, 0x00, 0x09 /* TPM_FAIL */
};
- unsigned char buffer[sizeof(tpm_error_message_fail)];
-
- memcpy(buffer, tpm_error_message_fail, sizeof(tpm_error_message_fail));
+ unsigned char buffer[sizeof (tpm_error_message_fail)];
+
+ memcpy(buffer, tpm_error_message_fail,
+ sizeof (tpm_error_message_fail));
/*
* Insert the right response tag depending on the given tag
* All response tags are '+3' to the request tag.
@@ -817,23 +778,24 @@
/*
* Write the data to shared memory and notify the front-end
*/
- rc = packet_write(pak, buffer, sizeof(buffer), 0);
+ rc = packet_write(pak, buffer, sizeof (buffer), 0);
return rc;
}
-
-static void
-_vtpm_release_packets(struct list_head *head, tpmif_t *tpmif,
- int send_msgs)
+static void _vtpm_release_packets(struct list_head *head,
+ tpmif_t * tpmif, int send_msgs)
{
struct packet *pak;
- struct list_head *pos, *tmp;
+ struct list_head *pos,
+ *tmp;
list_for_each_safe(pos, tmp, head) {
pak = list_entry(pos, struct packet, next);
+
if (tpmif == NULL || pak->tpmif == tpmif) {
int can_send = 0;
+
del_singleshot_timer_sync(&pak->processing_timer);
list_del(&pak->next);
@@ -849,9 +811,7 @@
}
}
-
-int
-vtpm_release_packets(tpmif_t *tpmif, int send_msgs)
+int vtpm_release_packets(tpmif_t * tpmif, int send_msgs)
{
unsigned long flags;
@@ -860,23 +820,22 @@
_vtpm_release_packets(&dataex.pending_pak, tpmif, send_msgs);
_vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);
- write_unlock_irqrestore(&dataex.pak_lock,
- flags);
+ write_unlock_irqrestore(&dataex.pak_lock, flags);
return 0;
}
-
static int vtpm_queue_packet(struct packet *pak)
{
int rc = 0;
+
if (dataex.has_opener) {
unsigned long flags;
+
write_lock_irqsave(&dataex.pak_lock, flags);
list_add_tail(&pak->next, &dataex.pending_pak);
/* give the TPM some time to pick up the request */
mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
- write_unlock_irqrestore(&dataex.pak_lock,
- flags);
+ write_unlock_irqrestore(&dataex.pak_lock, flags);
wake_up_interruptible(&dataex.wait_queue);
} else {
@@ -885,24 +844,22 @@
return rc;
}
-
-static int vtpm_receive(tpmif_t *tpmif, u32 size)
+static int vtpm_receive(tpmif_t * tpmif, u32 size)
{
int rc = 0;
unsigned char buffer[10];
__be32 *native_size;
-
- struct packet *pak = packet_alloc(tpmif, size, buffer[4], 0);
- if (NULL == pak) {
+ struct packet *pak = packet_alloc(tpmif, size, 0, 0);
+
+ if (!pak)
return -ENOMEM;
- }
/*
* Read 10 bytes from the received buffer to test its
* content for validity.
*/
- if (sizeof(buffer) != packet_read(pak,
- sizeof(buffer), buffer,
- sizeof(buffer), 0)) {
+ if (sizeof (buffer) != packet_read(pak,
+ sizeof (buffer), buffer,
+ sizeof (buffer), 0)) {
goto failexit;
}
/*
@@ -911,7 +868,7 @@
*/
packet_reset(pak);
- native_size = (__force __be32 *)(&buffer[4+2]);
+ native_size = (__force __be32 *) (&buffer[4 + 2]);
/*
* Verify that the size of the packet is correct
* as indicated and that there's actually someone reading packets.
@@ -920,25 +877,23 @@
*/
if (size < 10 ||
be32_to_cpu(*native_size) != size ||
- 0 == dataex.has_opener ||
- tpmif->status != CONNECTED) {
- rc = -EINVAL;
- goto failexit;
+ 0 == dataex.has_opener || tpmif->status != CONNECTED) {
+ rc = -EINVAL;
+ goto failexit;
} else {
- if ((rc = vtpm_queue_packet(pak)) < 0) {
+ rc = vtpm_queue_packet(pak);
+ if (rc < 0)
goto failexit;
- }
}
return 0;
-failexit:
+ failexit:
if (pak) {
- tpm_send_fail_message(pak, buffer[4+1]);
+ tpm_send_fail_message(pak, buffer[4 + 1]);
packet_free(pak);
}
return rc;
}
-
/*
* Timeout function that gets invoked when a packet has not been processed
@@ -951,22 +906,23 @@
{
struct packet *pak = (struct packet *)ptr;
unsigned long flags;
+
write_lock_irqsave(&dataex.pak_lock, flags);
/*
* The packet needs to be searched whether it
* is still on the list.
*/
if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
- pak == packet_find_packet(&dataex.current_pak, pak) ) {
+ pak == packet_find_packet(&dataex.current_pak, pak)) {
list_del(&pak->next);
- tpm_send_fail_message(pak, pak->req_tag);
+ if ((pak->flags & PACKET_FLAG_DISCARD_RESPONSE) == 0) {
+ tpm_send_fail_message(pak, pak->req_tag);
+ }
packet_free(pak);
}
write_unlock_irqrestore(&dataex.pak_lock, flags);
}
-
-
static void tpm_tx_action(unsigned long unused);
static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);
@@ -974,21 +930,18 @@
static struct list_head tpm_schedule_list;
static spinlock_t tpm_schedule_list_lock;
-static inline void
-maybe_schedule_tx_action(void)
+static inline void maybe_schedule_tx_action(void)
{
smp_mb();
tasklet_schedule(&tpm_tx_tasklet);
}
-static inline int
-__on_tpm_schedule_list(tpmif_t * tpmif)
+static inline int __on_tpm_schedule_list(tpmif_t * tpmif)
{
return tpmif->list.next != NULL;
}
-static void
-remove_from_tpm_schedule_list(tpmif_t * tpmif)
+static void remove_from_tpm_schedule_list(tpmif_t * tpmif)
{
spin_lock_irq(&tpm_schedule_list_lock);
if (likely(__on_tpm_schedule_list(tpmif))) {
@@ -999,8 +952,7 @@
spin_unlock_irq(&tpm_schedule_list_lock);
}
-static void
-add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
+static void add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
{
if (__on_tpm_schedule_list(tpmif))
return;
@@ -1013,22 +965,18 @@
spin_unlock_irq(&tpm_schedule_list_lock);
}
-void
-tpmif_schedule_work(tpmif_t * tpmif)
+void tpmif_schedule_work(tpmif_t * tpmif)
{
add_to_tpm_schedule_list_tail(tpmif);
maybe_schedule_tx_action();
}
-void
-tpmif_deschedule_work(tpmif_t * tpmif)
+void tpmif_deschedule_work(tpmif_t * tpmif)
{
remove_from_tpm_schedule_list(tpmif);
}
-
-static void
-tpm_tx_action(unsigned long unused)
+static void tpm_tx_action(unsigned long unused)
{
struct list_head *ent;
tpmif_t *tpmif;
@@ -1042,10 +990,6 @@
tpmif = list_entry(ent, tpmif_t, list);
tpmif_get(tpmif);
remove_from_tpm_schedule_list(tpmif);
- /*
- * Ensure that we see the request when we read from it.
- */
- mb();
tx = &tpmif->tx->ring[0].req;
@@ -1056,22 +1000,22 @@
}
}
-irqreturn_t
-tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-{
- tpmif_t *tpmif = dev_id;
+irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
+{
+ tpmif_t *tpmif = (tpmif_t *) dev_id;
+
add_to_tpm_schedule_list_tail(tpmif);
maybe_schedule_tx_action();
return IRQ_HANDLED;
}
-static int __init
-tpmback_init(void)
+static int __init tpmback_init(void)
{
int rc;
- if ((rc = misc_register(&ibmvtpms_miscdevice)) != 0) {
- printk(KERN_ALERT "Could not register misc device for TPM
BE.\n");
+ if ((rc = misc_register(&vtpms_miscdevice)) != 0) {
+ printk(KERN_ALERT
+ "Could not register misc device for TPM BE.\n");
return rc;
}
@@ -1094,13 +1038,11 @@
module_init(tpmback_init);
-static void __exit
-tpmback_exit(void)
-{
-
+static void __exit tpmback_exit(void)
+{
tpmif_xenbus_exit();
tpmif_interface_exit();
- misc_deregister(&ibmvtpms_miscdevice);
+ misc_deregister(&vtpms_miscdevice);
}
module_exit(tpmback_exit);
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c Mon Mar 6 16:09:18 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c Mon Mar 6 17:21:35 2006
@@ -602,7 +602,6 @@
tx = &tp->tx->ring[i].req;
- tx->id = i;
tx->addr = virt_to_machine(txb->data);
tx->size = txb->len;
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_client.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_client.c Mon Mar 6 16:09:18 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_client.c Mon Mar 6 17:21:35 2006
@@ -95,18 +95,25 @@
*/
int current_state;
-
- int err = xenbus_scanf(xbt, dev->nodename, "state", "%d",
+ int err;
+
+ if (state == dev->state)
+ return 0;
+
+ err = xenbus_scanf(xbt, dev->nodename, "state", "%d",
&current_state);
- if ((err == 1 && (XenbusState)current_state == state) ||
- err == -ENOENT)
+ if (err != 1)
return 0;
err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
if (err) {
- xenbus_dev_fatal(dev, err, "writing new state");
+ if (state != XenbusStateClosing) /* Avoid looping */
+ xenbus_dev_fatal(dev, err, "writing new state");
return err;
}
+
+ dev->state = state;
+
return 0;
}
EXPORT_SYMBOL(xenbus_switch_state);
@@ -138,7 +145,6 @@
ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
- dev->has_error = 1;
dev_err(&dev->dev, "%s\n", printf_buffer);
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c Mon Mar 6 16:09:18 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c Mon Mar 6 17:21:35 2006
@@ -89,14 +89,18 @@
{
struct xenstore_domain_interface *intf = xenstore_domain_interface();
XENSTORE_RING_IDX cons, prod;
+ int rc;
while (len != 0) {
void *dst;
unsigned int avail;
- wait_event_interruptible(xb_waitq,
- (intf->req_prod - intf->req_cons) !=
- XENSTORE_RING_SIZE);
+ rc = wait_event_interruptible(
+ xb_waitq,
+ (intf->req_prod - intf->req_cons) !=
+ XENSTORE_RING_SIZE);
+ if (rc < 0)
+ return rc;
/* Read indexes, then verify. */
cons = intf->req_cons;
@@ -130,13 +134,17 @@
{
struct xenstore_domain_interface *intf = xenstore_domain_interface();
XENSTORE_RING_IDX cons, prod;
+ int rc;
while (len != 0) {
unsigned int avail;
const char *src;
- wait_event_interruptible(xb_waitq,
- intf->rsp_cons != intf->rsp_prod);
+ rc = wait_event_interruptible(
+ xb_waitq,
+ intf->rsp_cons != intf->rsp_prod);
+ if (rc < 0)
+ return rc;
/* Read indexes, then verify. */
cons = intf->rsp_cons;
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c Mon Mar 6 16:09:18 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c Mon Mar 6 17:21:35 2006
@@ -45,9 +45,7 @@
#include <asm/hypervisor.h>
#include <xen/xenbus.h>
#include <xen/xen_proc.h>
-#include <xen/balloon.h>
#include <xen/evtchn.h>
-#include <xen/public/evtchn.h>
#include "xenbus_comms.h"
@@ -886,9 +884,33 @@
EXPORT_SYMBOL(unregister_xenstore_notifier);
+static int all_devices_ready_(struct device *dev, void *data)
+{
+ struct xenbus_device *xendev = to_xenbus_device(dev);
+ int *result = data;
+
+ if (xendev->state != XenbusStateConnected) {
+ *result = 0;
+ return 1;
+ }
+
+ return 0;
+}
+
+
+static int all_devices_ready(void)
+{
+ int ready = 1;
+ bus_for_each_dev(&xenbus_frontend.bus, NULL, &ready,
+ all_devices_ready_);
+ return ready;
+}
+
void xenbus_probe(void *unused)
{
+ int i;
+
BUG_ON((xenstored_ready <= 0));
/* Enumerate devices in xenstore. */
@@ -901,12 +923,50 @@
/* Notify others that xenstore is up */
notifier_call_chain(&xenstore_chain, 0, NULL);
-}
-
-
+
+ /* Wait, with a 10 second timeout, for all devices currently
+ configured. We need to do this to guarantee that the filesystems
+ and / or network devices needed for boot are available, before we
+ can allow the boot to proceed.
+
+ A possible improvement here would be to have the tools add a
+ per-device flag to the store entry, indicating whether it is needed
+ at boot time. This would allow people who knew what they were
+ doing to accelerate their boot slightly, but of course needs tools
+ or manual intervention to set up those flags correctly.
+ */
+ for (i = 0; i < 10 * HZ; i++) {
+ if (all_devices_ready())
+ return;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+
+ printk(KERN_WARNING
+ "XENBUS: Timeout connecting to devices!\n");
+}
+
+
+static struct file_operations xsd_kva_fops;
static struct proc_dir_entry *xsd_kva_intf;
static struct proc_dir_entry *xsd_port_intf;
+static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ size_t size = vma->vm_end - vma->vm_start;
+
+ if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
+ return -EINVAL;
+
+ vma->vm_pgoff = mfn_to_pfn(xen_start_info->store_mfn);
+
+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ size, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
static int xsd_kva_read(char *page, char **start, off_t off,
int count, int *eof, void *data)
@@ -980,9 +1040,14 @@
xen_start_info->store_evtchn = op.u.alloc_unbound.port;
/* And finally publish the above info in /proc/xen */
- if((xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0400)))
+ if ((xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0400))) {
+ memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops,
+ sizeof(xsd_kva_fops));
+ xsd_kva_fops.mmap = xsd_kva_mmap;
+ xsd_kva_intf->proc_fops = &xsd_kva_fops;
xsd_kva_intf->read_proc = xsd_kva_read;
- if((xsd_port_intf = create_xen_proc_entry("xsd_port", 0400)))
+ }
+ if ((xsd_port_intf = create_xen_proc_entry("xsd_port", 0400)))
xsd_port_intf->read_proc = xsd_port_read;
}
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c Mon Mar 6 16:09:18 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c Mon Mar 6 17:21:35 2006
@@ -123,8 +123,9 @@
while (list_empty(&xs_state.reply_list)) {
spin_unlock(&xs_state.reply_lock);
- wait_event_interruptible(xs_state.reply_waitq,
- !list_empty(&xs_state.reply_list));
+ /* XXX FIXME: Avoid synchronous wait for response here. */
+ wait_event(xs_state.reply_waitq,
+ !list_empty(&xs_state.reply_list));
spin_lock(&xs_state.reply_lock);
}
@@ -685,6 +686,9 @@
wait_event_interruptible(watch_events_waitq,
!list_empty(&watch_events));
+ if (kthread_should_stop())
+ break;
+
down(&xenwatch_mutex);
spin_lock(&watch_events_lock);
@@ -705,6 +709,8 @@
up(&xenwatch_mutex);
}
+
+ return 0;
}
static int process_msg(void)
@@ -778,7 +784,11 @@
if (err)
printk(KERN_WARNING "XENBUS error %d while reading "
"message\n", err);
- }
+ if (kthread_should_stop())
+ break;
+ }
+
+ return 0;
}
int xs_init(void)
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/include/asm-i386/mach-xen/setup_arch_post.h
--- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/setup_arch_post.h Mon Mar 6 16:09:18 2006
+++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/setup_arch_post.h Mon Mar 6 17:21:35 2006
@@ -16,7 +16,7 @@
return "Xen";
}
-void __init machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
+void __devinit machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
{
clear_bit(X86_FEATURE_VME, c->x86_capability);
clear_bit(X86_FEATURE_DE, c->x86_capability);
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/setup_arch_post.h
--- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/setup_arch_post.h Mon Mar 6 16:09:18 2006
+++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/setup_arch_post.h Mon Mar 6 17:21:35 2006
@@ -1,28 +1,12 @@
/**
- * machine_specific_memory_setup - Hook for machine specific memory setup.
+ * machine_specific_* - Hooks for machine specific setup.
*
* Description:
* This is included late in kernel/setup.c so that it can make
* use of all of the static functions.
**/
-static char * __init machine_specific_memory_setup(void)
-{
- char *who;
- unsigned long start_pfn, max_pfn;
-
- who = "Xen";
-
- start_pfn = 0;
- max_pfn = xen_start_info->nr_pages;
-
- e820.nr_map = 0;
- add_memory_region(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn) -
PFN_PHYS(start_pfn), E820_RAM);
-
- return who;
-}
-
-void __init machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
+void __cpuinit machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
{
clear_bit(X86_FEATURE_VME, c->x86_capability);
clear_bit(X86_FEATURE_DE, c->x86_capability);
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/include/xen/xenbus.h
--- a/linux-2.6-xen-sparse/include/xen/xenbus.h Mon Mar 6 16:09:18 2006
+++ b/linux-2.6-xen-sparse/include/xen/xenbus.h Mon Mar 6 17:21:35 2006
@@ -63,7 +63,7 @@
int otherend_id;
struct xenbus_watch otherend_watch;
struct device dev;
- int has_error;
+ XenbusState state;
void *data;
};
diff -r ede16886f979 -r c4ac21dc3f16 tools/debugger/gdb/gdb-6.2.1-xen-sparse/gdb/gdbserver/linux-xen-low.c
--- a/tools/debugger/gdb/gdb-6.2.1-xen-sparse/gdb/gdbserver/linux-xen-low.c Mon Mar 6 16:09:18 2006
+++ b/tools/debugger/gdb/gdb-6.2.1-xen-sparse/gdb/gdbserver/linux-xen-low.c Mon Mar 6 17:21:35 2006
@@ -41,8 +41,6 @@
#define TRACE_ENTER /* printf("enter %s\n", __FUNCTION__) */
-long (*myptrace)(int xc_handle, enum __ptrace_request, uint32_t, long, long);
-int (*myxcwait)(int xc_handle, int domain, int *status, int options) ;
static int xc_handle;
static inline int
@@ -170,7 +168,7 @@
add_thread (0, new_process);
new_process->stop_expected = 0;
- if (myptrace (xc_handle, PTRACE_ATTACH, domid, 0, 0) != 0) {
+ if (xc_ptrace (xc_handle, PTRACE_ATTACH, domid, 0, isfile) != 0) {
fprintf (stderr, "Cannot attach to domain %d: %s (%d)\n", domid,
strerror (errno), errno);
fflush (stderr);
@@ -188,7 +186,7 @@
{
struct thread_info *thread = (struct thread_info *) entry;
struct process_info *process = get_thread_process (thread);
- myptrace (xc_handle, PTRACE_KILL, pid_of (process), 0, 0);
+ xc_ptrace (xc_handle, PTRACE_KILL, pid_of (process), 0, 0);
}
@@ -202,7 +200,7 @@
linux_detach_one_process (struct inferior_list_entry *entry)
{
- myptrace (xc_handle, PTRACE_DETACH, current_domid, 0, 0);
+ xc_ptrace (xc_handle, PTRACE_DETACH, current_domid, 0, 0);
}
@@ -228,7 +226,7 @@
linux_wait (char *status)
{
int w;
- if (myxcwait(xc_handle, current_domid, &w, 0))
+ if (xc_waitdomain(xc_handle, current_domid, &w, 0))
return -1;
linux_set_inferior();
@@ -250,7 +248,7 @@
for_each_inferior(&all_threads, regcache_invalidate_one);
if (debug_threads)
fprintf(stderr, "step: %d\n", step);
- myptrace (xc_handle, step ? PTRACE_SINGLESTEP : PTRACE_CONT,
+ xc_ptrace (xc_handle, step ? PTRACE_SINGLESTEP : PTRACE_CONT,
resume_info->thread, 0, 0);
}
@@ -275,7 +273,7 @@
}
buf = malloc (regset->size);
- res = myptrace (xc_handle, regset->get_request,
+ res = xc_ptrace (xc_handle, regset->get_request,
curvcpuid(),
0, (PTRACE_XFER_TYPE)buf);
if (res < 0)
@@ -329,7 +327,7 @@
buf = malloc (regset->size);
regset->fill_function (buf);
- res = myptrace (xc_handle, regset->set_request, curvcpuid(), 0, (PTRACE_XFER_TYPE)buf);
+ res = xc_ptrace (xc_handle, regset->set_request, curvcpuid(), 0, (PTRACE_XFER_TYPE)buf);
if (res < 0)
{
if (errno == EIO)
@@ -407,7 +405,7 @@
for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
{
errno = 0;
- buffer[i] = myptrace (xc_handle, PTRACE_PEEKTEXT, curvcpuid(), (PTRACE_ARG3_TYPE) addr, 0);
+ buffer[i] = xc_ptrace (xc_handle, PTRACE_PEEKTEXT, curvcpuid(), (PTRACE_ARG3_TYPE) addr, 0);
if (errno)
return errno;
}
@@ -440,13 +438,13 @@
/* Fill start and end extra bytes of buffer with existing memory data. */
- buffer[0] = myptrace (xc_handle, PTRACE_PEEKTEXT, curvcpuid(),
+ buffer[0] = xc_ptrace (xc_handle, PTRACE_PEEKTEXT, curvcpuid(),
(PTRACE_ARG3_TYPE) addr, 0);
if (count > 1)
{
buffer[count - 1]
- = myptrace (xc_handle, PTRACE_PEEKTEXT, curvcpuid(),
+ = xc_ptrace (xc_handle, PTRACE_PEEKTEXT, curvcpuid(),
(PTRACE_ARG3_TYPE) (addr + (count - 1)
* sizeof (PTRACE_XFER_TYPE)),
0);
@@ -460,7 +458,7 @@
for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
{
errno = 0;
- myptrace (xc_handle, PTRACE_POKETEXT, curvcpuid(),
+ xc_ptrace (xc_handle, PTRACE_POKETEXT, curvcpuid(),
(PTRACE_ARG3_TYPE) addr, buffer[i]);
if (errno)
return errno;
@@ -561,13 +559,6 @@
the_low_target.breakpoint_len);
init_registers ();
linux_init_signals ();
- if (isfile) {
- myptrace = xc_ptrace_core;
- myxcwait = xc_waitdomain_core;
- } else {
- myptrace = xc_ptrace;
- myxcwait = xc_waitdomain;
- }
using_threads = thread_db_init ();
}
diff -r ede16886f979 -r c4ac21dc3f16 tools/debugger/gdb/gdbbuild
--- a/tools/debugger/gdb/gdbbuild Mon Mar 6 16:09:18 2006
+++ b/tools/debugger/gdb/gdbbuild Mon Mar 6 17:21:35 2006
@@ -1,7 +1,9 @@
#!/bin/sh
+[ "$GDB_MIRROR" ] || GDB_MIRROR="ftp://ftp.gnu.org/gnu/gdb/"
+
rm -rf gdb-6.2.1 gdb-6.2.1-linux-i386-xen
-[ -a gdb-6.2.1.tar.bz2 ] || wget -c ftp://ftp.gnu.org/gnu/gdb/gdb-6.2.1.tar.bz2
+[ -a gdb-6.2.1.tar.bz2 ] || wget -c "$GDB_MIRROR/gdb-6.2.1.tar.bz2"
tar xjf gdb-6.2.1.tar.bz2
cd gdb-6.2.1-xen-sparse
@@ -12,8 +14,10 @@
cd gdb-6.2.1-linux-i386-xen
../gdb-6.2.1/configure
-# some people don't have gmake
-if which gmake ; then
+# Use $MAKE if set, else use gmake if present, otherwise use make
+if [ "$MAKE" ]; then
+ $MAKE
+elif which gmake ; then
gmake -j4
else
make -j4
diff -r ede16886f979 -r c4ac21dc3f16 tools/examples/block
--- a/tools/examples/block Mon Mar 6 16:09:18 2006
+++ b/tools/examples/block Mon Mar 6 17:21:35 2006
@@ -129,7 +129,14 @@
same_vm()
{
local otherdom="$1"
- local othervm=$(xenstore-read "/local/domain/$otherdom/vm")
+ # Note that othervm can be MISSING here, because Xend will be racing with
+ # the hotplug scripts -- the entries in /local/domain can be removed by
+ # Xend before the hotplug scripts have removed the entry in
+ # /local/domain/0/backend/. In this case, we want to pretend that the
+ # VM is the same as FRONTEND_UUID, because that way the 'sharing' will be
+ # allowed.
+ local othervm=$(xenstore_read_default "/local/domain/$otherdom/vm" \
+ "$FRONTEND_UUID")
[ "$FRONTEND_UUID" == "$othervm" ]
}
@@ -314,7 +321,28 @@
fi
fi
- f=$(readlink -f "$f")
+ # Canonicalise the filename for the comparison.
+
+ # I have seen this readlink fails because the filename given by
+ # losetup is only the basename. This cannot happen when the loop
+ # device is set up through this script, because file is
+ # canonicalised above, but it may happen when loop devices are set
+ # up some other way. This readlink may also conceivably fail if
+ # the file backing this loop device has been removed.
+
+ # For maximum safety, in the case that $f does not resolve, we
+ # assume that $file and $f are in the same directory.
+
+ # If you create a loopback filesystem, remove it and continue to
+ # run on it, and then create another file with the same name, then
+ # this check will block that -- don't do that.
+
+ # If you create loop devices through some other mechanism, use
+ # relative filenames, and then use the same filename through this
+ # script, then this check will block that -- don't do that either.
+
+ f=$(readlink -f "$f" || echo $(dirname "$file")/$(basename "$f"))
+
if [ "$f" == "$file" ]
then
diff -r ede16886f979 -r c4ac21dc3f16 tools/examples/xen-hotplug-cleanup
--- a/tools/examples/xen-hotplug-cleanup Mon Mar 6 16:09:18 2006
+++ b/tools/examples/xen-hotplug-cleanup Mon Mar 6 17:21:35 2006
@@ -12,10 +12,11 @@
claim_lock "block"
# remove device frontend store entries
-xenstore-rm -t $(xenstore-read "$XENBUS_PATH/frontend") || true
+xenstore-rm -t \
+ $(xenstore-read "$XENBUS_PATH/frontend" 2>/dev/null) 2>/dev/null || true
# remove device backend store entries
-xenstore-rm -t "$XENBUS_PATH" || true
-xenstore-rm -t "error/$XENBUS_PATH" || true
+xenstore-rm -t "$XENBUS_PATH" 2>/dev/null || true
+xenstore-rm -t "error/$XENBUS_PATH" 2>/dev/null || true
release_lock "block"
diff -r ede16886f979 -r c4ac21dc3f16 tools/firmware/hvmloader/Makefile
--- a/tools/firmware/hvmloader/Makefile Mon Mar 6 16:09:18 2006
+++ b/tools/firmware/hvmloader/Makefile Mon Mar 6 17:21:35 2006
@@ -17,6 +17,9 @@
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place - Suite 330, Boston, MA 02111-1307 USA.
#
+
+# External CFLAGS can do more harm than good.
+CFLAGS :=
XEN_ROOT = ../../..
include $(XEN_ROOT)/Config.mk
diff -r ede16886f979 -r c4ac21dc3f16 tools/firmware/vmxassist/Makefile
--- a/tools/firmware/vmxassist/Makefile Mon Mar 6 16:09:18 2006
+++ b/tools/firmware/vmxassist/Makefile Mon Mar 6 17:21:35 2006
@@ -17,6 +17,9 @@
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place - Suite 330, Boston, MA 02111-1307 USA.
#
+
+# External CFLAGS can do more harm than good.
+CFLAGS :=
XEN_ROOT = ../../..
include $(XEN_ROOT)/Config.mk
diff -r ede16886f979 -r c4ac21dc3f16 tools/firmware/vmxassist/setup.c
--- a/tools/firmware/vmxassist/setup.c Mon Mar 6 16:09:18 2006
+++ b/tools/firmware/vmxassist/setup.c Mon Mar 6 17:21:35 2006
@@ -123,6 +123,8 @@
void
setup_gdt(void)
{
+ unsigned long long addr = (unsigned long long) &tss;
+
/* setup task state segment */
memset(&tss, 0, sizeof(tss));
tss.ss0 = DATA_SELECTOR;
@@ -130,8 +132,7 @@
tss.iomap_base = offsetof(struct tss, iomap);
/* initialize gdt's tss selector */
- unsigned long long addr = (unsigned long long) &tss;
- gdt[TSS_SELECTOR / sizeof(gdt[0])] |=
+ gdt[TSS_SELECTOR / sizeof(gdt[0])] |=
((addr & 0xFF000000) << (56-24)) |
((addr & 0x00FF0000) << (32-16)) |
((addr & 0x0000FFFF) << (16)) |
diff -r ede16886f979 -r c4ac21dc3f16 tools/ioemu/audio/audio.c
--- a/tools/ioemu/audio/audio.c Mon Mar 6 16:09:18 2006
+++ b/tools/ioemu/audio/audio.c Mon Mar 6 17:21:35 2006
@@ -713,7 +713,7 @@
delta = now - sw->old_ticks;
bytes = (delta * sw->bytes_per_second) / ticks_per_sec;
if (delta < 0) {
- dolog ("whoops delta(<0)=%lld\n", delta);
+ dolog ("whoops delta(<0)=%"PRId64"\n", delta);
return 0;
}
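
The ioemu hunks in this changeset (audio.c, i8254.c, helper2.c) replace %lld/%llx conversions with the <inttypes.h> PRId64/PRIx64 macros, so that 64-bit values print correctly on both 32-bit and 64-bit builds. A minimal standalone sketch of the idiom, separate from the patch:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t  delta = -42;
        uint64_t data  = 0xdeadbeefcafef00dULL;

        /* PRId64/PRIx64 expand to the right length modifier ("ld"/"lx"
           or "lld"/"llx") for the build, so one format string works on
           both 32-bit and 64-bit hosts. */
        printf("delta=%" PRId64 " data=0x%" PRIx64 "\n", delta, data);
        return 0;
    }
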
diff -r ede16886f979 -r c4ac21dc3f16 tools/ioemu/hw/i8254.c
--- a/tools/ioemu/hw/i8254.c Mon Mar 6 16:09:18 2006
+++ b/tools/ioemu/hw/i8254.c Mon Mar 6 17:21:35 2006
@@ -249,7 +249,7 @@
req->u.data |= (irq << 16);
req->u.data |= (hvm_channel << 24);
req->u.data |= ((s->rw_mode) << 26);
- fprintf(logfile, "HVM_PIT:pass info 0x%llx to HV!\n", req->u.data);
+ fprintf(logfile, "HVM_PIT:pass info 0x%"PRIx64" to HV!\n", req->u.data);
}
static inline void pit_load_count(PITChannelState *s, int val)
diff -r ede16886f979 -r c4ac21dc3f16 tools/ioemu/monitor.c
--- a/tools/ioemu/monitor.c Mon Mar 6 16:09:18 2006
+++ b/tools/ioemu/monitor.c Mon Mar 6 17:21:35 2006
@@ -676,19 +676,19 @@
break;
case '-':
{
- int has_option;
+ long has_option;
/* option */
-
+
c = *typestr++;
if (c == '\0')
goto bad_type;
- while (isspace(*p))
+ while (isspace(*p))
p++;
has_option = 0;
if (*p == '-') {
p++;
if (*p != c) {
- term_printf("%s: unsupported option -%c\n",
+ term_printf("%s: unsupported option -%c\n",
cmdname, *p);
goto fail;
}
diff -r ede16886f979 -r c4ac21dc3f16 tools/ioemu/target-i386-dm/helper2.c
--- a/tools/ioemu/target-i386-dm/helper2.c Mon Mar 6 16:09:18 2006
+++ b/tools/ioemu/target-i386-dm/helper2.c Mon Mar 6 17:21:35 2006
@@ -138,11 +138,11 @@
req = &(shared_page->vcpu_iodata[i].vp_ioreq);
term_printf("vcpu %d: event port %d\n",
i, shared_page->vcpu_iodata[i].vp_eport);
- term_printf(" req state: %x, pvalid: %x, addr: %llx, "
- "data: %llx, count: %llx, size: %llx\n",
+ term_printf(" req state: %x, pvalid: %x, addr: %"PRIx64", "
+ "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
req->state, req->pdata_valid, req->addr,
req->u.data, req->count, req->size);
- term_printf(" IO totally occurred on this vcpu: %llx\n",
+ term_printf(" IO totally occurred on this vcpu: %"PRIx64"\n",
req->io_count);
}
}
@@ -158,8 +158,8 @@
return req;
fprintf(logfile, "False I/O request ... in-service already: "
- "%x, pvalid: %x, port: %llx, "
- "data: %llx, count: %llx, size: %llx\n",
+ "%x, pvalid: %x, port: %"PRIx64", "
+ "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
req->state, req->pdata_valid, req->addr,
req->u.data, req->count, req->size);
return NULL;
@@ -460,12 +460,6 @@
FD_ZERO(&wakeup_rfds);
FD_SET(evtchn_fd, &wakeup_rfds);
-#if __WORDSIZE == 32
-#define ULONGLONG_MAX 0xffffffffffffffffULL
-#else
-#define ULONGLONG_MAX ULONG_MAX
-#endif
-
tun_receive_handler(&rfds);
if ( FD_ISSET(evtchn_fd, &rfds) ) {
cpu_handle_ioreq(env);
diff -r ede16886f979 -r c4ac21dc3f16 tools/ioemu/vl.c
--- a/tools/ioemu/vl.c Mon Mar 6 16:09:18 2006
+++ b/tools/ioemu/vl.c Mon Mar 6 17:21:35 2006
@@ -2672,6 +2672,7 @@
char qemu_dm_logfilename[64];
const char *loadvm = NULL;
unsigned long nr_pages, extra_pages, ram_pages, *page_array;
+ xc_dominfo_t info;
extern void *shared_page;
extern void *shared_vram;
@@ -3132,7 +3133,7 @@
ram_pages = ram_size/PAGE_SIZE;
#if defined(__i386__) || defined(__x86_64__)
- vgaram_pages = (vga_ram_size -1)/PAGE_SIZE + 1;
+ vgaram_pages = (vga_ram_size -1) / PAGE_SIZE + 1;
free_pages = vgaram_pages / L1_PAGETABLE_ENTRIES;
extra_pages = vgaram_pages + free_pages;
#else
@@ -3142,7 +3143,6 @@
xc_handle = xc_interface_open();
- xc_dominfo_t info;
xc_domain_getinfo(xc_handle, domid, 1, &info);
nr_pages = info.nr_pages + extra_pages;
diff -r ede16886f979 -r c4ac21dc3f16 tools/libxc/xc_core.c
--- a/tools/libxc/xc_core.c Mon Mar 6 16:09:18 2006
+++ b/tools/libxc/xc_core.c Mon Mar 6 17:21:35 2006
@@ -61,7 +61,7 @@
nr_pages = info.nr_pages;
- header.xch_magic = 0xF00FEBED;
+ header.xch_magic = XC_CORE_MAGIC;
header.xch_nr_vcpus = nr_vcpus;
header.xch_nr_pages = nr_pages;
header.xch_ctxt_offset = sizeof(struct xc_core_header);
@@ -71,8 +71,12 @@
(sizeof(vcpu_guest_context_t) *
nr_vcpus) +
(nr_pages * sizeof(unsigned long)));
- write(dump_fd, &header, sizeof(struct xc_core_header));
- write(dump_fd, &ctxt, sizeof(ctxt[0]) * nr_vcpus);
+ if (write(dump_fd, &header, sizeof(struct xc_core_header)) < 0 ||
+ write(dump_fd, &ctxt, sizeof(ctxt[0]) * nr_vcpus) < 0)
+ {
+ PERROR("write failed");
+ goto error_out;
+ }
if ((page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL) {
printf("Could not allocate memory\n");
@@ -82,7 +86,11 @@
printf("Could not get the page frame list\n");
goto error_out;
}
- write(dump_fd, page_array, nr_pages * sizeof(unsigned long));
+ if (write(dump_fd, page_array, nr_pages * sizeof(unsigned long)) < 0)
+ {
+ PERROR("write failed");
+ goto error_out;
+ }
lseek(dump_fd, header.xch_pages_offset, SEEK_SET);
for (dump_mem = dump_mem_start, i = 0; i < nr_pages; i++) {
copy_from_domain_page(xc_handle, domid, page_array, i, dump_mem);
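
The xc_core.c hunk above now checks the return value of each write() and reports a failure with PERROR. It still treats a short write as success; a dump loop that also wants to retry short writes could route its output through a helper along these lines (a sketch only, not part of the patch; the name write_exact is illustrative):

    #include <errno.h>
    #include <unistd.h>

    /* Write exactly len bytes, retrying on short writes and EINTR.
       Returns 0 on success, -1 on error with errno set by write(). */
    static int write_exact(int fd, const void *buf, size_t len)
    {
        const char *p = buf;

        while (len != 0) {
            ssize_t n = write(fd, p, len);
            if (n < 0) {
                if (errno == EINTR)
                    continue;
                return -1;
            }
            p   += n;
            len -= (size_t)n;
        }
        return 0;
    }
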
diff -r ede16886f979 -r c4ac21dc3f16 tools/libxc/xc_ptrace.c
--- a/tools/libxc/xc_ptrace.c Mon Mar 6 16:09:18 2006
+++ b/tools/libxc/xc_ptrace.c Mon Mar 6 17:21:35 2006
@@ -7,14 +7,13 @@
#include "xc_private.h"
#include "xg_private.h"
-#include <thread_db.h>
#include "xc_ptrace.h"
-
/* XXX application state */
static long nr_pages = 0;
static unsigned long *page_array = NULL;
static int current_domid = -1;
+static int current_isfile;
static cpumap_t online_cpumap;
static cpumap_t regs_valid;
@@ -32,7 +31,8 @@
if (online)
*online = 0;
- if ( !(regs_valid & (1 << cpu)) ) {
+ if ( !(regs_valid & (1 << cpu)) )
+ {
retval = xc_vcpu_getcontext(xc_handle, current_domid,
cpu, &ctxt[cpu]);
if ( retval )
@@ -50,9 +50,6 @@
return retval;
}
-#define FETCH_REGS(cpu) if (fetch_regs(xc_handle, cpu, NULL)) goto error_out;
-
-
static struct thr_ev_handlers {
thr_ev_handler_t td_create;
thr_ev_handler_t td_death;
@@ -95,14 +92,12 @@
*cpumap = 0;
for (i = 0; i <= d->max_vcpu_id; i++) {
if ((retval = fetch_regs(xc_handle, i, &online)))
- goto error_out;
+ return retval;
if (online)
*cpumap |= (1 << i);
}
return 0;
- error_out:
- return retval;
}
/*
@@ -118,7 +113,8 @@
int index;
while ( (index = ffsll(changed_cpumap)) ) {
- if ( cpumap & (1 << (index - 1)) ) {
+ if ( cpumap & (1 << (index - 1)) )
+ {
if (handlers.td_create) handlers.td_create(index - 1);
} else {
printf("thread death: %d\n", index - 1);
@@ -143,34 +139,32 @@
uint64_t *l3, *l2, *l1;
static void *v;
- FETCH_REGS(cpu);
+ if (fetch_regs(xc_handle, cpu, NULL))
+ return NULL;
l3 = xc_map_foreign_range(
xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3]
>> PAGE_SHIFT);
if ( l3 == NULL )
- goto error_out;
+ return NULL;
l2p = l3[l3_table_offset_pae(va)] >> PAGE_SHIFT;
l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ,
l2p);
if ( l2 == NULL )
- goto error_out;
+ return NULL;
l1p = l2[l2_table_offset_pae(va)] >> PAGE_SHIFT;
l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
if ( l1 == NULL )
- goto error_out;
+ return NULL;
p = l1[l1_table_offset_pae(va)] >> PAGE_SHIFT;
if ( v != NULL )
munmap(v, PAGE_SIZE);
v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p);
if ( v == NULL )
- goto error_out;
+ return NULL;
return (void *)((unsigned long)v | (va & (PAGE_SIZE - 1)));
-
- error_out:
- return NULL;
}
static void *
@@ -215,17 +209,18 @@
if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
{
printf("Could not allocate memory\n");
- goto error_out;
+ return NULL;
}
if ( xc_get_pfn_list(xc_handle, current_domid,
page_array, nr_pages) != nr_pages )
{
printf("Could not get the page frame list\n");
- goto error_out;
+ return NULL;
}
}
- FETCH_REGS(cpu);
+ if (fetch_regs(xc_handle, cpu, NULL))
+ return NULL;
if ( ctxt[cpu].ctrlreg[3] != cr3_phys[cpu] )
{
@@ -236,10 +231,10 @@
xc_handle, current_domid, PAGE_SIZE, PROT_READ,
cr3_phys[cpu] >> PAGE_SHIFT);
if ( cr3_virt[cpu] == NULL )
- goto error_out;
+ return NULL;
}
if ( (pde = cr3_virt[cpu][vtopdi(va)]) == 0 )
- goto error_out;
+ return NULL;
if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
if ( pde != pde_phys[cpu] )
@@ -251,10 +246,10 @@
xc_handle, current_domid, PAGE_SIZE, PROT_READ,
pde_phys[cpu] >> PAGE_SHIFT);
if ( pde_virt[cpu] == NULL )
- goto error_out;
+ return NULL;
}
if ( (page = pde_virt[cpu][vtopti(va)]) == 0 )
- goto error_out;
+ return NULL;
if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
if ( (page != page_phys[cpu]) || (perm != prev_perm[cpu]) )
@@ -268,19 +263,16 @@
if ( page_virt[cpu] == NULL )
{
page_phys[cpu] = 0;
- goto error_out;
+ return NULL;
}
prev_perm[cpu] = perm;
}
return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
-
- error_out:
- return NULL;
-}
-
-int
-xc_waitdomain(
+}
+
+static int
+__xc_waitdomain(
int xc_handle,
int domain,
int *status,
@@ -335,7 +327,6 @@
long edata)
{
DECLARE_DOM0_OP;
- int status = 0;
struct gdb_regs pt;
long retval = 0;
unsigned long *guest_va;
@@ -350,84 +341,83 @@
{
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
- guest_va = (unsigned long *)map_domain_va(
- xc_handle, cpu, addr, PROT_READ);
+ if (current_isfile)
+ guest_va = (unsigned long *)map_domain_va_core(current_domid,
+ cpu, addr, ctxt);
+ else
+ guest_va = (unsigned long *)map_domain_va(xc_handle,
+ cpu, addr, PROT_READ);
if ( guest_va == NULL )
- {
- status = EFAULT;
- goto error_out;
- }
+ goto out_error;
retval = *guest_va;
break;
case PTRACE_POKETEXT:
case PTRACE_POKEDATA:
/* XXX assume that all CPUs have the same address space */
- guest_va = (unsigned long *)map_domain_va(
- xc_handle, cpu, addr, PROT_READ|PROT_WRITE);
- if ( guest_va == NULL ) {
- status = EFAULT;
- goto error_out;
- }
+ if (current_isfile)
+ guest_va = (unsigned long *)map_domain_va_core(current_domid,
+ cpu, addr, ctxt);
+ else
+ guest_va = (unsigned long *)map_domain_va(xc_handle,
+ cpu, addr, PROT_READ|PROT_WRITE);
+ if ( guest_va == NULL )
+ goto out_error;
*guest_va = (unsigned long)data;
break;
case PTRACE_GETREGS:
+ if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
+ goto out_error;
+ SET_PT_REGS(pt, ctxt[cpu].user_regs);
+ memcpy(data, &pt, sizeof(struct gdb_regs));
+ break;
+
case PTRACE_GETFPREGS:
case PTRACE_GETFPXREGS:
-
- FETCH_REGS(cpu);
- if ( request == PTRACE_GETREGS )
- {
- SET_PT_REGS(pt, ctxt[cpu].user_regs);
- memcpy(data, &pt, sizeof(struct gdb_regs));
- }
- else if (request == PTRACE_GETFPREGS)
- {
- memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
- }
- else /*if (request == PTRACE_GETFPXREGS)*/
- {
- memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
- }
+ if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
+ goto out_error;
+ memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
break;
case PTRACE_SETREGS:
+ if (current_isfile)
+ goto out_unsupported; /* XXX not yet supported */
SET_XC_REGS(((struct gdb_regs *)data), ctxt[cpu].user_regs);
- retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu, &ctxt[cpu]);
- if (retval)
- goto error_out;
+ if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
+ &ctxt[cpu])))
+ goto out_error_dom0;
break;
case PTRACE_SINGLESTEP:
+ if (current_isfile)
+ goto out_unsupported; /* XXX not yet supported */
/* XXX we can still have problems if the user switches threads
* during single-stepping - but that just seems retarded
*/
ctxt[cpu].user_regs.eflags |= PSL_T;
- retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu, &ctxt[cpu]);
- if ( retval )
- {
- perror("dom0 op failed");
- goto error_out;
- }
+ if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
+ &ctxt[cpu])))
+ goto out_error_dom0;
/* FALLTHROUGH */
case PTRACE_CONT:
case PTRACE_DETACH:
+ if (current_isfile)
+ goto out_unsupported; /* XXX not yet supported */
if ( request != PTRACE_SINGLESTEP )
{
FOREACH_CPU(cpumap, index) {
cpu = index - 1;
- FETCH_REGS(cpu);
+ if (fetch_regs(xc_handle, cpu, NULL))
+ goto out_error;
/* Clear trace flag */
- if ( ctxt[cpu].user_regs.eflags & PSL_T ) {
+ if ( ctxt[cpu].user_regs.eflags & PSL_T )
+ {
ctxt[cpu].user_regs.eflags &= ~PSL_T;
- retval = xc_vcpu_setcontext(xc_handle, current_domid,
- cpu, &ctxt[cpu]);
- if ( retval ) {
- perror("dom0 op failed");
- goto error_out;
- }
+ if ((retval = xc_vcpu_setcontext(xc_handle, current_domid,
+ cpu, &ctxt[cpu])))
+ goto out_error_dom0;
}
}
}
@@ -436,31 +426,34 @@
op.cmd = DOM0_SETDEBUGGING;
op.u.setdebugging.domain = current_domid;
op.u.setdebugging.enable = 0;
- retval = do_dom0_op(xc_handle, &op);
+ if ((retval = do_dom0_op(xc_handle, &op)))
+ goto out_error_dom0;
}
regs_valid = 0;
- xc_domain_unpause(xc_handle, current_domid > 0 ? current_domid :
-current_domid);
+ if ((retval = xc_domain_unpause(xc_handle, current_domid > 0 ?
+ current_domid : -current_domid)))
+ goto out_error_dom0;
break;
case PTRACE_ATTACH:
current_domid = domid_tid;
+ current_isfile = (int)edata;
+ if (current_isfile)
+ break;
op.cmd = DOM0_GETDOMAININFO;
op.u.getdomaininfo.domain = current_domid;
retval = do_dom0_op(xc_handle, &op);
if ( retval || (op.u.getdomaininfo.domain != current_domid) )
- {
- perror("dom0 op failed");
- goto error_out;
- }
+ goto out_error_dom0;
if ( op.u.getdomaininfo.flags & DOMFLAGS_PAUSED )
- {
printf("domain currently paused\n");
- } else
- retval = xc_domain_pause(xc_handle, current_domid);
+ else if ((retval = xc_domain_pause(xc_handle, current_domid)))
+ goto out_error_dom0;
op.cmd = DOM0_SETDEBUGGING;
op.u.setdebugging.domain = current_domid;
op.u.setdebugging.enable = 1;
- retval = do_dom0_op(xc_handle, &op);
+ if ((retval = do_dom0_op(xc_handle, &op)))
+ goto out_error_dom0;
if (get_online_cpumap(xc_handle, &op.u.getdomaininfo, &cpumap))
printf("get_online_cpumap failed\n");
@@ -474,26 +467,40 @@
case PTRACE_POKEUSER:
case PTRACE_SYSCALL:
case PTRACE_KILL:
-#ifdef DEBUG
- printf("unsupported xc_ptrace request %s\n", ptrace_names[request]);
-#endif
- /* XXX not yet supported */
- status = ENOSYS;
- break;
+ goto out_unsupported; /* XXX not yet supported */
case PTRACE_TRACEME:
printf("PTRACE_TRACEME is an invalid request under Xen\n");
- status = EINVAL;
- }
-
- if ( status )
- {
- errno = status;
- retval = -1;
- }
-
- error_out:
+ goto out_error;
+ }
+
return retval;
+
+ out_error_dom0:
+ perror("dom0 op failed");
+ out_error:
+ errno = EINVAL;
+ return retval;
+
+ out_unsupported:
+#ifdef DEBUG
+ printf("unsupported xc_ptrace request %s\n", ptrace_names[request]);
+#endif
+ errno = ENOSYS;
+ return -1;
+
+}
+
+int
+xc_waitdomain(
+ int xc_handle,
+ int domain,
+ int *status,
+ int options)
+{
+ if (current_isfile)
+ return xc_waitdomain_core(xc_handle, domain, status, options, ctxt);
+ return __xc_waitdomain(xc_handle, domain, status, options);
}
/*
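
With the myptrace/myxcwait function pointers gone from gdbserver, the choice between a live domain and a saved core image is made inside libxc: PTRACE_ATTACH records its edata argument in current_isfile, and xc_waitdomain() forwards to xc_waitdomain_core() when a file is attached. A rough sketch of a caller, assuming the xc_ptrace() and xc_waitdomain() prototypes implied by the hunks above and omitting error reporting:

    #include <sys/ptrace.h>
    #include <xenctrl.h>

    /* Sketch only: attach to a running domain (isfile == 0) or to a
       saved core image (isfile == 1), then wait for it to stop.  The
       edata argument of PTRACE_ATTACH is what sets current_isfile. */
    static int debug_attach(int xc_handle, int domid, int isfile)
    {
        int status = 0;

        if (xc_ptrace(xc_handle, PTRACE_ATTACH, domid, 0, isfile) < 0)
            return -1;

        /* Routed to xc_waitdomain_core() when attached to a file. */
        return xc_waitdomain(xc_handle, domid, &status, 0);
    }
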
diff -r ede16886f979 -r c4ac21dc3f16 tools/libxc/xc_ptrace.h
--- a/tools/libxc/xc_ptrace.h Mon Mar 6 16:09:18 2006
+++ b/tools/libxc/xc_ptrace.h Mon Mar 6 17:21:35 2006
@@ -1,5 +1,7 @@
#ifndef XC_PTRACE_
#define XC_PTRACE_
+
+#include <thread_db.h>
#ifdef XC_PTRACE_PRIVATE
#define X86_CR0_PE 0x00000001 /* Enable Protected Mode (RW) */
@@ -8,33 +10,7 @@
#define PDRSHIFT 22
#define PSL_T 0x00000100 /* trace enable bit */
-char * ptrace_names[] = {
- "PTRACE_TRACEME",
- "PTRACE_PEEKTEXT",
- "PTRACE_PEEKDATA",
- "PTRACE_PEEKUSER",
- "PTRACE_POKETEXT",
- "PTRACE_POKEDATA",
- "PTRACE_POKEUSER",
- "PTRACE_CONT",
- "PTRACE_KILL",
- "PTRACE_SINGLESTEP",
- "PTRACE_INVALID",
- "PTRACE_INVALID",
- "PTRACE_GETREGS",
- "PTRACE_SETREGS",
- "PTRACE_GETFPREGS",
- "PTRACE_SETFPREGS",
- "PTRACE_ATTACH",
- "PTRACE_DETACH",
- "PTRACE_GETFPXREGS",
- "PTRACE_SETFPXREGS",
- "PTRACE_INVALID",
- "PTRACE_INVALID",
- "PTRACE_INVALID",
- "PTRACE_INVALID",
- "PTRACE_SYSCALL",
-};
+extern const char *ptrace_names[];
struct gdb_regs {
long ebx; /* 0 */
diff -r ede16886f979 -r c4ac21dc3f16 tools/libxc/xc_ptrace_core.c
--- a/tools/libxc/xc_ptrace_core.c Mon Mar 6 16:09:18 2006
+++ b/tools/libxc/xc_ptrace_core.c Mon Mar 6 17:21:35 2006
@@ -1,81 +1,10 @@
+#define XC_PTRACE_PRIVATE
+
#include <sys/ptrace.h>
#include <sys/wait.h>
#include "xc_private.h"
+#include "xc_ptrace.h"
#include <time.h>
-
-#define BSD_PAGE_MASK (PAGE_SIZE-1)
-#define PDRSHIFT 22
-#define VCPU 0 /* XXX */
-
-/*
- * long
- * ptrace(enum __ptrace_request request, pid_t pid, void *addr, void *data);
- */
-
-struct gdb_regs {
- long ebx; /* 0 */
- long ecx; /* 4 */
- long edx; /* 8 */
- long esi; /* 12 */
- long edi; /* 16 */
- long ebp; /* 20 */
- long eax; /* 24 */
- int xds; /* 28 */
- int xes; /* 32 */
- int xfs; /* 36 */
- int xgs; /* 40 */
- long orig_eax; /* 44 */
- long eip; /* 48 */
- int xcs; /* 52 */
- long eflags; /* 56 */
- long esp; /* 60 */
- int xss; /* 64 */
-};
-
-#define printval(x) printf("%s = %lx\n", #x, (long)x);
-#define SET_PT_REGS(pt, xc) \
-{ \
- pt.ebx = xc.ebx; \
- pt.ecx = xc.ecx; \
- pt.edx = xc.edx; \
- pt.esi = xc.esi; \
- pt.edi = xc.edi; \
- pt.ebp = xc.ebp; \
- pt.eax = xc.eax; \
- pt.eip = xc.eip; \
- pt.xcs = xc.cs; \
- pt.eflags = xc.eflags; \
- pt.esp = xc.esp; \
- pt.xss = xc.ss; \
- pt.xes = xc.es; \
- pt.xds = xc.ds; \
- pt.xfs = xc.fs; \
- pt.xgs = xc.gs; \
-}
-
-#define SET_XC_REGS(pt, xc) \
-{ \
- xc.ebx = pt->ebx; \
- xc.ecx = pt->ecx; \
- xc.edx = pt->edx; \
- xc.esi = pt->esi; \
- xc.edi = pt->edi; \
- xc.ebp = pt->ebp; \
- xc.eax = pt->eax; \
- xc.eip = pt->eip; \
- xc.cs = pt->xcs; \
- xc.eflags = pt->eflags; \
- xc.esp = pt->esp; \
- xc.ss = pt->xss; \
- xc.es = pt->xes; \
- xc.ds = pt->xds; \
- xc.fs = pt->xfs; \
- xc.gs = pt->xgs; \
-}
-
-
-#define vtopdi(va) ((va) >> PDRSHIFT)
-#define vtopti(va) (((va) >> PAGE_SHIFT) & 0x3ff)
/* XXX application state */
@@ -84,7 +13,6 @@
static unsigned long *m2p_array = NULL;
static unsigned long pages_offset;
static unsigned long cr3[MAX_VIRT_CPUS];
-static vcpu_guest_context_t ctxt[MAX_VIRT_CPUS];
/* --------------------- */
@@ -92,11 +20,13 @@
map_mtop_offset(unsigned long ma)
{
return pages_offset + (m2p_array[ma >> PAGE_SHIFT] << PAGE_SHIFT);
+ return 0;
}
-static void *
-map_domain_va(unsigned long domfd, int cpu, void * guest_va)
+void *
+map_domain_va_core(unsigned long domfd, int cpu, void * guest_va,
+ vcpu_guest_context_t *ctxt)
{
unsigned long pde, page;
unsigned long va = (unsigned long)guest_va;
@@ -120,12 +50,12 @@
if (v == MAP_FAILED)
{
perror("mmap failed");
- goto error_out;
+ return NULL;
}
cr3_virt[cpu] = v;
}
if ((pde = cr3_virt[cpu][vtopdi(va)]) == 0) /* logical address */
- goto error_out;
+ return NULL;
if (ctxt[cpu].flags & VGCF_HVM_GUEST)
pde = p2m_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
if (pde != pde_phys[cpu])
@@ -137,11 +67,11 @@
NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
map_mtop_offset(pde_phys[cpu]));
if (v == MAP_FAILED)
- goto error_out;
+ return NULL;
pde_virt[cpu] = v;
}
if ((page = pde_virt[cpu][vtopti(va)]) == 0) /* logical address */
- goto error_out;
+ return NULL;
if (ctxt[cpu].flags & VGCF_HVM_GUEST)
page = p2m_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
if (page != page_phys[cpu])
@@ -152,17 +82,15 @@
v = mmap(
NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
map_mtop_offset(page_phys[cpu]));
- if (v == MAP_FAILED) {
+ if (v == MAP_FAILED)
+ {
printf("cr3 %lx pde %lx page %lx pti %lx\n", cr3[cpu], pde, page,
vtopti(va));
page_phys[cpu] = 0;
- goto error_out;
+ return NULL;
}
page_virt[cpu] = v;
}
return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
-
- error_out:
- return 0;
}
int
@@ -170,17 +98,25 @@
int xc_handle,
int domfd,
int *status,
- int options)
+ int options,
+ vcpu_guest_context_t *ctxt)
{
- int retval = -1;
int nr_vcpus;
int i;
xc_core_header_t header;
- if (nr_pages == 0) {
+ if (nr_pages == 0)
+ {
if (read(domfd, &header, sizeof(header)) != sizeof(header))
return -1;
+
+ if (header.xch_magic != XC_CORE_MAGIC) {
+ printf("Magic number mismatch: 0x%08x (file) != "
+ "0x%08x (code)\n", header.xch_magic,
+ XC_CORE_MAGIC);
+ return -1;
+ }
nr_pages = header.xch_nr_pages;
nr_vcpus = header.xch_nr_vcpus;
@@ -193,17 +129,19 @@
for (i = 0; i < nr_vcpus; i++) {
cr3[i] = ctxt[i].ctrlreg[3];
}
- if ((p2m_array = malloc(nr_pages * sizeof(unsigned long))) == NULL) {
+ if ((p2m_array = malloc(nr_pages * sizeof(unsigned long))) == NULL)
+ {
printf("Could not allocate p2m_array\n");
- goto error_out;
+ return -1;
}
if (read(domfd, p2m_array, sizeof(unsigned long)*nr_pages) !=
sizeof(unsigned long)*nr_pages)
return -1;
- if ((m2p_array = malloc((1<<20) * sizeof(unsigned long))) == NULL) {
+ if ((m2p_array = malloc((1<<20) * sizeof(unsigned long))) == NULL)
+ {
printf("Could not allocate m2p array\n");
- goto error_out;
+ return -1;
}
bzero(m2p_array, sizeof(unsigned long)* 1 << 20);
@@ -212,89 +150,7 @@
}
}
- retval = 0;
- error_out:
- return retval;
-
-}
-
-long
-xc_ptrace_core(
- int xc_handle,
- enum __ptrace_request request,
- uint32_t domfd,
- long eaddr,
- long edata)
-{
- int status = 0;
- struct gdb_regs pt;
- long retval = 0;
- unsigned long *guest_va;
- int cpu = VCPU;
- void *addr = (char *)eaddr;
- void *data = (char *)edata;
-
-#if 0
- printf("%20s %d, %p, %p \n", ptrace_names[request], domid, addr, data);
-#endif
- switch (request) {
- case PTRACE_PEEKTEXT:
- case PTRACE_PEEKDATA:
- if ((guest_va = (unsigned long *)map_domain_va(domfd, cpu, addr)) ==
NULL) {
- status = EFAULT;
- goto error_out;
- }
-
- retval = *guest_va;
- break;
- case PTRACE_POKETEXT:
- case PTRACE_POKEDATA:
- if ((guest_va = (unsigned long *)map_domain_va(domfd, cpu, addr)) ==
NULL) {
- status = EFAULT;
- goto error_out;
- }
- *guest_va = (unsigned long)data;
- break;
- case PTRACE_GETREGS:
- case PTRACE_GETFPREGS:
- case PTRACE_GETFPXREGS:
- if (request == PTRACE_GETREGS) {
- SET_PT_REGS(pt, ctxt[cpu].user_regs);
- memcpy(data, &pt, sizeof(struct gdb_regs));
- } else if (request == PTRACE_GETFPREGS)
- memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
- else /*if (request == PTRACE_GETFPXREGS)*/
- memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
- break;
- case PTRACE_ATTACH:
- retval = 0;
- break;
- case PTRACE_SETREGS:
- case PTRACE_SINGLESTEP:
- case PTRACE_CONT:
- case PTRACE_DETACH:
- case PTRACE_SETFPREGS:
- case PTRACE_SETFPXREGS:
- case PTRACE_PEEKUSER:
- case PTRACE_POKEUSER:
- case PTRACE_SYSCALL:
- case PTRACE_KILL:
-#ifdef DEBUG
- printf("unsupported xc_ptrace request %s\n", ptrace_names[request]);
-#endif
- status = ENOSYS;
- break;
- case PTRACE_TRACEME:
- printf("PTRACE_TRACEME is an invalid request under Xen\n");
- status = EINVAL;
- }
-
- if (status) {
- errno = status;
- retval = -1;
- }
- error_out:
- return retval;
+ return 0;
}
/*
diff -r ede16886f979 -r c4ac21dc3f16 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Mon Mar 6 16:09:18 2006
+++ b/tools/libxc/xenctrl.h Mon Mar 6 17:21:35 2006
@@ -92,19 +92,26 @@
unsigned int xch_pages_offset;
} xc_core_header_t;
+#define XC_CORE_MAGIC 0xF00FEBED
long xc_ptrace_core(
int xc_handle,
enum __ptrace_request request,
uint32_t domid,
long addr,
- long data);
-
+ long data,
+ vcpu_guest_context_t *ctxt);
+void * map_domain_va_core(
+ unsigned long domfd,
+ int cpu,
+ void *guest_va,
+ vcpu_guest_context_t *ctxt);
int xc_waitdomain_core(
int xc_handle,
int domain,
int *status,
- int options);
+ int options,
+ vcpu_guest_context_t *ctxt);
/*
* DOMAIN MANAGEMENT FUNCTIONS
diff -r ede16886f979 -r c4ac21dc3f16 tools/misc/lomount/lomount.c
--- a/tools/misc/lomount/lomount.c Mon Mar 6 16:09:18 2006
+++ b/tools/misc/lomount/lomount.c Mon Mar 6 17:21:35 2006
@@ -24,16 +24,33 @@
* THE SOFTWARE.
*/
+/*
+ * Return code:
+ *
+ * bit 7 set: lomount wrapper failed
+ * bit 7 clear: lomount wrapper ok; mount's return code in low 7 bits
+ * 0 success
+ */
+
+enum
+{
+ ERR_USAGE = 0x80, // Incorrect usage
+ ERR_PART_PARSE, // Failed to parse partition table
+ ERR_NO_PART, // No such partition
+ ERR_NO_EPART, // No such extended partition
+ ERR_MOUNT // Other failure of mount command
+};
+
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
+#include <sys/wait.h>
#include <errno.h>
#define BUF 4096
-//#define SECSIZE 4096 /* arbitrarilly large (it's probably just 512) */
#define SECSIZE 512
struct pentry
@@ -50,30 +67,32 @@
unsigned long no_of_sectors_abs;
};
-char * progname;
-
-int loadptable(const char *argv, struct pentry parttbl[], struct pentry
**exttbl)
-{
- FILE *fd;
- int i, valid, total_known_sectors = 0;
+int loadptable(const char *diskimage, struct pentry parttbl[], struct pentry
**exttbl, int * valid)
+{
+ FILE *fd;
+ size_t size;
+ int fail = 1;
+ int i, total_known_sectors = 0;
unsigned char *pi;
unsigned char data [SECSIZE];
unsigned long extent = 0, old_extent = 0, e_count = 1;
struct pentry exttbls[4];
- fd = fopen(argv, "r");
+ *valid = 0;
+
+ fd = fopen(diskimage, "r");
if (fd == NULL)
{
- perror ("lomount");
- return 1;
- }
- i = fread (&data, 1, sizeof (data), fd);
- if (i < SECSIZE)
- {
- fprintf (stderr, "%s: could not read the entire first
sector\n", progname);
- return 1;
- }
- for (i = 0;i < 4;i++)
+ perror(diskimage);
+ goto done;
+ }
+ size = fread (&data, 1, sizeof(data), fd);
+ if (size < (size_t)sizeof(data))
+ {
+ fprintf(stderr, "Could not read the entire first sector of
%s.\n", diskimage);
+ goto done;
+ }
+ for (i = 0; i < 4; i++)
{
pi = &data [446 + 16 * i];
parttbl [i].bootable = *pi;
@@ -95,7 +114,7 @@
old_extent = extent;
}
}
- valid = (data [510] == 0x55 && data [511] == 0xaa) ? 1 : 0;
+ *valid = (data [510] == 0x55 && data [511] == 0xaa) ? 1 : 0;
for (i = 0; i < 4; i++)
{
total_known_sectors += parttbl[i].no_of_sectors_abs;
@@ -105,7 +124,7 @@
#ifdef DEBUG
if (extent != 0)
{
- printf("extended partition detected at offset %d\n", extent);
+ printf("extended partition detected at offset %ld\n", extent);
}
#endif
while (extent != 0)
@@ -113,14 +132,14 @@
/* according to realloc(3) passing NULL as pointer is same as calling malloc()
*/
exttbl[0] = realloc(exttbl[0], e_count * sizeof(struct pentry));
fseek(fd, extent, SEEK_SET);
- i = fread (&data, 1, sizeof (data), fd);
- if (i < SECSIZE)
- {
- fprintf (stderr, "%s: could not read the entire first
sector\n", progname);
- return 1;
+ size = fread (&data, 1, sizeof(data), fd);
+ if (size < (size_t)sizeof(data))
+ {
+ fprintf(stderr, "Could not read extended partition of
%s.\n", diskimage);
+ goto done;
}
/* only first 2 entrys are used in extented partition tables */
- for (i = 0;i < 2;i++)
+ for (i = 0; i < 2; i++)
{
pi = &data [446 + 16 * i];
exttbls [i].bootable = *pi;
@@ -152,7 +171,7 @@
/* adjust for start of image instead of start of ext
partition */
exttbl[0][e_count-1].start_sector_abs +=
(extent/SECSIZE);
#ifdef DEBUG
- printf("extent %d start_sector_abs %d\n",
extent, exttbl[0][e_count-1].start_sector_abs);
+ printf("extent %ld start_sector_abs %ld\n",
extent, exttbl[0][e_count-1].start_sector_abs);
#endif
//else if (parttbl[i].system == 0x5)
}
@@ -165,53 +184,71 @@
}
e_count ++;
}
- //fclose (fd);
- //the above segfaults (?!!!)
-#ifdef DEBUG
- printf("e_count = %d\n", e_count);
-#endif
- return valid;
+#ifdef DEBUG
+ printf("e_count = %ld\n", e_count);
+#endif
+ fail = 0;
+
+done:
+ if (fd)
+ fclose(fd);
+ return fail;
}
+void usage()
+{
+ fprintf(stderr, "You must specify at least -diskimage and
-partition.\n");
+ fprintf(stderr, "All other arguments are passed through to 'mount'.\n");
+ fprintf(stderr, "ex. lomount -t fs-type -diskimage hda.img -partition 1
/mnt\n");
+ exit(ERR_USAGE);
+}
+
int main(int argc, char ** argv)
{
+ int status;
struct pentry perttbl [4];
struct pentry *exttbl[1], *parttbl;
- char buf[BUF], argv2[BUF], diskimage[BUF];
- int partition = 1, sec, num = 0, pnum = 0, len = BUF, i, f = 0, valid;
- progname = argv[0];
+ char buf[BUF], argv2[BUF];
+ const char * diskimage = 0;
+ int partition = 0, sec, num = 0, pnum = 0, i, valid;
+ size_t argv2_len = sizeof(argv2);
+ argv2[0] = '\0';
exttbl[0] = NULL;
+
for (i = 1; i < argc; i ++)
{
- if (strncmp(argv[i], "-diskimage", BUF)==0)
- {
- strncpy(diskimage, argv[i+1], BUF);
- i++; f = 1;
- }
- else if (strncmp(argv[i], "-partition", BUF)==0)
- {
- partition = atoi(argv[i+1]);
+ if (strcmp(argv[i], "-diskimage")==0)
+ {
+ if (i == argc-1)
+ usage();
i++;
- if (partition < 1) partition = 1;
+ diskimage = argv[i];
+ }
+ else if (strcmp(argv[i], "-partition")==0)
+ {
+ if (i == argc-1)
+ usage();
+ i++;
+ partition = atoi(argv[i]);
}
else
{
- strncat(argv2, argv[i], len);
- strncat(argv2, " ", len-1);
- len -= strlen(argv[i]);
- len--;
- }
- }
- if (!f)
- {
- printf("You must specify -diskimage and -partition\n");
- printf("ex. lomount -t fs-type -diskimage hda.img -partition 1
/mnt\n");
- return 0;
- }
- valid = loadptable(diskimage, perttbl, exttbl);
+ size_t len = strlen(argv[i]);
+ if (len >= argv2_len-1)
+ usage();
+ strcat(argv2, argv[i]);
+ strcat(argv2, " ");
+ argv2_len -= (len+1);
+ }
+ }
+ if (! diskimage || partition < 1)
+ usage();
+
+ if (loadptable(diskimage, perttbl, exttbl, &valid))
+ return ERR_PART_PARSE;
if (!valid)
{
- printf("Warning: disk image does not appear to describe a valid
partition table.\n");
+ fprintf(stderr, "Warning: disk image does not appear to
describe a valid partition table.\n");
}
/* NOTE: need to make sure this always rounds down */
//sec = total_known_sectors / sizeof_diskimage;
@@ -228,14 +265,14 @@
{
if (exttbl[0] == NULL)
{
- printf("No extended partitions were found in %s.\n",
diskimage);
- return 2;
+ fprintf(stderr, "No extended partitions were found in
%s.\n", diskimage);
+ return ERR_NO_EPART;
}
parttbl = exttbl[0];
if (parttbl[partition-5].no_of_sectors_abs == 0)
{
- printf("Partition %d was not found in %s.\n",
partition, diskimage);
- return 3;
+ fprintf(stderr, "Partition %d was not found in %s.\n",
partition, diskimage);
+ return ERR_NO_PART;
}
partition -= 4;
}
@@ -244,8 +281,8 @@
parttbl = perttbl;
if (parttbl[partition-1].no_of_sectors_abs == 0)
{
- printf("Partition %d was not found in %s.\n",
partition, diskimage);
- return 3;
+ fprintf(stderr, "Partition %d was not found in %s.\n",
partition, diskimage);
+ return ERR_NO_PART;
}
}
num = parttbl[partition-1].start_sector_abs;
@@ -253,10 +290,14 @@
#ifdef DEBUG
printf("offset = %d\n", pnum);
#endif
- snprintf(buf, BUF, "mount -oloop,offset=%d %s %s", pnum, diskimage,
argv2);
+ snprintf(buf, sizeof(buf), "mount -oloop,offset=%d %s %s", pnum,
diskimage, argv2);
#ifdef DEBUG
printf("%s\n", buf);
#endif
- system(buf);
- return 0;
+ status = system(buf);
+ if (WIFEXITED(status))
+ status = WEXITSTATUS(status);
+ else
+ status = ERR_MOUNT;
+ return status;
}
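
Because lomount now reserves bit 7 of its exit status for wrapper failures and passes mount's own status through in the low bits, a caller can tell the two apart. A small sketch of interpreting that status; the image name and mount point are placeholders:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>

    int main(void)
    {
        int status = system("lomount -diskimage hda.img -partition 1 /mnt");
        int rc;

        if (status == -1 || !WIFEXITED(status)) {
            fprintf(stderr, "lomount did not exit normally\n");
            return 1;
        }
        rc = WEXITSTATUS(status);
        if (rc & 0x80)
            fprintf(stderr, "lomount itself failed, code 0x%02x\n", rc);
        else if (rc != 0)
            fprintf(stderr, "mount failed with status %d\n", rc);
        else
            printf("mounted\n");
        return rc;
    }
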
diff -r ede16886f979 -r c4ac21dc3f16 tools/python/xen/xend/XendClient.py
--- a/tools/python/xen/xend/XendClient.py Mon Mar 6 16:09:18 2006
+++ b/tools/python/xen/xend/XendClient.py Mon Mar 6 17:21:35 2006
@@ -196,8 +196,9 @@
def xend_domains(self):
return self.xendGet(self.domainurl())
- def xend_list_domains(self):
- return self.xendGet(self.domainurl(), {'detail': '1'})
+ def xend_list_domains(self, detail = True):
+ return self.xendGet(self.domainurl(),
+ {'detail': detail and '1' or '0'})
def xend_domain_vcpuinfo(self, dom):
return self.xendGet(self.domainurl(dom), {'op': 'vcpuinfo'})
diff -r ede16886f979 -r c4ac21dc3f16 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py Mon Mar 6 16:09:18 2006
+++ b/tools/python/xen/xend/XendDomainInfo.py Mon Mar 6 17:21:35 2006
@@ -13,7 +13,7 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@xxxxxx>
-# Copyright (C) 2005 XenSource Ltd
+# Copyright (C) 2005, 2006 XenSource Ltd
#============================================================================
"""Representation of a single domain.
@@ -82,7 +82,7 @@
STATE_DOM_OK = 1
STATE_DOM_SHUTDOWN = 2
-SHUTDOWN_TIMEOUT = 30
+SHUTDOWN_TIMEOUT = 30.0
ZOMBIE_PREFIX = 'Zombie-'
@@ -182,7 +182,7 @@
vm.initDomain()
vm.storeVmDetails()
vm.storeDomDetails()
- vm.registerWatch()
+ vm.registerWatches()
vm.refreshShutdown()
return vm
except:
@@ -238,7 +238,7 @@
vm.storeVmDetails()
vm.storeDomDetails()
- vm.registerWatch()
+ vm.registerWatches()
vm.refreshShutdown(xeninfo)
return vm
@@ -443,7 +443,10 @@
self.console_mfn = None
self.vmWatch = None
-
+ self.shutdownWatch = None
+
+ self.shutdownStartTime = None
+
self.state = STATE_DOM_OK
self.state_updated = threading.Condition()
self.refresh_shutdown_lock = threading.Condition()
@@ -648,7 +651,7 @@
self.introduceDomain()
self.storeDomDetails()
- self.registerWatch()
+ self.registerWatches()
self.refreshShutdown()
log.debug("XendDomainInfo.completeRestore done")
@@ -711,13 +714,15 @@
## public:
- def registerWatch(self):
- """Register a watch on this VM's entries in the store, so that
- when they are changed externally, we keep up to date. This should
- only be called by {@link #create}, {@link #recreate}, or {@link
- #restore}, once the domain's details have been written, but before the
- new instance is returned."""
+ def registerWatches(self):
+ """Register a watch on this VM's entries in the store, and the
+ domain's control/shutdown node, so that when they are changed
+ externally, we keep up to date. This should only be called by {@link
+ #create}, {@link #recreate}, or {@link #restore}, once the domain's
+ details have been written, but before the new instance is returned."""
self.vmWatch = xswatch(self.vmpath, self.storeChanged)
+ self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
+ self.handleShutdownWatch)
def getDomid(self):
@@ -852,33 +857,49 @@
# Domain is alive. If we are shutting it down, then check
# the timeout on that, and destroy it if necessary.
- sst = self.readDom('xend/shutdown_start_time')
- if sst:
- sst = float(sst)
- timeout = SHUTDOWN_TIMEOUT - time.time() + sst
+ if self.shutdownStartTime:
+ timeout = (SHUTDOWN_TIMEOUT - time.time() +
+ self.shutdownStartTime)
if timeout < 0:
log.info(
"Domain shutdown timeout expired: name=%s id=%s",
self.info['name'], self.domid)
self.destroy()
- else:
- log.debug(
- "Scheduling refreshShutdown on domain %d in %ds.",
- self.domid, timeout)
- threading.Timer(timeout, self.refreshShutdown).start()
finally:
self.refresh_shutdown_lock.release()
if restart_reason:
self.maybeRestart(restart_reason)
+
+
+ def handleShutdownWatch(self, _):
+ log.debug('XendDomainInfo.handleShutdownWatch')
+
+ reason = self.readDom('control/shutdown')
+
+ if reason and reason != 'suspend':
+ sst = self.readDom('xend/shutdown_start_time')
+ now = time.time()
+ if sst:
+ self.shutdownStartTime = float(sst)
+ timeout = float(sst) + SHUTDOWN_TIMEOUT - now
+ else:
+ self.shutdownStartTime = now
+ self.storeDom('xend/shutdown_start_time', now)
+ timeout = SHUTDOWN_TIMEOUT
+
+ log.trace(
+ "Scheduling refreshShutdown on domain %d in %ds.",
+ self.domid, timeout)
+ threading.Timer(timeout, self.refreshShutdown).start()
+
+ return 1
def shutdown(self, reason):
if not reason in shutdown_reasons.values():
raise XendError('Invalid reason: %s' % reason)
self.storeDom("control/shutdown", reason)
- if reason != 'suspend':
- self.storeDom('xend/shutdown_start_time', time.time())
## private:
@@ -1225,6 +1246,8 @@
"""Cleanup domain resources; release devices. Idempotent. Nothrow
guarantee."""
+ self.unwatchShutdown()
+
self.release_devices()
if self.image:
@@ -1274,6 +1297,20 @@
self.vmWatch = None
except:
log.exception("Unwatching VM path failed.")
+
+
+ def unwatchShutdown(self):
+ """Remove the watch on the domain's control/shutdown node, if any.
+ Idempotent. Nothrow guarantee."""
+
+ try:
+ try:
+ if self.shutdownWatch:
+ self.shutdownWatch.unwatch()
+ finally:
+ self.shutdownWatch = None
+ except:
+ log.exception("Unwatching control/shutdown failed.")
## public:
diff -r ede16886f979 -r c4ac21dc3f16 tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py Mon Mar 6 16:09:18 2006
+++ b/tools/python/xen/xend/image.py Mon Mar 6 17:21:35 2006
@@ -274,6 +274,10 @@
uname = sxp.child_value(info, 'uname')
typedev = sxp.child_value(info, 'dev')
(_, vbdparam) = string.split(uname, ':', 1)
+
+ if 'file:' in uname and not os.path.isfile(vbdparam):
+ raise VmError('Disk image does not exist: %s' % vbdparam)
+
if 'ioemu:' in typedev:
(emtype, vbddev) = string.split(typedev, ':', 1)
else:
diff -r ede16886f979 -r c4ac21dc3f16 tools/python/xen/xend/server/blkif.py
--- a/tools/python/xen/xend/server/blkif.py Mon Mar 6 16:09:18 2006
+++ b/tools/python/xen/xend/server/blkif.py Mon Mar 6 17:21:35 2006
@@ -42,10 +42,6 @@
"""@see DevController.getDeviceDetails"""
dev = sxp.child_value(config, 'dev')
- if 'ioemu:' in dev:
- return (None,{},{})
-
- devid = blkif.blkdev_name_to_number(dev)
(typ, params) = string.split(sxp.child_value(config, 'uname'), ':', 1)
back = { 'dev' : dev,
@@ -54,7 +50,13 @@
'mode' : sxp.child_value(config, 'mode', 'r')
}
- front = { 'virtual-device' : "%i" % devid }
+ if 'ioemu:' in dev:
+ (dummy, dev1) = string.split(dev, ':', 1)
+ devid = blkif.blkdev_name_to_number(dev1)
+ front = {}
+ else:
+ devid = blkif.blkdev_name_to_number(dev)
+ front = { 'virtual-device' : "%i" % devid }
return (devid, back, front)
diff -r ede16886f979 -r c4ac21dc3f16 tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py Mon Mar 6 16:09:18 2006
+++ b/tools/python/xen/xm/main.py Mon Mar 6 17:21:35 2006
@@ -396,10 +396,8 @@
if args:
dominfo = map(server.xend_domain_vcpuinfo, args)
else:
- doms = server.xend_list_domains()
- dominfo = map(
- lambda x: server.xend_domain_vcpuinfo(sxp.child_value(x, 'name')),
- doms)
+ doms = server.xend_list_domains(False)
+ dominfo = map(server.xend_domain_vcpuinfo, doms)
print 'Name ID VCPU CPU State Time(s)
CPU Affinity'
diff -r ede16886f979 -r c4ac21dc3f16 tools/vtpm_manager/manager/dmictl.c
--- a/tools/vtpm_manager/manager/dmictl.c Mon Mar 6 16:09:18 2006
+++ b/tools/vtpm_manager/manager/dmictl.c Mon Mar 6 17:21:35 2006
@@ -74,7 +74,13 @@
close(dmi_res->guest_tx_fh); dmi_res->guest_tx_fh = -1;
close(dmi_res->vtpm_tx_fh); dmi_res->vtpm_tx_fh = -1;
-
+ vtpm_globals->connected_dmis--;
+
+ if (vtpm_globals->connected_dmis == 0) {
+ // No more DMIs connected. Close the fifo to prevent a broken pipe.
+ close(vtpm_globals->guest_rx_fh);
+ vtpm_globals->guest_rx_fh = -1;
+ }
#ifndef MANUAL_DM_LAUNCH
if (dmi_res->dmi_id != VTPM_CTL_DM) {
if (dmi_res->dmi_pid != 0) {
@@ -118,6 +124,7 @@
status = TPM_BAD_PARAMETER;
goto abort_egress;
} else {
+ vtpm_globals->connected_dmis++; // Put this here so we don't count Dom0
BSG_UnpackList( param_buf->bytes, 3,
BSG_TYPE_BYTE, &type,
BSG_TYPE_UINT32, &domain_id,
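
The dmictl.c change counts connected DMIs and closes the guest fifo once the count drops to zero, so the manager is not left holding a pipe with no peer. Reduced to a generic sketch (the names below are illustrative, not the vtpm_manager API):

    #include <fcntl.h>
    #include <unistd.h>

    /* Illustrative only: a descriptor shared by all clients, opened on
       the first connect and released when the last client goes away. */
    static int shared_fd = -1;
    static int connected = 0;

    static int client_connected(const char *fifo_path)
    {
        if (connected++ == 0)
            shared_fd = open(fifo_path, O_RDONLY | O_NONBLOCK);
        return shared_fd;
    }

    static void client_disconnected(void)
    {
        if (--connected == 0 && shared_fd != -1) {
            close(shared_fd);   /* last client gone: release the fifo */
            shared_fd = -1;
        }
    }
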
diff -r ede16886f979 -r c4ac21dc3f16 tools/vtpm_manager/manager/securestorage.c
--- a/tools/vtpm_manager/manager/securestorage.c Mon Mar 6 16:09:18 2006
+++ b/tools/vtpm_manager/manager/securestorage.c Mon Mar 6 17:21:35 2006
@@ -307,8 +307,8 @@
TPM_RESULT status=TPM_SUCCESS;
int fh, dmis=-1;
- BYTE *flat_boot_key, *flat_dmis, *flat_enc;
- buffer_t clear_flat_global, enc_flat_global;
+ BYTE *flat_boot_key=NULL, *flat_dmis=NULL, *flat_enc=NULL;
+ buffer_t clear_flat_global=NULL_BUF, enc_flat_global=NULL_BUF;
UINT32 storageKeySize = buffer_len(&vtpm_globals->storageKeyWrap);
UINT32 bootKeySize = buffer_len(&vtpm_globals->bootKeyWrap);
struct pack_buf_t storage_key_pack = {storageKeySize,
vtpm_globals->storageKeyWrap.bytes};
@@ -328,12 +328,9 @@
sizeof(UINT32) +// storagekeysize
storageKeySize, NULL) ); //
storage key
- flat_dmis_size = (hashtable_count(vtpm_globals->dmi_map) - 1) * // num DMIS
(-1 for Dom0)
- (sizeof(UINT32) + 2*sizeof(TPM_DIGEST)); // Per DMI info
flat_boot_key = (BYTE *) malloc( boot_key_size );
flat_enc = (BYTE *) malloc( sizeof(UINT32) );
- flat_dmis = (BYTE *) malloc( flat_dmis_size );
boot_key_size = BSG_PackList(flat_boot_key, 1,
BSG_TPM_SIZE32_DATA, &boot_key_pack);
@@ -349,8 +346,12 @@
BSG_PackConst(buffer_len(&enc_flat_global), 4, flat_enc);
- // Per DMI values to be saved
+ // Per DMI values to be saved (if any exist)
if (hashtable_count(vtpm_globals->dmi_map) > 0) {
+
+ flat_dmis_size = (hashtable_count(vtpm_globals->dmi_map) - 1) * // num
DMIS (-1 for Dom0)
+ (sizeof(UINT32) + 2*sizeof(TPM_DIGEST)); // Per DMI info
+ flat_dmis = (BYTE *) malloc( flat_dmis_size );
dmi_itr = hashtable_iterator(vtpm_globals->dmi_map);
do {
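
The securestorage.c change initialises the buffer pointers to NULL and allocates flat_dmis only when there is at least one DMI to save, so a single exit path can free whatever was actually allocated. The idiom, reduced to a sketch with hypothetical names:

    #include <stdlib.h>

    int save_blob(int have_dmis)
    {
        unsigned char *boot_key = NULL, *dmis = NULL;
        int rc = -1;

        boot_key = malloc(32);
        if (boot_key == NULL)
            goto out;

        if (have_dmis) {
            dmis = malloc(64);
            if (dmis == NULL)
                goto out;
        }

        /* ... pack and write the buffers ... */
        rc = 0;

    out:
        free(dmis);          /* free(NULL) is a defined no-op */
        free(boot_key);
        return rc;
    }
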
diff -r ede16886f979 -r c4ac21dc3f16 tools/vtpm_manager/manager/vtpm_manager.c
--- a/tools/vtpm_manager/manager/vtpm_manager.c Mon Mar 6 16:09:18 2006
+++ b/tools/vtpm_manager/manager/vtpm_manager.c Mon Mar 6 17:21:35 2006
@@ -754,6 +754,7 @@
#ifndef VTPM_MULTI_VM
vtpm_globals->vtpm_rx_fh = -1;
vtpm_globals->guest_rx_fh = -1;
+ vtpm_globals->connected_dmis = 0;
#endif
if ((vtpm_globals->dmi_map = create_hashtable(10, hashfunc32, equals32)) ==
NULL){
status = TPM_FAIL;
diff -r ede16886f979 -r c4ac21dc3f16 tools/vtpm_manager/manager/vtpmpriv.h
--- a/tools/vtpm_manager/manager/vtpmpriv.h Mon Mar 6 16:09:18 2006
+++ b/tools/vtpm_manager/manager/vtpmpriv.h Mon Mar 6 17:21:35 2006
@@ -98,6 +98,7 @@
#ifndef VTPM_MULTI_VM
int vtpm_rx_fh;
int guest_rx_fh;
+ int connected_dmis; // Used to close guest_rx when no
dmis are connected
pid_t master_pid;
#endif
diff -r ede16886f979 -r c4ac21dc3f16 tools/xenstore/Makefile
--- a/tools/xenstore/Makefile Mon Mar 6 16:09:18 2006
+++ b/tools/xenstore/Makefile Mon Mar 6 17:21:35 2006
@@ -27,21 +27,27 @@
CLIENTS += xenstore-write
CLIENTS_OBJS := $(patsubst xenstore-%,xenstore_%.o,$(CLIENTS))
-all: libxenstore.so xenstored $(CLIENTS) xs_tdb_dump xenstore-ls
+all: libxenstore.so xenstored $(CLIENTS) xs_tdb_dump xenstore-control
xenstore-ls
+
+test_interleaved_transactions: test_interleaved_transactions.o
+ $(LINK.o) $^ $(LOADLIBES) $(LDLIBS) -L. -lxenstore -o $@
testcode: xs_test xenstored_test xs_random
-xenstored: xenstored_core.o xenstored_watch.o xenstored_domain.o
xenstored_transaction.o xs_lib.o talloc.o utils.o tdb.o
+xenstored: xenstored_core.o xenstored_watch.o xenstored_domain.o
xenstored_transaction.o xs_lib.o talloc.o utils.o tdb.o hashtable.o
$(LINK.o) $^ $(LOADLIBES) $(LDLIBS) -lxenctrl -o $@
$(CLIENTS): xenstore-%: xenstore_%.o libxenstore.so
- $(LINK.o) $< $(LOADLIBES) $(LDLIBS) -lxenctrl -L. -lxenstore -o $@
+ $(LINK.o) $< $(LOADLIBES) $(LDLIBS) -L. -lxenstore -o $@
$(CLIENTS_OBJS): xenstore_%.o: xenstore_client.c
$(COMPILE.c) -DCLIENT_$(*F) -o $@ $<
+xenstore-control: xenstore_control.o libxenstore.so
+ $(LINK.o) $< $(LOADLIBES) $(LDLIBS) -L. -lxenstore -o $@
+
xenstore-ls: xsls.o libxenstore.so
- $(LINK.o) $< $(LOADLIBES) $(LDLIBS) -lxenctrl -L. -lxenstore -o $@
+ $(LINK.o) $< $(LOADLIBES) $(LDLIBS) -L. -lxenstore -o $@
xenstored_test: xenstored_core_test.o xenstored_watch_test.o
xenstored_domain_test.o xenstored_transaction_test.o xs_lib.o talloc_test.o
fake_libxc.o utils.o tdb.o
$(LINK.o) $^ $(LOADLIBES) $(LDLIBS) -o $@
@@ -77,7 +83,8 @@
clean: testsuite-clean
rm -f *.o *.opic *.so
rm -f xenstored xs_random xs_stress xs_crashme
- rm -f xs_test xenstored_test xs_tdb_dump xenstore-ls $(CLIENTS)
+ rm -f xs_test xenstored_test xs_tdb_dump xenstore-control xenstore-ls
+ rm -f $(CLIENTS)
$(RM) $(PROG_DEP)
print-dir:
@@ -129,7 +136,7 @@
tarball: clean
cd .. && tar -c -j -v -h -f xenstore.tar.bz2 xenstore/
-install: libxenstore.so xenstored xenstore-ls $(CLIENTS)
+install: all
$(INSTALL_DIR) -p $(DESTDIR)/var/run/xenstored
$(INSTALL_DIR) -p $(DESTDIR)/var/lib/xenstored
$(INSTALL_DIR) -p $(DESTDIR)/usr/bin
@@ -137,6 +144,7 @@
$(INSTALL_DIR) -p $(DESTDIR)/usr/include
$(INSTALL_PROG) xenstored $(DESTDIR)/usr/sbin
$(INSTALL_PROG) $(CLIENTS) $(DESTDIR)/usr/bin
+ $(INSTALL_PROG) xenstore-control $(DESTDIR)/usr/bin
$(INSTALL_PROG) xenstore-ls $(DESTDIR)/usr/bin
$(INSTALL_DIR) -p $(DESTDIR)/usr/$(LIBDIR)
$(INSTALL_DATA) libxenstore.so $(DESTDIR)/usr/$(LIBDIR)
diff -r ede16886f979 -r c4ac21dc3f16 tools/xenstore/xenstored_core.c
--- a/tools/xenstore/xenstored_core.c Mon Mar 6 16:09:18 2006
+++ b/tools/xenstore/xenstored_core.c Mon Mar 6 17:21:35 2006
@@ -51,14 +51,31 @@
#include "xenctrl.h"
#include "tdb.h"
+#include "hashtable.h"
+
+
extern int eventchn_fd; /* in xenstored_domain.c */
-static bool verbose;
+static bool verbose = false;
LIST_HEAD(connections);
static int tracefd = -1;
+static bool recovery = true;
+static bool remove_local = true;
static int reopen_log_pipe[2];
static char *tracefile = NULL;
static TDB_CONTEXT *tdb_ctx;
+
+static void corrupt(struct connection *conn, const char *fmt, ...);
+static void check_store();
+
+#define log(...) \
+ do { \
+ char *s = talloc_asprintf(NULL, __VA_ARGS__); \
+ trace("%s\n", s); \
+ syslog(LOG_ERR, "%s", s); \
+ talloc_free(s); \
+ } while (0)
+
#ifdef TESTING
static bool failtest = false;
@@ -103,33 +120,6 @@
#endif /* TESTING */
#include "xenstored_test.h"
-
-/* FIXME: Ideally, this should never be called. Some can be eliminated. */
-/* Something is horribly wrong: shutdown immediately. */
-void __attribute__((noreturn)) corrupt(struct connection *conn,
- const char *fmt, ...)
-{
- va_list arglist;
- char *str;
- int saved_errno = errno;
-
- va_start(arglist, fmt);
- str = talloc_vasprintf(NULL, fmt, arglist);
- va_end(arglist);
-
- trace("xenstored corruption: connection id %i: err %s: %s",
- conn ? (int)conn->id : -1, strerror(saved_errno), str);
- eprintf("xenstored corruption: connection id %i: err %s: %s",
- conn ? (int)conn->id : -1, strerror(saved_errno), str);
-#ifdef TESTING
- /* Allow them to attach debugger. */
- sleep(30);
-#endif
- syslog(LOG_DAEMON,
- "xenstored corruption: connection id %i: err %s: %s",
- conn ? (int)conn->id : -1, strerror(saved_errno), str);
- _exit(2);
-}
TDB_CONTEXT *tdb_context(struct connection *conn)
{
@@ -216,8 +206,9 @@
now = time(NULL);
tm = localtime(&now);
- trace("%s %p %02d:%02d:%02d %s (", prefix, conn,
- tm->tm_hour, tm->tm_min, tm->tm_sec,
+ trace("%s %p %04d%02d%02d %02d:%02d:%02d %s (", prefix, conn,
+ tm->tm_year + 1900, tm->tm_mon + 1,
+ tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec,
sockmsg_string(data->hdr.msg.type));
for (i = 0; i < data->hdr.msg.len; i++)
@@ -415,16 +406,19 @@
TDB_DATA key, data;
uint32_t *p;
struct node *node;
+ TDB_CONTEXT * context = tdb_context(conn);
key.dptr = (void *)name;
key.dsize = strlen(name);
- data = tdb_fetch(tdb_context(conn), key);
+ data = tdb_fetch(context, key);
if (data.dptr == NULL) {
- if (tdb_error(tdb_context(conn)) == TDB_ERR_NOEXIST)
+ if (tdb_error(context) == TDB_ERR_NOEXIST)
errno = ENOENT;
- else
+ else {
+ log("TDB error on read: %s", tdb_errorstr(context));
errno = EIO;
+ }
return NULL;
}
@@ -837,8 +831,6 @@
return 0;
}
-/* Be careful: create heirarchy, put entry in existing parent *last*.
- * This helps fsck if we die during this. */
static struct node *create_node(struct connection *conn,
const char *name,
void *data, unsigned int datalen)
@@ -939,8 +931,9 @@
{
unsigned int i;
- /* Delete self, then delete children. If something goes wrong,
- * consistency check will clean up this way. */
+ /* Delete self, then delete children. If we crash, then the worst
+ that can happen is the children will continue to take up space, but
+ will otherwise be unreachable. */
delete_node_single(conn, node);
/* Delete children, too. */
@@ -950,11 +943,17 @@
child = read_node(conn,
talloc_asprintf(node, "%s/%s", node->name,
node->children + i));
- if (!child)
- corrupt(conn, "No child '%s' found", child);
- delete_node(conn, child);
- }
-}
+ if (child) {
+ delete_node(conn, child);
+ }
+ else {
+ trace("delete_node: No child '%s/%s' found!\n",
+ node->name, node->children + i);
+ /* Skip it, we've already deleted the parent. */
+ }
+ }
+}
+
/* Delete memory using memmove. */
static void memdel(void *mem, unsigned off, unsigned len, unsigned total)
@@ -962,6 +961,17 @@
memmove(mem + off, mem + off + len, total - off - len);
}
+
+static bool remove_child_entry(struct connection *conn, struct node *node,
+ size_t offset)
+{
+ size_t childlen = strlen(node->children + offset);
+ memdel(node->children, offset, childlen + 1, node->childlen);
+ node->childlen -= childlen + 1;
+ return write_node(conn, node);
+}
+
+
static bool delete_child(struct connection *conn,
struct node *node, const char *childname)
{
@@ -969,19 +979,19 @@
for (i = 0; i < node->childlen; i += strlen(node->children+i) + 1) {
if (streq(node->children+i, childname)) {
- memdel(node->children, i, strlen(childname) + 1,
- node->childlen);
- node->childlen -= strlen(childname) + 1;
- return write_node(conn, node);
+ return remove_child_entry(conn, node, i);
}
}
corrupt(conn, "Can't find child '%s' in %s", childname, node->name);
+ return false;
}
static int _rm(struct connection *conn, struct node *node, const char *name)
{
- /* Delete from parent first, then if something explodes fsck cleans. */
+ /* Delete from parent first, then if we crash, the worst that can
+ happen is the child will continue to take up space, but will
+ otherwise be unreachable. */
struct node *parent = read_node(conn, get_parent(name));
if (!parent) {
send_error(conn, EINVAL);
@@ -1000,10 +1010,12 @@
static void internal_rm(const char *name)
{
- char *tname = talloc_strdup(talloc_autofree_context(), name);
+ char *tname = talloc_strdup(NULL, name);
struct node *node = read_node(NULL, tname);
if (node)
_rm(NULL, node, tname);
+ talloc_free(node);
+ talloc_free(tname);
}
@@ -1149,18 +1161,19 @@
case XS_DEBUG:
if (streq(in->buffer, "print"))
xprintf("debug: %s", in->buffer + get_string(in, 0));
+ if (streq(in->buffer, "check"))
+ check_store();
#ifdef TESTING
/* For testing, we allow them to set id. */
if (streq(in->buffer, "setid")) {
conn->id = atoi(in->buffer + get_string(in, 0));
- send_ack(conn, XS_DEBUG);
} else if (streq(in->buffer, "failtest")) {
if (get_string(in, 0) < in->used)
srandom(atoi(in->buffer + get_string(in, 0)));
- send_ack(conn, XS_DEBUG);
failtest = true;
}
#endif /* TESTING */
+ send_ack(conn, XS_DEBUG);
break;
case XS_WATCH:
@@ -1258,7 +1271,7 @@
if (in->hdr.msg.len > PATH_MAX) {
#ifndef TESTING
- syslog(LOG_DAEMON, "Client tried to feed us %i",
+ syslog(LOG_ERR, "Client tried to feed us %i",
in->hdr.msg.len);
#endif
goto bad_client;
@@ -1425,10 +1438,18 @@
balloon driver will pick up stale entries. In the case of
the balloon driver, this can be fatal.
*/
- char *tlocal = talloc_strdup(talloc_autofree_context(),
- "/local");
- internal_rm("/local");
- create_node(NULL, tlocal, NULL, 0);
+ char *tlocal = talloc_strdup(NULL, "/local");
+
+ check_store();
+
+ if (remove_local) {
+ internal_rm("/local");
+ create_node(NULL, tlocal, NULL, 0);
+
+ check_store();
+ }
+
+ talloc_free(tlocal);
}
else {
tdb_ctx = tdb_open(tdbname, 7919, TDB_FLAGS, O_RDWR|O_CREAT,
@@ -1439,10 +1460,196 @@
manual_node("/", "tool");
manual_node("/tool", "xenstored");
manual_node("/tool/xenstored", NULL);
- }
-
- /* FIXME: Fsck */
-}
+
+ check_store();
+ }
+}
+
+
+static unsigned int hash_from_key_fn(void *k)
+{
+ char *str = k;
+ unsigned int hash = 5381;
+ char c;
+
+ while ((c = *str++))
+ hash = ((hash << 5) + hash) + (unsigned int)c;
+
+ return hash;
+}
+
+
+static int keys_equal_fn(void *key1, void *key2)
+{
+ return 0 == strcmp((char *)key1, (char *)key2);
+}
+
+
+static char *child_name(const char *s1, const char *s2)
+{
+ if (strcmp(s1, "/")) {
+ return talloc_asprintf(NULL, "%s/%s", s1, s2);
+ }
+ else {
+ return talloc_asprintf(NULL, "/%s", s2);
+ }
+}
+
+
+static void remember_string(struct hashtable *hash, const char *str)
+{
+ char *k = malloc(strlen(str) + 1);
+ strcpy(k, str);
+ hashtable_insert(hash, k, (void *)1);
+}
+
+
+/**
+ * A node has a children field that names the children of the node, separated
+ * by NULs. We check whether there are entries in there that are duplicated
+ * (and if so, delete the second one), and whether there are any that do not
+ * have a corresponding child node (and if so, delete them). Each valid child
+ * is then recursively checked.
+ *
+ * No deleting is performed if the recovery flag is cleared (i.e. -R was
+ * passed on the command line).
+ *
+ * As we go, we record each node in the given reachable hashtable. These
+ * entries will be used later in clean_store.
+ */
+static void check_store_(const char *name, struct hashtable *reachable)
+{
+ struct node *node = read_node(NULL, name);
+
+ if (node) {
+ size_t i = 0;
+
+ struct hashtable * children =
+ create_hashtable(16, hash_from_key_fn, keys_equal_fn);
+
+ remember_string(reachable, name);
+
+ while (i < node->childlen) {
+ size_t childlen = strlen(node->children + i);
+ char * childname = child_name(node->name,
+ node->children + i);
+ struct node *childnode = read_node(NULL, childname);
+
+ if (childnode) {
+ if (hashtable_search(children, childname)) {
+ log("check_store: '%s' is duplicated!",
+ childname);
+
+ if (recovery) {
+ remove_child_entry(NULL, node,
+ i);
+ i -= childlen + 1;
+ }
+ }
+ else {
+ remember_string(children, childname);
+ check_store_(childname, reachable);
+ }
+ }
+ else {
+ log("check_store: No child '%s' found!\n",
+ childname);
+
+ if (recovery) {
+ remove_child_entry(NULL, node, i);
+ i -= childlen + 1;
+ }
+ }
+
+ talloc_free(childnode);
+ talloc_free(childname);
+ i += childlen + 1;
+ }
+
+ hashtable_destroy(children, 0 /* Don't free values (they are
+ all (void *)1) */);
+ talloc_free(node);
+ }
+ else {
+ /* Impossible, because no database should ever be without the
+ root, and otherwise, we've just checked in our caller
+ (which made a recursive call to get here). */
+
+ log("check_store: No child '%s' found: impossible!", name);
+ }
+}
+
+
+/**
+ * Helper to clean_store below.
+ */
+static int clean_store_(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA val,
+ void *private)
+{
+ struct hashtable *reachable = private;
+ char * name = talloc_strndup(NULL, key.dptr, key.dsize);
+
+ if (!hashtable_search(reachable, name)) {
+ log("clean_store: '%s' is orphaned!", name);
+ if (recovery) {
+ tdb_delete(tdb, key);
+ }
+ }
+
+ talloc_free(name);
+
+ return 0;
+}
+
+
+/**
+ * Given the list of reachable nodes, iterate over the whole store, and
+ * remove any that were not reached.
+ */
+static void clean_store(struct hashtable *reachable)
+{
+ tdb_traverse(tdb_ctx, &clean_store_, reachable);
+}
+
+
+static void check_store()
+{
+ char * root = talloc_strdup(NULL, "/");
+ struct hashtable * reachable =
+ create_hashtable(16, hash_from_key_fn, keys_equal_fn);
+
+ log("Checking store ...");
+ check_store_(root, reachable);
+ clean_store(reachable);
+ log("Checking store complete.");
+
+ hashtable_destroy(reachable, 0 /* Don't free values (they are all
+ (void *)1) */);
+ talloc_free(root);
+}
+
+
+/* Something is horribly wrong: check the store. */
+static void corrupt(struct connection *conn, const char *fmt, ...)
+{
+ va_list arglist;
+ char *str;
+ int saved_errno = errno;
+
+ va_start(arglist, fmt);
+ str = talloc_vasprintf(NULL, fmt, arglist);
+ va_end(arglist);
+
+ log("corruption detected by connection %i: err %s: %s",
+ conn ? (int)conn->id : -1, strerror(saved_errno), str);
+
+#ifdef TESTING
+ /* Allow them to attach debugger. */
+ sleep(30);
+#endif
+ check_store();
+}
+
static void write_pidfile(const char *pidfile)
{
@@ -1506,6 +1713,9 @@
" --no-fork to request that the daemon does not fork,\n"
" --output-pid to request that the pid of the daemon is output,\n"
" --trace-file <file> giving the file for logging, and\n"
+" --no-recovery to request that no recovery should be attempted when\n"
+" the store is corrupted (debug only),\n"
+" --preserve-local to request that /local is preserved on start-up,\n"
" --verbose to request verbose execution.\n");
}
@@ -1517,6 +1727,8 @@
{ "no-fork", 0, NULL, 'N' },
{ "output-pid", 0, NULL, 'P' },
{ "trace-file", 1, NULL, 'T' },
+ { "no-recovery", 0, NULL, 'R' },
+ { "preserve-local", 0, NULL, 'L' },
{ "verbose", 0, NULL, 'V' },
{ NULL, 0, NULL, 0 } };
@@ -1532,7 +1744,7 @@
bool no_domain_init = false;
const char *pidfile = NULL;
- while ((opt = getopt_long(argc, argv, "DF:HNPT:V", options,
+ while ((opt = getopt_long(argc, argv, "DF:HNPT:RLV", options,
NULL)) != -1) {
switch (opt) {
case 'D':
@@ -1550,6 +1762,12 @@
case 'P':
outputpid = true;
break;
+ case 'R':
+ recovery = false;
+ break;
+ case 'L':
+ remove_local = false;
+ break;
case 'T':
tracefile = optarg;
break;
diff -r ede16886f979 -r c4ac21dc3f16 tools/xenstore/xenstored_core.h
--- a/tools/xenstore/xenstored_core.h Mon Mar 6 16:09:18 2006
+++ b/tools/xenstore/xenstored_core.h Mon Mar 6 17:21:35 2006
@@ -148,10 +148,6 @@
/* Replace the tdb: required for transaction code */
bool replace_tdb(const char *newname, TDB_CONTEXT *newtdb);
-/* Fail due to excessive corruption, capitalist pigdogs! */
-void __attribute__((noreturn)) corrupt(struct connection *conn,
- const char *fmt, ...);
-
struct connection *new_connection(connwritefn_t *write, connreadfn_t *read);
diff -r ede16886f979 -r c4ac21dc3f16 tools/xenstore/xenstored_domain.c
--- a/tools/xenstore/xenstored_domain.c Mon Mar 6 16:09:18 2006
+++ b/tools/xenstore/xenstored_domain.c Mon Mar 6 17:21:35 2006
@@ -27,7 +27,6 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
-#include <paths.h>
//#define DEBUG
#include "utils.h"
@@ -466,21 +465,8 @@
{
int rc, fd;
evtchn_port_t port;
- unsigned long kva;
char str[20];
struct domain *dom0;
-
- fd = open(XENSTORED_PROC_KVA, O_RDONLY);
- if (fd == -1)
- return -1;
-
- rc = read(fd, str, sizeof(str));
- if (rc == -1)
- goto outfd;
- str[rc] = '\0';
- kva = strtoul(str, NULL, 0);
-
- close(fd);
fd = open(XENSTORED_PROC_PORT, O_RDONLY);
if (fd == -1)
@@ -496,12 +482,12 @@
dom0 = new_domain(NULL, 0, port);
- fd = open(_PATH_KMEM, O_RDWR);
+ fd = open(XENSTORED_PROC_KVA, O_RDWR);
if (fd == -1)
return -1;
dom0->interface = mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE,
- MAP_SHARED, fd, kva);
+ MAP_SHARED, fd, 0);
if (dom0->interface == MAP_FAILED)
goto outfd;
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/ia64/xen/dom0_ops.c Mon Mar 6 17:21:35 2006
@@ -16,10 +16,11 @@
#include <asm/pdb.h>
#include <xen/trace.h>
#include <xen/console.h>
+#include <xen/guest_access.h>
#include <public/sched_ctl.h>
#include <asm/vmx.h>
-long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
+long arch_do_dom0_op(dom0_op_t *op, GUEST_HANDLE(dom0_op_t) u_dom0_op)
{
long ret = 0;
@@ -64,7 +65,7 @@
put_domain(d);
- copy_to_user(u_dom0_op, op, sizeof(*op));
+ copy_to_guest(u_dom0_op, op, 1);
}
break;
@@ -74,7 +75,6 @@
int n,j;
int num = op->u.getpageframeinfo2.num;
domid_t dom = op->u.getpageframeinfo2.domain;
- unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array;
struct domain *d;
unsigned long *l_arr;
ret = -ESRCH;
@@ -95,7 +95,8 @@
{
int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);
- if ( copy_from_user(l_arr, &s_ptr[n], k*sizeof(unsigned long)) )
+ if ( copy_from_guest_offset(l_arr, op->u.getpageframeinfo2.array,
+ n, k) )
{
ret = -EINVAL;
break;
@@ -135,7 +136,8 @@
}
- if ( copy_to_user(&s_ptr[n], l_arr, k*sizeof(unsigned long)) )
+ if ( copy_to_guest_offset(op->u.getpageframeinfo2.array,
+ n, l_arr, k) )
{
ret = -EINVAL;
break;
@@ -160,7 +162,6 @@
unsigned long start_page = op->u.getmemlist.max_pfns >> 32;
unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff;
unsigned long mfn;
- unsigned long *buffer = op->u.getmemlist.buffer;
ret = -EINVAL;
if ( d != NULL )
@@ -180,16 +181,16 @@
{
mfn = gmfn_to_mfn_foreign(d, i);
- if ( put_user(mfn, buffer) )
+ if ( copy_to_guest_offset(op->u.getmemlist.buffer,
+ i - start_page, &mfn, 1) )
{
ret = -EFAULT;
break;
}
- buffer++;
}
op->u.getmemlist.num_pfns = i - start_page;
- copy_to_user(u_dom0_op, op, sizeof(*op));
+ copy_to_guest(u_dom0_op, op, 1);
put_domain(d);
}
@@ -211,7 +212,7 @@
memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
//memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
ret = 0;
- if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
+ if ( copy_to_guest(u_dom0_op, op, 1) )
ret = -EFAULT;
}
break;
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/ia64/xen/xensetup.c Mon Mar 6 17:21:35 2006
@@ -12,7 +12,7 @@
#include <xen/sched.h>
#include <xen/mm.h>
#include <public/version.h>
-//#include <xen/delay.h>
+#include <xen/gdbstub.h>
#include <xen/compile.h>
#include <xen/console.h>
#include <xen/serial.h>
@@ -341,6 +341,8 @@
printk("Brought up %ld CPUs\n", (long)num_online_cpus());
smp_cpus_done(max_cpus);
#endif
+
+ initialise_gdb(); /* could be moved earlier */
do_initcalls();
printk("About to call sort_main_extable()\n");
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/boot/mkelf32.c
--- a/xen/arch/x86/boot/mkelf32.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/boot/mkelf32.c Mon Mar 6 17:21:35 2006
@@ -244,8 +244,8 @@
inimage = argv[1];
outimage = argv[2];
- loadbase = strtoull(argv[3], NULL, 16);
- final_exec_addr = strtoul(argv[4], NULL, 16);
+ loadbase = strtoul(argv[3], NULL, 16);
+ final_exec_addr = strtoull(argv[4], NULL, 16);
infd = open(inimage, O_RDONLY);
if ( infd == -1 )
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/dom0_ops.c
--- a/xen/arch/x86/dom0_ops.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/dom0_ops.c Mon Mar 6 17:21:35 2006
@@ -10,6 +10,7 @@
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
+#include <xen/guest_access.h>
#include <public/dom0_ops.h>
#include <xen/sched.h>
#include <xen/event.h>
@@ -48,7 +49,7 @@
(void)rdmsr_safe(msr_addr, msr_lo, msr_hi);
}
-long arch_do_dom0_op(struct dom0_op *op, struct dom0_op *u_dom0_op)
+long arch_do_dom0_op(struct dom0_op *op, GUEST_HANDLE(dom0_op_t) u_dom0_op)
{
long ret = 0;
@@ -75,7 +76,7 @@
op->u.msr.out1 = msr_lo;
op->u.msr.out2 = msr_hi;
- copy_to_user(u_dom0_op, op, sizeof(*op));
+ copy_to_guest(u_dom0_op, op, 1);
}
ret = 0;
}
@@ -90,7 +91,7 @@
{
ret = shadow_mode_control(d, &op->u.shadow_control);
put_domain(d);
- copy_to_user(u_dom0_op, op, sizeof(*op));
+ copy_to_guest(u_dom0_op, op, 1);
}
}
break;
@@ -102,10 +103,11 @@
op->u.add_memtype.nr_mfns,
op->u.add_memtype.type,
1);
- if (ret > 0)
- {
- (void)__put_user(0, &u_dom0_op->u.add_memtype.handle);
- (void)__put_user(ret, &u_dom0_op->u.add_memtype.reg);
+ if ( ret > 0 )
+ {
+ op->u.add_memtype.handle = 0;
+ op->u.add_memtype.reg = ret;
+ (void)copy_to_guest(u_dom0_op, op, 1);
ret = 0;
}
}
@@ -136,9 +138,10 @@
if ( op->u.read_memtype.reg < num_var_ranges )
{
mtrr_if->get(op->u.read_memtype.reg, &mfn, &nr_mfns, &type);
- (void)__put_user(mfn, &u_dom0_op->u.read_memtype.mfn);
- (void)__put_user(nr_mfns, &u_dom0_op->u.read_memtype.nr_mfns);
- (void)__put_user(type, &u_dom0_op->u.read_memtype.type);
+ op->u.read_memtype.mfn = mfn;
+ op->u.read_memtype.nr_mfns = nr_mfns;
+ op->u.read_memtype.type = type;
+ (void)copy_to_guest(u_dom0_op, op, 1);
ret = 0;
}
}
@@ -147,7 +150,7 @@
case DOM0_MICROCODE:
{
extern int microcode_update(void *buf, unsigned long len);
- ret = microcode_update(op->u.microcode.data, op->u.microcode.length);
+ ret = microcode_update(op->u.microcode.data.p, op->u.microcode.length);
}
break;
@@ -195,7 +198,7 @@
memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
ret = 0;
- if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
+ if ( copy_to_guest(u_dom0_op, op, 1) )
ret = -EFAULT;
}
break;
@@ -245,7 +248,7 @@
put_domain(d);
- copy_to_user(u_dom0_op, op, sizeof(*op));
+ copy_to_guest(u_dom0_op, op, 1);
}
break;
@@ -255,7 +258,6 @@
int n,j;
int num = op->u.getpageframeinfo2.num;
domid_t dom = op->u.getpageframeinfo2.domain;
- unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array;
struct domain *d;
unsigned long *l_arr;
ret = -ESRCH;
@@ -277,7 +279,8 @@
{
int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);
- if ( copy_from_user(l_arr, &s_ptr[n], k*sizeof(unsigned long)) )
+ if ( copy_from_guest_offset(l_arr, op->u.getpageframeinfo2.array,
+ n, k) )
{
ret = -EINVAL;
break;
@@ -320,7 +323,8 @@
}
- if ( copy_to_user(&s_ptr[n], l_arr, k*sizeof(unsigned long)) )
+ if ( copy_to_guest_offset(op->u.getpageframeinfo2.array,
+ n, l_arr, k) )
{
ret = -EINVAL;
break;
@@ -341,7 +345,6 @@
struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
unsigned long max_pfns = op->u.getmemlist.max_pfns;
unsigned long mfn;
- unsigned long *buffer = op->u.getmemlist.buffer;
struct list_head *list_ent;
ret = -EINVAL;
@@ -353,19 +356,20 @@
list_ent = d->page_list.next;
for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
{
- mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
- if ( put_user(mfn, buffer) )
+ mfn = page_to_mfn(list_entry(
+ list_ent, struct page_info, list));
+ if ( copy_to_guest_offset(op->u.getmemlist.buffer,
+ i, &mfn, 1) )
{
ret = -EFAULT;
break;
}
- buffer++;
list_ent = mfn_to_page(mfn)->list.next;
}
spin_unlock(&d->page_alloc_lock);
op->u.getmemlist.num_pfns = i;
- copy_to_user(u_dom0_op, op, sizeof(*op));
+ copy_to_guest(u_dom0_op, op, 1);
put_domain(d);
}
@@ -401,13 +405,12 @@
entry.start = e820.map[i].addr;
entry.end = e820.map[i].addr + e820.map[i].size;
entry.is_ram = (e820.map[i].type == E820_RAM);
- (void)copy_to_user(
- &op->u.physical_memory_map.memory_map[i],
- &entry, sizeof(entry));
+ (void)copy_to_guest_offset(
+ op->u.physical_memory_map.memory_map, i, &entry, 1);
}
op->u.physical_memory_map.nr_map_entries = i;
- (void)copy_to_user(u_dom0_op, op, sizeof(*op));
+ (void)copy_to_guest(u_dom0_op, op, 1);
}
break;
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/hvm/svm/svm.c Mon Mar 6 17:21:35 2006
@@ -247,6 +247,7 @@
void svm_restore_msrs(struct vcpu *v)
{
}
+#endif
#define IS_CANO_ADDRESS(add) 1
@@ -297,7 +298,7 @@
return 0;
}
- HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %lx\n",
+ HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %"PRIx64"\n",
msr_content);
regs->eax = msr_content & 0xffffffff;
@@ -311,12 +312,14 @@
struct vcpu *vc = current;
struct vmcb_struct *vmcb = vc->arch.hvm_svm.vmcb;
- HVM_DBG_LOG(DBG_LEVEL_1, "mode_do_msr_write msr %lx msr_content %lx\n",
- regs->ecx, msr_content);
+ HVM_DBG_LOG(DBG_LEVEL_1, "mode_do_msr_write msr %lx "
+ "msr_content %"PRIx64"\n",
+ (unsigned long)regs->ecx, msr_content);
switch (regs->ecx)
{
case MSR_EFER:
+#ifdef __x86_64__
if ((msr_content & EFER_LME) ^ test_bit(SVM_CPU_STATE_LME_ENABLED,
&vc->arch.hvm_svm.cpu_state))
{
@@ -337,6 +340,7 @@
if ((msr_content ^ vmcb->efer) & EFER_LME)
msr_content &= ~EFER_LME;
/* No update for LME/LMA since it have no effect */
+#endif
vmcb->efer = msr_content | EFER_SVME;
break;
@@ -382,18 +386,6 @@
}
return 1;
}
-
-#else
-static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
-{
- return 0;
-}
-
-static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
-{
- return 0;
-}
-#endif
void svm_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8])
{
@@ -752,7 +744,8 @@
/* unmap IO shared page */
struct domain *d = v->domain;
if ( d->arch.hvm_domain.shared_page_va )
- unmap_domain_page((void *)d->arch.hvm_domain.shared_page_va);
+ unmap_domain_page_global(
+ (void *)d->arch.hvm_domain.shared_page_va);
shadow_direct_map_clean(d);
}
@@ -937,10 +930,8 @@
if (input == 1)
{
-#ifndef __x86_64__
if ( hvm_apic_support(v->domain) &&
!vlapic_global_enabled((VLAPIC(v))) )
-#endif
clear_bit(X86_FEATURE_APIC, &edx);
#if CONFIG_PAGING_LEVELS < 3
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/hvm/svm/x86_32/exits.S
--- a/xen/arch/x86/hvm/svm/x86_32/exits.S Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/hvm/svm/x86_32/exits.S Mon Mar 6 17:21:35 2006
@@ -88,9 +88,6 @@
#define STGI .byte 0x0F,0x01,0xDC
#define CLGI .byte 0x0F,0x01,0xDD
-#define DO_TSC_OFFSET 0
-#define DO_FPUSAVE 0
-
ENTRY(svm_asm_do_launch)
sti
CLGI
@@ -100,36 +97,6 @@
movl %eax, VMCB_rax(%ecx)
movl VCPU_svm_hsa_pa(%ebx), %eax
VMSAVE
-
-#if DO_FPUSAVE
- mov %cr0, %eax
- push %eax
- clts
- lea VCPU_arch_guest_fpu_ctxt(%ebx), %eax
- fxrstor (%eax)
- pop %eax
- mov %eax, %cr0
-#endif
-
-#if (DO_TSC_OFFSET)
- pushl %edx /* eax and edx get trashed by rdtsc */
- pushl %eax
- rdtsc
- subl VCPU_svm_vmexit_tsc(%ebx),%eax /* tsc's from */
- sbbl VCPU_svm_vmexit_tsc+4(%ebx),%edx /* last #VMEXIT? */
- subl %eax,VMCB_tsc_offset(%ecx) /* subtract from running TSC_OFFSET */
- sbbl %edx,VMCB_tsc_offset+4(%ecx)
- subl $20000,VMCB_tsc_offset(%ecx) /* fudge factor for VMXXX calls */
- sbbl $0,VMCB_tsc_offset+4(%ecx)
-
- /*
- * TODO: may need to add a kludge factor to account for all the cycles
- * burned in VMLOAD, VMSAVE, VMRUN...
- */
-
- popl %eax
- popl %edx
- #endif
movl VCPU_svm_vmcb_pa(%ebx), %eax
popl %ebx
@@ -150,31 +117,7 @@
VMSAVE
/* eax is the only register we're allowed to touch here... */
-#if DO_FPUSAVE
- mov %cr0, %eax
- push %eax
- clts
GET_CURRENT(%eax)
- lea VCPU_arch_guest_fpu_ctxt(%eax), %eax
- fxsave (%eax)
- fnclex
- pop %eax
- mov %eax, %cr0
-#endif
-
- GET_CURRENT(%eax)
-
-#if (DO_TSC_OFFSET)
- pushl %edx
- pushl %ebx
- movl %eax,%ebx
- rdtsc
- movl %eax,VCPU_svm_vmexit_tsc(%ebx)
- movl %edx,VCPU_svm_vmexit_tsc+4(%ebx)
- movl %ebx,%eax
- popl %ebx
- popl %edx
-#endif
movl VCPU_svm_hsa_pa(%eax), %eax
VMLOAD
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/hvm/vioapic.c Mon Mar 6 17:21:35 2006
@@ -52,20 +52,6 @@
s->flags &= ~IOAPIC_ENABLE_FLAG;
}
-static void ioapic_dump_redir(hvm_vioapic_t *s, uint8_t entry)
-{
- RedirStatus redir = s->redirtbl[entry];
-
- HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_dump_redir "
- "entry %x vector %x deliver_mod %x destmode %x delivestatus %x "
- "polarity %x remote_irr %x trigmod %x mask %x dest_id %x\n",
- entry, redir.RedirForm.vector, redir.RedirForm.deliver_mode,
- redir.RedirForm.destmode, redir.RedirForm.delivestatus,
- redir.RedirForm.polarity, redir.RedirForm.remoteirr,
- redir.RedirForm.trigmod, redir.RedirForm.mask,
- redir.RedirForm.dest_id);
-}
-
#ifdef HVM_DOMAIN_SAVE_RESTORE
void ioapic_save(QEMUFile* f, void* opaque)
{
@@ -534,7 +520,19 @@
if (!IOAPICEnabled(s) || s->redirtbl[irq].RedirForm.mask)
return;
- ioapic_dump_redir(s, irq);
+ HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "hvm_vioapic_set_irq entry %x "
+ "vector %x deliver_mod %x destmode %x delivestatus %x "
+ "polarity %x remote_irr %x trigmod %x mask %x dest_id %x\n",
+ irq,
+ s->redirtbl[irq].RedirForm.vector,
+ s->redirtbl[irq].RedirForm.deliver_mode,
+ s->redirtbl[irq].RedirForm.destmode,
+ s->redirtbl[irq].RedirForm.delivestatus,
+ s->redirtbl[irq].RedirForm.polarity,
+ s->redirtbl[irq].RedirForm.remoteirr,
+ s->redirtbl[irq].RedirForm.trigmod,
+ s->redirtbl[irq].RedirForm.mask,
+ s->redirtbl[irq].RedirForm.dest_id);
if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
uint32_t bit = 1 << irq;
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/hvm/vmx/vmx.c Mon Mar 6 17:21:35 2006
@@ -172,7 +172,7 @@
switch(regs->ecx){
case MSR_EFER:
msr_content = msr->msr_items[VMX_INDEX_MSR_EFER];
- HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content %llx\n", (unsigned long long)msr_content);
+ HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content %"PRIx64"\n", msr_content);
if (test_bit(VMX_CPU_STATE_LME_ENABLED,
&vc->arch.hvm_vmx.cpu_state))
msr_content |= 1 << _EFER_LME;
@@ -202,7 +202,8 @@
default:
return 0;
}
- HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %lx\n", msr_content);
+ HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %"PRIx64"\n",
+ msr_content);
regs->eax = msr_content & 0xffffffff;
regs->edx = msr_content >> 32;
return 1;
@@ -216,8 +217,9 @@
struct vmx_msr_state * host_state =
&percpu_msr[smp_processor_id()];
- HVM_DBG_LOG(DBG_LEVEL_1, " mode_do_msr_write msr %lx msr_content %lx\n",
- regs->ecx, msr_content);
+ HVM_DBG_LOG(DBG_LEVEL_1, " mode_do_msr_write msr %lx "
+ "msr_content %"PRIx64"\n",
+ (unsigned long)regs->ecx, msr_content);
switch (regs->ecx){
case MSR_EFER:
@@ -882,7 +884,7 @@
__vmread(GUEST_RFLAGS, &eflags);
vm86 = eflags & X86_EFLAGS_VM ? 1 : 0;
- HVM_DBG_LOG(DBG_LEVEL_1,
+ HVM_DBG_LOG(DBG_LEVEL_IO,
"vmx_io_instruction: vm86 %d, eip=%lx:%lx, "
"exit_qualification = %lx",
vm86, cs, eip, exit_qualification);
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/io_apic.c
--- a/xen/arch/x86/io_apic.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/io_apic.c Mon Mar 6 17:21:35 2006
@@ -1548,8 +1548,9 @@
*/
apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
init_8259A(1);
- timer_ack = 1;
- enable_8259A_irq(0);
+ /* XEN: Ripped out the legacy missed-tick logic, so below is not needed. */
+ /*timer_ack = 1;*/
+ /*enable_8259A_irq(0);*/
pin1 = find_isa_irq_pin(0, mp_INT);
apic1 = find_isa_irq_apic(0, mp_INT);
@@ -1617,7 +1618,7 @@
printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
- timer_ack = 0;
+ /*timer_ack = 0;*/
init_8259A(0);
make_8259A_irq(0);
apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
@@ -1631,16 +1632,6 @@
printk(" failed :(.\n");
panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
"report. Then try booting with the 'noapic' option");
-}
-
-#define NR_IOAPIC_BIOSIDS 256
-static u8 ioapic_biosid_to_apic_enum[NR_IOAPIC_BIOSIDS];
-static void store_ioapic_biosid_mapping(void)
-{
- u8 apic;
- memset(ioapic_biosid_to_apic_enum, ~0, NR_IOAPIC_BIOSIDS);
- for ( apic = 0; apic < nr_ioapics; apic++ )
- ioapic_biosid_to_apic_enum[mp_ioapics[apic].mpc_apicid] = apic;
}
/*
@@ -1654,8 +1645,6 @@
void __init setup_IO_APIC(void)
{
- store_ioapic_biosid_mapping();
-
enable_IO_APIC();
if (acpi_ioapic)
@@ -1839,50 +1828,45 @@
#endif /*CONFIG_ACPI_BOOT*/
-
-int ioapic_guest_read(int apicid, int address, u32 *pval)
-{
- u32 val;
- int apicenum;
- union IO_APIC_reg_00 reg_00;
+static int ioapic_physbase_to_id(unsigned long physbase)
+{
+ int apic;
+ for ( apic = 0; apic < nr_ioapics; apic++ )
+ if ( mp_ioapics[apic].mpc_apicaddr == physbase )
+ return apic;
+ return -EINVAL;
+}
+
+int ioapic_guest_read(unsigned long physbase, unsigned int reg, u32 *pval)
+{
+ int apic;
unsigned long flags;
- if ( (apicid >= NR_IOAPIC_BIOSIDS) ||
- ((apicenum = ioapic_biosid_to_apic_enum[apicid]) >= nr_ioapics) )
- return -EINVAL;
+ if ( (apic = ioapic_physbase_to_id(physbase)) < 0 )
+ return apic;
spin_lock_irqsave(&ioapic_lock, flags);
- val = io_apic_read(apicenum, address);
+ *pval = io_apic_read(apic, reg);
spin_unlock_irqrestore(&ioapic_lock, flags);
- /* Rewrite APIC ID to what the BIOS originally specified. */
- if ( address == 0 )
- {
- reg_00.raw = val;
- reg_00.bits.ID = apicid;
- val = reg_00.raw;
- }
-
- *pval = val;
return 0;
}
-int ioapic_guest_write(int apicid, int address, u32 val)
-{
- int apicenum, pin, irq;
+int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
+{
+ int apic, pin, irq;
struct IO_APIC_route_entry rte = { 0 };
struct irq_pin_list *entry;
unsigned long flags;
- if ( (apicid >= NR_IOAPIC_BIOSIDS) ||
- ((apicenum = ioapic_biosid_to_apic_enum[apicid]) >= nr_ioapics) )
- return -EINVAL;
+ if ( (apic = ioapic_physbase_to_id(physbase)) < 0 )
+ return apic;
/* Only write to the first half of a route entry. */
- if ( (address < 0x10) || (address & 1) )
+ if ( (reg < 0x10) || (reg & 1) )
return 0;
- pin = (address - 0x10) >> 1;
+ pin = (reg - 0x10) >> 1;
*(u32 *)&rte = val;
rte.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
@@ -1898,7 +1882,7 @@
if ( rte.delivery_mode > dest_LowestPrio )
{
printk("ERROR: Attempt to write weird IOAPIC destination mode!\n");
- printk(" APIC=%d/%d, lo-reg=%x\n", apicid, pin, val);
+ printk(" APIC=%d/%d, lo-reg=%x\n", apic, pin, val);
return -EINVAL;
}
@@ -1923,19 +1907,19 @@
/* Record the pin<->irq mapping. */
for ( entry = &irq_2_pin[irq]; ; entry = &irq_2_pin[entry->next] )
{
- if ( (entry->apic == apicenum) && (entry->pin == pin) )
+ if ( (entry->apic == apic) && (entry->pin == pin) )
break;
if ( !entry->next )
{
- add_pin_to_irq(irq, apicenum, pin);
+ add_pin_to_irq(irq, apic, pin);
break;
}
}
}
spin_lock_irqsave(&ioapic_lock, flags);
- io_apic_write(apicenum, 0x10 + 2 * pin, *(((int *)&rte) + 0));
- io_apic_write(apicenum, 0x11 + 2 * pin, *(((int *)&rte) + 1));
+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&rte) + 0));
+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&rte) + 1));
spin_unlock_irqrestore(&ioapic_lock, flags);
return 0;
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/microcode.c
--- a/xen/arch/x86/microcode.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/microcode.c Mon Mar 6 17:21:35 2006
@@ -116,7 +116,7 @@
#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
/* serialize access to the physical write to MSR 0x79 */
-static spinlock_t microcode_update_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(microcode_update_lock);
/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
static DECLARE_MUTEX(microcode_sem);
@@ -166,7 +166,8 @@
}
wrmsr(MSR_IA32_UCODE_REV, 0, 0);
- __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
+ /* see notes above for revision 1.07. Apparent chip bug */
+ sync_core();
/* get the current revision from MSR 0x8B */
rdmsr(MSR_IA32_UCODE_REV, val[0], uci->rev);
pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n",
@@ -366,7 +367,7 @@
struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
if (uci->mc == NULL) {
- printk(KERN_INFO "microcode: No suitable data for CPU%d\n", cpu_num);
+ printk(KERN_INFO "microcode: No new microcode data for CPU%d\n", cpu_num);
return;
}
@@ -379,7 +380,9 @@
(unsigned long) uci->mc->bits >> 16 >> 16);
wrmsr(MSR_IA32_UCODE_REV, 0, 0);
- __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
+ /* see notes above for revision 1.07. Apparent chip bug */
+ sync_core();
+
/* get the current revision from MSR 0x8B */
rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/mm.c Mon Mar 6 17:21:35 2006
@@ -506,7 +506,6 @@
vaddr <<= PGT_va_shift;
rc = get_page_and_type_from_pagenr(
l2e_get_pfn(l2e), PGT_l1_page_table | vaddr, d);
-
#if CONFIG_PAGING_LEVELS == 2
if ( unlikely(!rc) )
rc = get_linear_pagetable(l2e, pfn, d);
@@ -3187,8 +3186,8 @@
ptwr_flush(d, PTWR_PT_INACTIVE);
/* Read the PTE that maps the page being updated. */
- if (__copy_from_user(&pte, &linear_pg_table[l1_linear_offset(addr)],
- sizeof(pte)))
+ if ( __copy_from_user(&pte, &linear_pg_table[l1_linear_offset(addr)],
+ sizeof(pte)) )
{
MEM_LOG("ptwr_emulate: Cannot read thru linear_pg_table");
return X86EMUL_UNHANDLEABLE;
@@ -3198,15 +3197,10 @@
page = mfn_to_page(pfn);
/* We are looking only for read-only mappings of p.t. pages. */
- if ( ((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) != _PAGE_PRESENT) ||
- ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
- (page_get_owner(page) != d) )
- {
- MEM_LOG("ptwr_emulate: Page is mistyped or bad pte "
- "(%lx, %" PRtype_info ")",
- l1e_get_pfn(pte), page->u.inuse.type_info);
- return X86EMUL_UNHANDLEABLE;
- }
+ ASSERT((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) == _PAGE_PRESENT);
+ ASSERT((page->u.inuse.type_info & PGT_type_mask) == PGT_l1_page_table);
+ ASSERT((page->u.inuse.type_info & PGT_count_mask) != 0);
+ ASSERT(page_get_owner(page) == d);
/* Check the new PTE. */
nl1e = l1e_from_intpte(val);
@@ -3266,8 +3260,11 @@
unsigned long new,
unsigned long new_hi)
{
- return ptwr_emulated_update(
- addr, ((u64)old_hi << 32) | old, ((u64)new_hi << 32) | new, 8, 1);
+ if ( CONFIG_PAGING_LEVELS == 2 )
+ return X86EMUL_UNHANDLEABLE;
+ else
+ return ptwr_emulated_update(
+ addr, ((u64)old_hi << 32) | old, ((u64)new_hi << 32) | new, 8, 1);
}
static struct x86_mem_emulator ptwr_mem_emulator = {
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/nmi.c
--- a/xen/arch/x86/nmi.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/nmi.c Mon Mar 6 17:21:35 2006
@@ -322,15 +322,9 @@
case X86_VENDOR_INTEL:
switch (boot_cpu_data.x86) {
case 6:
- if (boot_cpu_data.x86_model > 0xd)
- return;
-
setup_p6_watchdog();
break;
case 15:
- if (boot_cpu_data.x86_model > 0x4)
- return;
-
if (!setup_p4_watchdog())
return;
break;
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/physdev.c Mon Mar 6 17:21:35 2006
@@ -11,8 +11,12 @@
#include <public/xen.h>
#include <public/physdev.h>
-extern int ioapic_guest_read(int apicid, int address, u32 *pval);
-extern int ioapic_guest_write(int apicid, int address, u32 pval);
+extern int
+ioapic_guest_read(
+ unsigned long physbase, unsigned int reg, u32 *pval);
+extern int
+ioapic_guest_write(
+ unsigned long physbase, unsigned int reg, u32 pval);
/*
* Demuxing hypercall.
@@ -49,7 +53,9 @@
if ( !IS_PRIV(current->domain) )
break;
ret = ioapic_guest_read(
- op.u.apic_op.apic, op.u.apic_op.offset, &op.u.apic_op.value);
+ op.u.apic_op.apic_physbase,
+ op.u.apic_op.reg,
+ &op.u.apic_op.value);
break;
case PHYSDEVOP_APIC_WRITE:
@@ -57,7 +63,9 @@
if ( !IS_PRIV(current->domain) )
break;
ret = ioapic_guest_write(
- op.u.apic_op.apic, op.u.apic_op.offset, op.u.apic_op.value);
+ op.u.apic_op.apic_physbase,
+ op.u.apic_op.reg,
+ op.u.apic_op.value);
break;
case PHYSDEVOP_ASSIGN_VECTOR:
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/setup.c Mon Mar 6 17:21:35 2006
@@ -13,6 +13,7 @@
#include <xen/multiboot.h>
#include <xen/domain_page.h>
#include <xen/compile.h>
+#include <xen/gdbstub.h>
#include <public/version.h>
#include <asm/bitops.h>
#include <asm/smp.h>
@@ -479,6 +480,8 @@
printk("Brought up %ld CPUs\n", (long)num_online_cpus());
smp_cpus_done(max_cpus);
+ initialise_gdb(); /* could be moved earlier */
+
do_initcalls();
schedulers_start();
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/shadow.c Mon Mar 6 17:21:35 2006
@@ -279,8 +279,8 @@
psh_type == PGT_l4_shadow ) /* allocated for PAE PDP page */
page = alloc_domheap_pages(NULL, 0, ALLOC_DOM_DMA);
else if ( d->arch.ops->guest_paging_levels == PAGING_L3 &&
- psh_type == PGT_l3_shadow ) /* allocated for PAE PDP page */
- page = alloc_domheap_pages(NULL, 0, ALLOC_DOM_DMA);
+ (psh_type == PGT_l3_shadow || psh_type == PGT_l4_shadow) )
+ page = alloc_domheap_pages(NULL, 0, ALLOC_DOM_DMA); /* allocated for PAE PDP page */
else
page = alloc_domheap_page(NULL);
#endif
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/shadow32.c Mon Mar 6 17:21:35 2006
@@ -29,6 +29,7 @@
#include <xen/event.h>
#include <xen/sched.h>
#include <xen/trace.h>
+#include <xen/guest_access.h>
#define MFN_PINNED(_x) (mfn_to_page(_x)->u.inuse.type_info & PGT_pinned)
#define va_to_l1mfn(_ed, _va) \
@@ -1508,14 +1509,14 @@
d->arch.shadow_fault_count = 0;
d->arch.shadow_dirty_count = 0;
- if ( (sc->dirty_bitmap == NULL) ||
+ if ( guest_handle_is_null(sc->dirty_bitmap) ||
(d->arch.shadow_dirty_bitmap == NULL) )
{
rc = -EINVAL;
break;
}
- if(sc->pages > d->arch.shadow_dirty_bitmap_size)
+ if ( sc->pages > d->arch.shadow_dirty_bitmap_size )
sc->pages = d->arch.shadow_dirty_bitmap_size;
#define chunk (8*1024) /* Transfer and clear in 1kB chunks for L1 cache. */
@@ -1524,10 +1525,10 @@
int bytes = ((((sc->pages - i) > chunk) ?
chunk : (sc->pages - i)) + 7) / 8;
- if (copy_to_user(
- sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
- d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
- bytes))
+ if ( copy_to_guest_offset(
+ sc->dirty_bitmap, i/(8*sizeof(unsigned long)),
+ d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
+ (bytes+sizeof(unsigned long)-1) / sizeof(unsigned long)) )
{
rc = -EINVAL;
break;
@@ -1544,18 +1545,20 @@
sc->stats.fault_count = d->arch.shadow_fault_count;
sc->stats.dirty_count = d->arch.shadow_dirty_count;
- if ( (sc->dirty_bitmap == NULL) ||
+ if ( guest_handle_is_null(sc->dirty_bitmap) ||
(d->arch.shadow_dirty_bitmap == NULL) )
{
rc = -EINVAL;
break;
}
- if(sc->pages > d->arch.shadow_dirty_bitmap_size)
+ if ( sc->pages > d->arch.shadow_dirty_bitmap_size )
sc->pages = d->arch.shadow_dirty_bitmap_size;
- if (copy_to_user(sc->dirty_bitmap,
- d->arch.shadow_dirty_bitmap, (sc->pages+7)/8))
+ if ( copy_to_guest(sc->dirty_bitmap,
+ d->arch.shadow_dirty_bitmap,
+ (((sc->pages+7)/8)+sizeof(unsigned long)-1) /
+ sizeof(unsigned long)) )
{
rc = -EINVAL;
break;
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/shadow_public.c Mon Mar 6 17:21:35 2006
@@ -29,6 +29,7 @@
#include <xen/event.h>
#include <xen/sched.h>
#include <xen/trace.h>
+#include <xen/guest_access.h>
#include <asm/shadow_64.h>
static int alloc_p2m_table(struct domain *d);
@@ -413,7 +414,8 @@
(l3e_get_flags(mpl3e[i]) & _PAGE_PRESENT) ?
l2e_from_pfn(l3e_get_pfn(mpl3e[i]), __PAGE_HYPERVISOR) :
l2e_empty();
- mpl2e[l2_table_offset(RO_MPT_VIRT_START)] = l2e_empty();
+ for ( i = 0; i < (MACHPHYS_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
+ mpl2e[l2_table_offset(RO_MPT_VIRT_START) + i] = l2e_empty();
v->arch.monitor_table = mk_pagetable(m3mfn << PAGE_SHIFT); /* < 4GB */
v->arch.monitor_vtable = (l2_pgentry_t *) mpl3e;
@@ -1266,14 +1268,14 @@
d->arch.shadow_fault_count = 0;
d->arch.shadow_dirty_count = 0;
- if ( (sc->dirty_bitmap == NULL) ||
+ if ( guest_handle_is_null(sc->dirty_bitmap) ||
(d->arch.shadow_dirty_bitmap == NULL) )
{
rc = -EINVAL;
break;
}
- if(sc->pages > d->arch.shadow_dirty_bitmap_size)
+ if ( sc->pages > d->arch.shadow_dirty_bitmap_size )
sc->pages = d->arch.shadow_dirty_bitmap_size;
#define chunk (8*1024) /* Transfer and clear in 1kB chunks for L1 cache. */
@@ -1282,10 +1284,10 @@
int bytes = ((((sc->pages - i) > chunk) ?
chunk : (sc->pages - i)) + 7) / 8;
- if (copy_to_user(
- sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
+ if ( copy_to_guest_offset(
+ sc->dirty_bitmap, i/(8*sizeof(unsigned long)),
d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
- bytes))
+ (bytes+sizeof(unsigned long)-1) / sizeof(unsigned long)) )
{
rc = -EINVAL;
break;
@@ -1301,18 +1303,20 @@
sc->stats.fault_count = d->arch.shadow_fault_count;
sc->stats.dirty_count = d->arch.shadow_dirty_count;
- if ( (sc->dirty_bitmap == NULL) ||
+ if ( guest_handle_is_null(sc->dirty_bitmap) ||
(d->arch.shadow_dirty_bitmap == NULL) )
{
rc = -EINVAL;
break;
}
- if(sc->pages > d->arch.shadow_dirty_bitmap_size)
+ if ( sc->pages > d->arch.shadow_dirty_bitmap_size )
sc->pages = d->arch.shadow_dirty_bitmap_size;
- if (copy_to_user(sc->dirty_bitmap,
- d->arch.shadow_dirty_bitmap, (sc->pages+7)/8))
+ if ( copy_to_guest(sc->dirty_bitmap,
+ d->arch.shadow_dirty_bitmap,
+ (((sc->pages+7)/8)+sizeof(unsigned long)-1) /
+ sizeof(unsigned long)) )
{
rc = -EINVAL;
break;
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/time.c Mon Mar 6 17:21:35 2006
@@ -41,7 +41,6 @@
unsigned long cpu_khz; /* CPU clock frequency in kHz. */
unsigned long hpet_address;
spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
-int timer_ack = 0;
unsigned long volatile jiffies;
static u32 wc_sec, wc_nsec; /* UTC time at last 'time update'. */
static spinlock_t wc_lock = SPIN_LOCK_UNLOCKED;
@@ -148,16 +147,6 @@
{
ASSERT(local_irq_is_enabled());
- if ( timer_ack )
- {
- extern spinlock_t i8259A_lock;
- spin_lock_irq(&i8259A_lock);
- outb(0x0c, 0x20);
- /* Ack the IRQ; AEOI will end it automatically. */
- inb(0x20);
- spin_unlock_irq(&i8259A_lock);
- }
-
/* Update jiffies counter. */
(*(unsigned long *)&jiffies)++;
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/traps.c Mon Mar 6 17:21:35 2006
@@ -1410,7 +1410,13 @@
struct trap_info *dst = current->arch.guest_context.trap_ctxt;
long rc = 0;
- LOCK_BIGLOCK(current->domain);
+ /* If no table is presented then clear the entire virtual IDT. */
+ if ( traps == NULL )
+ {
+ memset(dst, 0, 256 * sizeof(*dst));
+ init_int80_direct_trap(current);
+ return 0;
+ }
for ( ; ; )
{
@@ -1439,8 +1445,6 @@
traps++;
}
-
- UNLOCK_BIGLOCK(current->domain);
return rc;
}
diff -r ede16886f979 -r c4ac21dc3f16 xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Mon Mar 6 16:09:18 2006
+++ b/xen/arch/x86/x86_32/domain_page.c Mon Mar 6 17:21:35 2006
@@ -11,15 +11,40 @@
#include <xen/mm.h>
#include <xen/perfc.h>
#include <xen/domain_page.h>
+#include <xen/shadow.h>
#include <asm/current.h>
#include <asm/flushtlb.h>
#include <asm/hardirq.h>
+static inline struct vcpu *mapcache_current_vcpu(void)
+{
+ struct vcpu *v;
+
+ /* In the common case we use the mapcache of the running VCPU. */
+ v = current;
+
+ /*
+ * If guest_table is NULL, and we are running a paravirtualised guest,
+ * then it means we are running on the idle domain's page table and must
+ * therefore use its mapcache.
+ */
+ if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !HVM_DOMAIN(v) )
+ {
+ /* If we really are idling, perform lazy context switch now. */
+ if ( (v = idle_vcpu[smp_processor_id()]) == current )
+ __sync_lazy_execstate();
+ /* We must now be running on the idle page table. */
+ ASSERT(read_cr3() == __pa(idle_pg_table));
+ }
+
+ return v;
+}
+
void *map_domain_page(unsigned long pfn)
{
unsigned long va;
- unsigned int idx, i, vcpu = current->vcpu_id;
- struct domain *d;
+ unsigned int idx, i, vcpu;
+ struct vcpu *v;
struct mapcache *cache;
struct vcpu_maphash_entry *hashent;
@@ -27,12 +52,10 @@
perfc_incrc(map_domain_page_count);
- /* If we are the idle domain, ensure that we run on our own page tables. */
- d = current->domain;
- if ( unlikely(is_idle_domain(d)) )
- __sync_lazy_execstate();
-
- cache = &d->arch.mapcache;
+ v = mapcache_current_vcpu();
+
+ vcpu = v->vcpu_id;
+ cache = &v->domain->arch.mapcache;
hashent = &cache->vcpu_maphash[vcpu].hash[MAPHASH_HASHFN(pfn)];
if ( hashent->pfn == pfn )
@@ -93,7 +116,8 @@
void unmap_domain_page(void *va)
{
unsigned int idx;
- struct mapcache *cache = &current->domain->arch.mapcache;
+ struct vcpu *v;
+ struct mapcache *cache;
unsigned long pfn;
struct vcpu_maphash_entry *hashent;
@@ -102,9 +126,13 @@
ASSERT((void *)MAPCACHE_VIRT_START <= va);
ASSERT(va < (void *)MAPCACHE_VIRT_END);
+ v = mapcache_current_vcpu();
+
+ cache = &v->domain->arch.mapcache;
+
idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
pfn = l1e_get_pfn(cache->l1tab[idx]);
- hashent = &cache->vcpu_maphash[current->vcpu_id].hash[MAPHASH_HASHFN(pfn)];
+ hashent = &cache->vcpu_maphash[v->vcpu_id].hash[MAPHASH_HASHFN(pfn)];
if ( hashent->idx == idx )
{
diff -r ede16886f979 -r c4ac21dc3f16 xen/common/dom0_ops.c
--- a/xen/common/dom0_ops.c Mon Mar 6 16:09:18 2006
+++ b/xen/common/dom0_ops.c Mon Mar 6 17:21:35 2006
@@ -17,13 +17,14 @@
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
+#include <xen/guest_access.h>
#include <asm/current.h>
#include <public/dom0_ops.h>
#include <public/sched_ctl.h>
#include <acm/acm_hooks.h>
extern long arch_do_dom0_op(
- struct dom0_op *op, struct dom0_op *u_dom0_op);
+ struct dom0_op *op, GUEST_HANDLE(dom0_op_t) u_dom0_op);
extern void arch_getdomaininfo_ctxt(
struct vcpu *, struct vcpu_guest_context *);
@@ -89,7 +90,7 @@
memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}
-long do_dom0_op(struct dom0_op *u_dom0_op)
+long do_dom0_op(GUEST_HANDLE(dom0_op_t) u_dom0_op)
{
long ret = 0;
struct dom0_op curop, *op = &curop;
@@ -99,7 +100,7 @@
if ( !IS_PRIV(current->domain) )
return -EPERM;
- if ( copy_from_user(op, u_dom0_op, sizeof(*op)) )
+ if ( copy_from_guest(op, u_dom0_op, 1) )
return -EFAULT;
if ( op->interface_version != DOM0_INTERFACE_VERSION )
@@ -239,7 +240,7 @@
ret = 0;
op->u.createdomain.domain = d->domain_id;
- if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
+ if ( copy_to_guest(u_dom0_op, op, 1) )
ret = -EFAULT;
}
break;
@@ -357,7 +358,7 @@
case DOM0_SCHEDCTL:
{
ret = sched_ctl(&op->u.schedctl);
- if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
+ if ( copy_to_guest(u_dom0_op, op, 1) )
ret = -EFAULT;
}
break;
@@ -365,7 +366,7 @@
case DOM0_ADJUSTDOM:
{
ret = sched_adjdom(&op->u.adjustdom);
- if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
+ if ( copy_to_guest(u_dom0_op, op, 1) )
ret = -EFAULT;
}
break;
@@ -398,20 +399,17 @@
getdomaininfo(d, &op->u.getdomaininfo);
- if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
+ if ( copy_to_guest(u_dom0_op, op, 1) )
ret = -EFAULT;
put_domain(d);
}
break;
-
-
case DOM0_GETDOMAININFOLIST:
{
struct domain *d;
dom0_getdomaininfo_t info;
- dom0_getdomaininfo_t *buffer = op->u.getdomaininfolist.buffer;
u32 num_domains = 0;
read_lock(&domlist_lock);
@@ -432,13 +430,13 @@
put_domain(d);
- if ( copy_to_user(buffer, &info, sizeof(dom0_getdomaininfo_t)) )
+ if ( copy_to_guest_offset(op->u.getdomaininfolist.buffer,
+ num_domains, &info, 1) )
{
ret = -EFAULT;
break;
}
- buffer++;
num_domains++;
}
@@ -449,7 +447,7 @@
op->u.getdomaininfolist.num_domains = num_domains;
- if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
+ if ( copy_to_guest(u_dom0_op, op, 1) )
ret = -EFAULT;
}
break;
@@ -489,12 +487,12 @@
if ( v != current )
vcpu_unpause(v);
- if ( copy_to_user(op->u.getvcpucontext.ctxt, c, sizeof(*c)) )
+ if ( copy_to_guest(op->u.getvcpucontext.ctxt, c, 1) )
ret = -EFAULT;
xfree(c);
- if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
+ if ( copy_to_guest(u_dom0_op, op, 1) )
ret = -EFAULT;
getvcpucontext_out:
@@ -534,7 +532,7 @@
(int)sizeof(op->u.getvcpuinfo.cpumap)));
ret = 0;
- if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
+ if ( copy_to_guest(u_dom0_op, op, 1) )
ret = -EFAULT;
getvcpuinfo_out:
@@ -554,7 +552,7 @@
case DOM0_TBUFCONTROL:
{
ret = tb_control(&op->u.tbufcontrol);
- if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
+ if ( copy_to_guest(u_dom0_op, op, 1) )
ret = -EFAULT;
}
break;
@@ -562,10 +560,10 @@
case DOM0_READCONSOLE:
{
ret = read_console_ring(
- &op->u.readconsole.buffer,
+ op->u.readconsole.buffer,
&op->u.readconsole.count,
op->u.readconsole.clear);
- if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
+ if ( copy_to_guest(u_dom0_op, op, 1) )
ret = -EFAULT;
}
break;
@@ -573,7 +571,7 @@
case DOM0_SCHED_ID:
{
op->u.sched_id.sched_id = sched_id();
- if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
+ if ( copy_to_guest(u_dom0_op, op, 1) )
ret = -EFAULT;
else
ret = 0;
@@ -678,15 +676,15 @@
{
extern int perfc_control(dom0_perfccontrol_t *);
ret = perfc_control(&op->u.perfccontrol);
- if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
+ if ( copy_to_guest(u_dom0_op, op, 1) )
ret = -EFAULT;
}
break;
#endif
default:
- ret = arch_do_dom0_op(op,u_dom0_op);
-
+ ret = arch_do_dom0_op(op, u_dom0_op);
+ break;
}
spin_unlock(&dom0_lock);
diff -r ede16886f979 -r c4ac21dc3f16 xen/common/domain.c
--- a/xen/common/domain.c Mon Mar 6 16:09:18 2006
+++ b/xen/common/domain.c Mon Mar 6 17:21:35 2006
@@ -17,6 +17,7 @@
#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <xen/rangeset.h>
+#include <xen/guest_access.h>
#include <asm/debugger.h>
#include <public/dom0_ops.h>
#include <public/sched.h>
@@ -380,7 +381,7 @@
domain_pause(d);
rc = -EFAULT;
- if ( copy_from_user(c, setvcpucontext->ctxt, sizeof(*c)) == 0 )
+ if ( copy_from_guest(c, setvcpucontext->ctxt, 1) == 0 )
rc = arch_set_info_guest(v, c);
domain_unpause(d);
diff -r ede16886f979 -r c4ac21dc3f16 xen/common/gdbstub.c
--- a/xen/common/gdbstub.c Mon Mar 6 16:09:18 2006
+++ b/xen/common/gdbstub.c Mon Mar 6 17:21:35 2006
@@ -376,7 +376,6 @@
break;
case 'g': /* Read registers */
gdb_arch_read_reg_array(regs, ctx);
- ASSERT(!local_irq_is_enabled());
break;
case 'G': /* Write registers */
gdb_arch_write_reg_array(regs, ctx->in_buf + 1, ctx);
@@ -395,7 +394,6 @@
return 0;
}
gdb_cmd_read_mem(addr, length, ctx);
- ASSERT(!local_irq_is_enabled());
break;
case 'M': /* Write memory */
addr = simple_strtoul(ctx->in_buf + 1, &ptr, 16);
@@ -477,7 +475,7 @@
{
int resume = 0;
int r;
- unsigned flags;
+ unsigned long flags;
if ( gdb_ctx->serhnd < 0 )
{
@@ -506,7 +504,7 @@
if ( !gdb_ctx->connected )
{
- printk("GDB connection activated\n");
+ printk("GDB connection activated.\n");
gdb_arch_print_state(regs);
gdb_ctx->connected = 1;
}
@@ -522,7 +520,7 @@
/* Shouldn't really do this, but otherwise we stop for no
obvious reason, which is Bad */
- printk("Waiting for GDB to attach to Gdb\n");
+ printk("Waiting for GDB to attach...\n");
gdb_arch_enter(regs);
gdb_ctx->signum = gdb_arch_signal_num(regs, cookie);
@@ -535,9 +533,7 @@
while ( resume == 0 )
{
- ASSERT(!local_irq_is_enabled());
r = receive_command(gdb_ctx);
- ASSERT(!local_irq_is_enabled());
if ( r < 0 )
{
dbg_printk("GDB disappeared, trying to resume Xen...\n");
@@ -545,9 +541,7 @@
}
else
{
- ASSERT(!local_irq_is_enabled());
resume = process_command(regs, gdb_ctx);
- ASSERT(!local_irq_is_enabled());
}
}
@@ -561,27 +555,13 @@
return 0;
}
-/*
- * initialization
- * XXX TODO
- * This should be an explicit call from architecture code.
- * initcall is far too late for some early debugging, and only the
- * architecture code knows when this call can be made.
- */
-static int
-initialize_gdb(void)
-{
- if ( !strcmp(opt_gdb, "none") )
- return 0;
+void
+initialise_gdb(void)
+{
gdb_ctx->serhnd = serial_parse_handle(opt_gdb);
- if ( gdb_ctx->serhnd == -1 )
- panic("Can't parse %s as GDB serial info.\n", opt_gdb);
-
- printk("Gdb initialised.\n");
- return 0;
-}
-
-__initcall(initialize_gdb);
+ if ( gdb_ctx->serhnd != -1 )
+ printk("GDB stub initialised.\n");
+}
/*
* Local variables:
diff -r ede16886f979 -r c4ac21dc3f16 xen/common/memory.c
--- a/xen/common/memory.c Mon Mar 6 16:09:18 2006
+++ b/xen/common/memory.c Mon Mar 6 17:21:35 2006
@@ -31,7 +31,7 @@
static long
increase_reservation(
struct domain *d,
- GUEST_HANDLE(xen_ulong) extent_list,
+ GUEST_HANDLE(ulong) extent_list,
unsigned int nr_extents,
unsigned int extent_order,
unsigned int flags,
@@ -80,7 +80,7 @@
static long
populate_physmap(
struct domain *d,
- GUEST_HANDLE(xen_ulong) extent_list,
+ GUEST_HANDLE(ulong) extent_list,
unsigned int nr_extents,
unsigned int extent_order,
unsigned int flags,
@@ -141,7 +141,7 @@
static long
decrease_reservation(
struct domain *d,
- GUEST_HANDLE(xen_ulong) extent_list,
+ GUEST_HANDLE(ulong) extent_list,
unsigned int nr_extents,
unsigned int extent_order,
unsigned int flags,
diff -r ede16886f979 -r c4ac21dc3f16 xen/common/perfc.c
--- a/xen/common/perfc.c Mon Mar 6 16:09:18 2006
+++ b/xen/common/perfc.c Mon Mar 6 17:21:35 2006
@@ -5,9 +5,10 @@
#include <xen/perfc.h>
#include <xen/keyhandler.h>
#include <xen/spinlock.h>
+#include <xen/mm.h>
+#include <xen/guest_access.h>
#include <public/dom0_ops.h>
#include <asm/uaccess.h>
-#include <xen/mm.h>
#undef PERFCOUNTER
#undef PERFCOUNTER_CPU
@@ -131,12 +132,12 @@
static dom0_perfc_desc_t perfc_d[NR_PERFCTRS];
static int perfc_init = 0;
-static int perfc_copy_info(dom0_perfc_desc_t *desc)
+static int perfc_copy_info(GUEST_HANDLE(dom0_perfc_desc_t) desc)
{
unsigned int i, j;
atomic_t *counters = (atomic_t *)&perfcounters;
- if ( desc == NULL )
+ if ( guest_handle_is_null(desc) )
return 0;
/* We only copy the name and array-size information once. */
@@ -196,7 +197,7 @@
}
}
- return (copy_to_user(desc, perfc_d, NR_PERFCTRS * sizeof(*desc)) ?
+ return (copy_to_guest(desc, (dom0_perfc_desc_t *)perfc_d, NR_PERFCTRS) ?
-EFAULT : 0);
}
diff -r ede16886f979 -r c4ac21dc3f16 xen/drivers/char/console.c
--- a/xen/drivers/char/console.c Mon Mar 6 16:09:18 2006
+++ b/xen/drivers/char/console.c Mon Mar 6 17:21:35 2006
@@ -20,6 +20,7 @@
#include <xen/keyhandler.h>
#include <xen/mm.h>
#include <xen/delay.h>
+#include <xen/guest_access.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include <asm/debugger.h>
@@ -221,9 +222,8 @@
conringc = conringp - CONRING_SIZE;
}
-long read_console_ring(char **pstr, u32 *pcount, int clear)
-{
- char *str = *pstr;
+long read_console_ring(GUEST_HANDLE(char) str, u32 *pcount, int clear)
+{
unsigned int idx, len, max, sofar, c;
unsigned long flags;
@@ -239,7 +239,7 @@
len = CONRING_SIZE - idx;
if ( (sofar + len) > max )
len = max - sofar;
- if ( copy_to_user(str + sofar, &conring[idx], len) )
+ if ( copy_to_guest_offset(str, sofar, &conring[idx], len) )
return -EFAULT;
sofar += len;
c += len;
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Mon Mar 6 16:09:18 2006
+++ b/xen/include/asm-x86/hvm/support.h Mon Mar 6 17:21:35 2006
@@ -26,7 +26,11 @@
#include <asm/regs.h>
#include <asm/processor.h>
+#ifndef NDEBUG
#define HVM_DEBUG 1
+#else
+#define HVM_DEBUG 0
+#endif
#define HVM_DOMAIN(v) ((v)->arch.guest_context.flags & VGCF_HVM_GUEST)
@@ -113,7 +117,7 @@
#define DBG_LEVEL_VMMU (1 << 5)
#define DBG_LEVEL_VLAPIC (1 << 6)
#define DBG_LEVEL_VLAPIC_TIMER (1 << 7)
-#define DBG_LEVEL_VLAPIC_INTERRUPT (1 << 7)
+#define DBG_LEVEL_VLAPIC_INTERRUPT (1 << 8)
#define DBG_LEVEL_IOAPIC (1 << 9)
extern unsigned int opt_hvm_debug_level;
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h Mon Mar 6 16:09:18 2006
+++ b/xen/include/asm-x86/processor.h Mon Mar 6 17:21:35 2006
@@ -352,6 +352,13 @@
outb((reg), 0x22); \
outb((data), 0x23); \
} while (0)
+
+/* Stop speculative execution */
+static inline void sync_core(void)
+{
+ int tmp;
+ asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
+}
static always_inline void __monitor(const void *eax, unsigned long ecx,
unsigned long edx)
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/asm-x86/time.h
--- a/xen/include/asm-x86/time.h Mon Mar 6 16:09:18 2006
+++ b/xen/include/asm-x86/time.h Mon Mar 6 17:21:35 2006
@@ -3,8 +3,6 @@
#define __X86_TIME_H__
#include <asm/msr.h>
-
-extern int timer_ack;
extern void calibrate_tsc_bp(void);
extern void calibrate_tsc_ap(void);
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h Mon Mar 6 16:09:18 2006
+++ b/xen/include/public/arch-ia64.h Mon Mar 6 17:21:35 2006
@@ -6,6 +6,28 @@
#ifndef __HYPERVISOR_IF_IA64_H__
#define __HYPERVISOR_IF_IA64_H__
+
+#ifdef __XEN__
+#define __DEFINE_GUEST_HANDLE(name, type) \
+ typedef struct { type *p; } __guest_handle_ ## name
+#else
+#define __DEFINE_GUEST_HANDLE(name, type) \
+ typedef type * __guest_handle_ ## name
+#endif
+
+#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
+#define GUEST_HANDLE(name) __guest_handle_ ## name
+
+#ifndef __ASSEMBLY__
+/* Guest handles for primitive C types. */
+__DEFINE_GUEST_HANDLE(uchar, unsigned char);
+__DEFINE_GUEST_HANDLE(uint, unsigned int);
+__DEFINE_GUEST_HANDLE(ulong, unsigned long);
+DEFINE_GUEST_HANDLE(char);
+DEFINE_GUEST_HANDLE(int);
+DEFINE_GUEST_HANDLE(long);
+DEFINE_GUEST_HANDLE(void);
+#endif
/* Maximum number of virtual CPUs in multi-processor guests. */
/* WARNING: before changing this, check that shared_info fits on a page */
@@ -298,6 +320,7 @@
arch_initrd_info_t initrd;
char cmdline[IA64_COMMAND_LINE_SIZE];
} vcpu_guest_context_t;
+DEFINE_GUEST_HANDLE(vcpu_guest_context_t);
#endif /* !__ASSEMBLY__ */
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/public/arch-x86_32.h
--- a/xen/include/public/arch-x86_32.h Mon Mar 6 16:09:18 2006
+++ b/xen/include/public/arch-x86_32.h Mon Mar 6 17:21:35 2006
@@ -8,6 +8,28 @@
#ifndef __XEN_PUBLIC_ARCH_X86_32_H__
#define __XEN_PUBLIC_ARCH_X86_32_H__
+
+#ifdef __XEN__
+#define __DEFINE_GUEST_HANDLE(name, type) \
+ typedef struct { type *p; } __guest_handle_ ## name
+#else
+#define __DEFINE_GUEST_HANDLE(name, type) \
+ typedef type * __guest_handle_ ## name
+#endif
+
+#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
+#define GUEST_HANDLE(name) __guest_handle_ ## name
+
+#ifndef __ASSEMBLY__
+/* Guest handles for primitive C types. */
+__DEFINE_GUEST_HANDLE(uchar, unsigned char);
+__DEFINE_GUEST_HANDLE(uint, unsigned int);
+__DEFINE_GUEST_HANDLE(ulong, unsigned long);
+DEFINE_GUEST_HANDLE(char);
+DEFINE_GUEST_HANDLE(int);
+DEFINE_GUEST_HANDLE(long);
+DEFINE_GUEST_HANDLE(void);
+#endif
/*
* SEGMENT DESCRIPTOR TABLES
@@ -130,6 +152,7 @@
unsigned long failsafe_callback_eip;
unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
} vcpu_guest_context_t;
+DEFINE_GUEST_HANDLE(vcpu_guest_context_t);
typedef struct arch_shared_info {
unsigned long max_pfn; /* max pfn that appears in table */
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/public/arch-x86_64.h
--- a/xen/include/public/arch-x86_64.h Mon Mar 6 16:09:18 2006
+++ b/xen/include/public/arch-x86_64.h Mon Mar 6 17:21:35 2006
@@ -8,6 +8,28 @@
#ifndef __XEN_PUBLIC_ARCH_X86_64_H__
#define __XEN_PUBLIC_ARCH_X86_64_H__
+
+#ifdef __XEN__
+#define __DEFINE_GUEST_HANDLE(name, type) \
+ typedef struct { type *p; } __guest_handle_ ## name
+#else
+#define __DEFINE_GUEST_HANDLE(name, type) \
+ typedef type * __guest_handle_ ## name
+#endif
+
+#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
+#define GUEST_HANDLE(name) __guest_handle_ ## name
+
+#ifndef __ASSEMBLY__
+/* Guest handles for primitive C types. */
+__DEFINE_GUEST_HANDLE(uchar, unsigned char);
+__DEFINE_GUEST_HANDLE(uint, unsigned int);
+__DEFINE_GUEST_HANDLE(ulong, unsigned long);
+DEFINE_GUEST_HANDLE(char);
+DEFINE_GUEST_HANDLE(int);
+DEFINE_GUEST_HANDLE(long);
+DEFINE_GUEST_HANDLE(void);
+#endif
/*
* SEGMENT DESCRIPTOR TABLES
@@ -215,6 +237,7 @@
uint64_t gs_base_kernel;
uint64_t gs_base_user;
} vcpu_guest_context_t;
+DEFINE_GUEST_HANDLE(vcpu_guest_context_t);
typedef struct arch_shared_info {
unsigned long max_pfn; /* max pfn that appears in table */
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/public/dom0_ops.h
--- a/xen/include/public/dom0_ops.h Mon Mar 6 16:09:18 2006
+++ b/xen/include/public/dom0_ops.h Mon Mar 6 17:21:35 2006
@@ -28,18 +28,21 @@
/* IN variables. */
domid_t domain;
unsigned long max_pfns;
- void *buffer;
+ GUEST_HANDLE(ulong) buffer;
/* OUT variables. */
unsigned long num_pfns;
} dom0_getmemlist_t;
+DEFINE_GUEST_HANDLE(dom0_getmemlist_t);
#define DOM0_SCHEDCTL 6
/* struct sched_ctl_cmd is from sched-ctl.h */
typedef struct sched_ctl_cmd dom0_schedctl_t;
+DEFINE_GUEST_HANDLE(dom0_schedctl_t);
#define DOM0_ADJUSTDOM 7
/* struct sched_adjdom_cmd is from sched-ctl.h */
typedef struct sched_adjdom_cmd dom0_adjustdom_t;
+DEFINE_GUEST_HANDLE(dom0_adjustdom_t);
#define DOM0_CREATEDOMAIN 8
typedef struct dom0_createdomain {
@@ -50,24 +53,28 @@
/* Identifier for new domain (auto-allocate if zero is specified). */
domid_t domain;
} dom0_createdomain_t;
+DEFINE_GUEST_HANDLE(dom0_createdomain_t);
#define DOM0_DESTROYDOMAIN 9
typedef struct dom0_destroydomain {
/* IN variables. */
domid_t domain;
} dom0_destroydomain_t;
+DEFINE_GUEST_HANDLE(dom0_destroydomain_t);
#define DOM0_PAUSEDOMAIN 10
typedef struct dom0_pausedomain {
/* IN parameters. */
domid_t domain;
} dom0_pausedomain_t;
+DEFINE_GUEST_HANDLE(dom0_pausedomain_t);
#define DOM0_UNPAUSEDOMAIN 11
typedef struct dom0_unpausedomain {
/* IN parameters. */
domid_t domain;
} dom0_unpausedomain_t;
+DEFINE_GUEST_HANDLE(dom0_unpausedomain_t);
#define DOM0_GETDOMAININFO 12
typedef struct dom0_getdomaininfo {
@@ -93,6 +100,7 @@
uint32_t ssidref;
xen_domain_handle_t handle;
} dom0_getdomaininfo_t;
+DEFINE_GUEST_HANDLE(dom0_getdomaininfo_t);
#define DOM0_SETVCPUCONTEXT 13
typedef struct dom0_setvcpucontext {
@@ -100,8 +108,9 @@
domid_t domain;
uint32_t vcpu;
/* IN/OUT parameters */
- vcpu_guest_context_t *ctxt;
+ GUEST_HANDLE(vcpu_guest_context_t) ctxt;
} dom0_setvcpucontext_t;
+DEFINE_GUEST_HANDLE(dom0_setvcpucontext_t);
#define DOM0_MSR 15
typedef struct dom0_msr {
@@ -115,6 +124,7 @@
uint32_t out1;
uint32_t out2;
} dom0_msr_t;
+DEFINE_GUEST_HANDLE(dom0_msr_t);
/*
* Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
@@ -127,6 +137,7 @@
uint32_t nsecs;
uint64_t system_time;
} dom0_settime_t;
+DEFINE_GUEST_HANDLE(dom0_settime_t);
#define DOM0_GETPAGEFRAMEINFO 18
#define NOTAB 0 /* normal page */
@@ -147,6 +158,7 @@
/* Is the page PINNED to a type? */
uint32_t type; /* see above type defs */
} dom0_getpageframeinfo_t;
+DEFINE_GUEST_HANDLE(dom0_getpageframeinfo_t);
/*
* Read console content from Xen buffer ring.
@@ -154,11 +166,12 @@
#define DOM0_READCONSOLE 19
typedef struct dom0_readconsole {
/* IN variables. */
- uint32_t clear; /* Non-zero -> clear after reading. */
+ uint32_t clear; /* Non-zero -> clear after reading. */
/* IN/OUT variables. */
- char *buffer; /* In: Buffer start; Out: Used buffer start */
- uint32_t count; /* In: Buffer size; Out: Used buffer size */
+ GUEST_HANDLE(char) buffer; /* In: Buffer start; Out: Used buffer start */
+ uint32_t count; /* In: Buffer size; Out: Used buffer size */
} dom0_readconsole_t;
+DEFINE_GUEST_HANDLE(dom0_readconsole_t);
/*
* Set which physical cpus a vcpu can execute on.
@@ -170,6 +183,7 @@
uint32_t vcpu;
cpumap_t cpumap;
} dom0_setvcpuaffinity_t;
+DEFINE_GUEST_HANDLE(dom0_setvcpuaffinity_t);
/* Get trace buffers machine base address */
#define DOM0_TBUFCONTROL 21
@@ -189,6 +203,7 @@
unsigned long buffer_mfn;
uint32_t size;
} dom0_tbufcontrol_t;
+DEFINE_GUEST_HANDLE(dom0_tbufcontrol_t);
/*
* Get physical information about the host machine
@@ -204,6 +219,7 @@
unsigned long free_pages;
uint32_t hw_cap[8];
} dom0_physinfo_t;
+DEFINE_GUEST_HANDLE(dom0_physinfo_t);
/*
* Get the ID of the current scheduler.
@@ -213,6 +229,7 @@
/* OUT variable */
uint32_t sched_id;
} dom0_sched_id_t;
+DEFINE_GUEST_HANDLE(dom0_sched_id_t);
/*
* Control shadow pagetables operation
@@ -234,17 +251,19 @@
uint32_t dirty_net_count;
uint32_t dirty_block_count;
} dom0_shadow_control_stats_t;
+DEFINE_GUEST_HANDLE(dom0_shadow_control_stats_t);
typedef struct dom0_shadow_control {
/* IN variables. */
domid_t domain;
uint32_t op;
- unsigned long *dirty_bitmap; /* pointer to locked buffer */
+ GUEST_HANDLE(ulong) dirty_bitmap;
/* IN/OUT variables. */
unsigned long pages; /* size of buffer, updated with actual size */
/* OUT variables. */
dom0_shadow_control_stats_t stats;
} dom0_shadow_control_t;
+DEFINE_GUEST_HANDLE(dom0_shadow_control_t);
#define DOM0_SETDOMAINMAXMEM 28
typedef struct dom0_setdomainmaxmem {
@@ -252,6 +271,7 @@
domid_t domain;
unsigned long max_memkb;
} dom0_setdomainmaxmem_t;
+DEFINE_GUEST_HANDLE(dom0_setdomainmaxmem_t);
#define DOM0_GETPAGEFRAMEINFO2 29 /* batched interface */
typedef struct dom0_getpageframeinfo2 {
@@ -259,8 +279,9 @@
domid_t domain;
unsigned long num;
/* IN/OUT variables. */
- unsigned long *array;
+ GUEST_HANDLE(ulong) array;
} dom0_getpageframeinfo2_t;
+DEFINE_GUEST_HANDLE(dom0_getpageframeinfo2_t);
/*
* Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type.
@@ -279,6 +300,7 @@
uint32_t handle;
uint32_t reg;
} dom0_add_memtype_t;
+DEFINE_GUEST_HANDLE(dom0_add_memtype_t);
/*
* Tear down an existing memory-range type. If @handle is remembered then it
@@ -293,6 +315,7 @@
uint32_t handle;
uint32_t reg;
} dom0_del_memtype_t;
+DEFINE_GUEST_HANDLE(dom0_del_memtype_t);
/* Read current type of an MTRR (x86-specific). */
#define DOM0_READ_MEMTYPE 33
@@ -304,6 +327,7 @@
unsigned long nr_mfns;
uint32_t type;
} dom0_read_memtype_t;
+DEFINE_GUEST_HANDLE(dom0_read_memtype_t);
/* Interface for controlling Xen software performance counters. */
#define DOM0_PERFCCONTROL 34
@@ -315,20 +339,23 @@
uint32_t nr_vals; /* number of values for this counter */
uint32_t vals[64]; /* array of values */
} dom0_perfc_desc_t;
+DEFINE_GUEST_HANDLE(dom0_perfc_desc_t);
typedef struct dom0_perfccontrol {
/* IN variables. */
uint32_t op; /* DOM0_PERFCCONTROL_OP_??? */
/* OUT variables. */
uint32_t nr_counters; /* number of counters */
- dom0_perfc_desc_t *desc; /* counter information (or NULL) */
+ GUEST_HANDLE(dom0_perfc_desc_t) desc; /* counter information (or NULL) */
} dom0_perfccontrol_t;
+DEFINE_GUEST_HANDLE(dom0_perfccontrol_t);
#define DOM0_MICROCODE 35
typedef struct dom0_microcode {
/* IN variables. */
- void *data; /* Pointer to microcode data */
+ GUEST_HANDLE(void) data; /* Pointer to microcode data */
uint32_t length; /* Length of microcode data. */
} dom0_microcode_t;
+DEFINE_GUEST_HANDLE(dom0_microcode_t);
#define DOM0_IOPORT_PERMISSION 36
typedef struct dom0_ioport_permission {
@@ -337,6 +364,7 @@
uint32_t nr_ports; /* size of port range */
uint8_t allow_access; /* allow or deny access to range? */
} dom0_ioport_permission_t;
+DEFINE_GUEST_HANDLE(dom0_ioport_permission_t);
#define DOM0_GETVCPUCONTEXT 37
typedef struct dom0_getvcpucontext {
@@ -344,8 +372,9 @@
domid_t domain; /* domain to be affected */
uint32_t vcpu; /* vcpu # */
/* OUT variables. */
- vcpu_guest_context_t *ctxt;
+ GUEST_HANDLE(vcpu_guest_context_t) ctxt;
} dom0_getvcpucontext_t;
+DEFINE_GUEST_HANDLE(dom0_getvcpucontext_t);
#define DOM0_GETVCPUINFO 43
typedef struct dom0_getvcpuinfo {
@@ -360,16 +389,18 @@
uint32_t cpu; /* current mapping */
cpumap_t cpumap; /* allowable mapping */
} dom0_getvcpuinfo_t;
+DEFINE_GUEST_HANDLE(dom0_getvcpuinfo_t);
#define DOM0_GETDOMAININFOLIST 38
typedef struct dom0_getdomaininfolist {
/* IN variables. */
domid_t first_domain;
uint32_t max_domains;
- dom0_getdomaininfo_t *buffer;
+ GUEST_HANDLE(dom0_getdomaininfo_t) buffer;
/* OUT variables. */
uint32_t num_domains;
} dom0_getdomaininfolist_t;
+DEFINE_GUEST_HANDLE(dom0_getdomaininfolist_t);
#define DOM0_PLATFORM_QUIRK 39
#define QUIRK_NOIRQBALANCING 1
@@ -377,37 +408,44 @@
/* IN variables. */
uint32_t quirk_id;
} dom0_platform_quirk_t;
+DEFINE_GUEST_HANDLE(dom0_platform_quirk_t);
#define DOM0_PHYSICAL_MEMORY_MAP 40
+typedef struct dom0_memory_map_entry {
+ uint64_t start, end;
+ uint32_t flags; /* reserved */
+ uint8_t is_ram;
+} dom0_memory_map_entry_t;
+DEFINE_GUEST_HANDLE(dom0_memory_map_entry_t);
typedef struct dom0_physical_memory_map {
/* IN variables. */
uint32_t max_map_entries;
/* OUT variables. */
uint32_t nr_map_entries;
- struct dom0_memory_map_entry {
- uint64_t start, end;
- uint32_t flags; /* reserved */
- uint8_t is_ram;
- } *memory_map;
+ GUEST_HANDLE(dom0_memory_map_entry_t) memory_map;
} dom0_physical_memory_map_t;
+DEFINE_GUEST_HANDLE(dom0_physical_memory_map_t);
#define DOM0_MAX_VCPUS 41
typedef struct dom0_max_vcpus {
domid_t domain; /* domain to be affected */
uint32_t max; /* maximum number of vcpus */
} dom0_max_vcpus_t;
+DEFINE_GUEST_HANDLE(dom0_max_vcpus_t);
#define DOM0_SETDOMAINHANDLE 44
typedef struct dom0_setdomainhandle {
domid_t domain;
xen_domain_handle_t handle;
} dom0_setdomainhandle_t;
+DEFINE_GUEST_HANDLE(dom0_setdomainhandle_t);
#define DOM0_SETDEBUGGING 45
typedef struct dom0_setdebugging {
domid_t domain;
uint8_t enable;
} dom0_setdebugging_t;
+DEFINE_GUEST_HANDLE(dom0_setdebugging_t);
#define DOM0_IRQ_PERMISSION 46
typedef struct dom0_irq_permission {
@@ -415,6 +453,7 @@
uint8_t pirq;
uint8_t allow_access; /* flag to specify enable/disable of IRQ access */
} dom0_irq_permission_t;
+DEFINE_GUEST_HANDLE(dom0_irq_permission_t);
#define DOM0_IOMEM_PERMISSION 47
typedef struct dom0_iomem_permission {
@@ -423,12 +462,14 @@
unsigned long nr_mfns; /* number of pages in range (>0) */
uint8_t allow_access; /* allow (!0) or deny (0) access to range? */
} dom0_iomem_permission_t;
+DEFINE_GUEST_HANDLE(dom0_iomem_permission_t);
#define DOM0_HYPERCALL_INIT 48
typedef struct dom0_hypercall_init {
domid_t domain; /* domain to be affected */
unsigned long mfn; /* machine frame to be initialised */
} dom0_hypercall_init_t;
+DEFINE_GUEST_HANDLE(dom0_hypercall_init_t);
typedef struct dom0_op {
uint32_t cmd;
@@ -471,9 +512,10 @@
struct dom0_irq_permission irq_permission;
struct dom0_iomem_permission iomem_permission;
struct dom0_hypercall_init hypercall_init;
- uint8_t pad[128];
+ uint8_t pad[128];
} u;
} dom0_op_t;
+DEFINE_GUEST_HANDLE(dom0_op_t);
#endif /* __XEN_PUBLIC_DOM0_OPS_H__ */
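These handle fields are dereferenced inside Xen only through the copy_to_guest()/copy_from_guest() accessors added elsewhere in this changeset. As a rough illustration (not code from this patch), a DOM0_READCONSOLE handler could fill the new GUEST_HANDLE(char) buffer as sketched below; fill_chunk() is a hypothetical stand-in for the real console-ring reader.

    /* Illustrative sketch only: copy console data out through the new
     * GUEST_HANDLE(char) buffer field (fill_chunk() is a made-up helper). */
    static long sketch_readconsole(dom0_readconsole_t *op)
    {
        char chunk[64];
        uint32_t n = fill_chunk(chunk, sizeof(chunk)); /* hypothetical source */

        if ( copy_to_guest(op->buffer, chunk, n) )
            return -EFAULT;                  /* bytes left uncopied -> fault */
        op->count = n;                       /* report used buffer size */
        return 0;
    }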
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/public/hvm/ioreq.h
--- a/xen/include/public/hvm/ioreq.h Mon Mar 6 16:09:18 2006
+++ b/xen/include/public/hvm/ioreq.h Mon Mar 6 17:21:35 2006
@@ -71,8 +71,8 @@
typedef struct {
ioreq_t vp_ioreq;
/* Event channel port */
- unsigned long vp_eport; /* VMX vcpu uses this to notify DM */
- unsigned long dm_eport; /* DM uses this to notify VMX vcpu */
+ unsigned int vp_eport; /* VMX vcpu uses this to notify DM */
+ unsigned int dm_eport; /* DM uses this to notify VMX vcpu */
} vcpu_iodata_t;
typedef struct {
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/public/io/tpmif.h
--- a/xen/include/public/io/tpmif.h Mon Mar 6 16:09:18 2006
+++ b/xen/include/public/io/tpmif.h Mon Mar 6 17:21:35 2006
@@ -21,7 +21,7 @@
typedef struct {
unsigned long addr; /* Machine address of packet. */
grant_ref_t ref; /* grant table access reference */
- uint16_t id; /* Echoed in response message. */
+ uint16_t unused;
uint16_t size; /* Packet size in bytes. */
} tpmif_tx_request_t;
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/public/memory.h
--- a/xen/include/public/memory.h Mon Mar 6 16:09:18 2006
+++ b/xen/include/public/memory.h Mon Mar 6 17:21:35 2006
@@ -29,7 +29,7 @@
* OUT: GMFN bases of extents that were allocated
* (NB. This command also updates the mach_to_phys translation table)
*/
- GUEST_HANDLE(xen_ulong) extent_start;
+ GUEST_HANDLE(ulong) extent_start;
/* Number of extents, and size/alignment of each (2^extent_order pages). */
unsigned long nr_extents;
@@ -86,7 +86,7 @@
* any large discontiguities in the machine address space, 2MB gaps in
* the machphys table will be represented by an MFN base of zero.
*/
- GUEST_HANDLE(xen_ulong) extent_start;
+ GUEST_HANDLE(ulong) extent_start;
/*
* Number of extents written to the above array. This will be smaller
@@ -130,13 +130,13 @@
unsigned long nr_gpfns;
/* List of GPFNs to translate. */
- GUEST_HANDLE(xen_ulong) gpfn_list;
+ GUEST_HANDLE(ulong) gpfn_list;
/*
* Output list to contain MFN translations. May be the same as the input
* list (in which case each input GPFN is overwritten with the output MFN).
*/
- GUEST_HANDLE(xen_ulong) mfn_list;
+ GUEST_HANDLE(ulong) mfn_list;
} xen_translate_gpfn_list_t;
DEFINE_GUEST_HANDLE(xen_translate_gpfn_list_t);
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/public/physdev.h
--- a/xen/include/public/physdev.h Mon Mar 6 16:09:18 2006
+++ b/xen/include/public/physdev.h Mon Mar 6 17:21:35 2006
@@ -33,8 +33,8 @@
typedef struct physdevop_apic {
/* IN */
- uint32_t apic;
- uint32_t offset;
+ unsigned long apic_physbase;
+ uint32_t reg;
/* IN or OUT */
uint32_t value;
} physdevop_apic_t;
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/public/xen.h
--- a/xen/include/public/xen.h Mon Mar 6 16:09:18 2006
+++ b/xen/include/public/xen.h Mon Mar 6 17:21:35 2006
@@ -8,22 +8,6 @@
#ifndef __XEN_PUBLIC_XEN_H__
#define __XEN_PUBLIC_XEN_H__
-
-#ifdef __XEN__
-#define DEFINE_GUEST_HANDLE(type) struct __guest_handle_ ## type { type *p; }
-#define GUEST_HANDLE(type) struct __guest_handle_ ## type
-#else
-#define DEFINE_GUEST_HANDLE(type)
-#define GUEST_HANDLE(type) type *
-#endif
-
-#ifndef __ASSEMBLY__
-/* Guest handle for unsigned long pointer. Define a name with no whitespace. */
-typedef unsigned long xen_ulong;
-DEFINE_GUEST_HANDLE(xen_ulong);
-/* Guest handle for arbitrary-type pointer (void *). */
-DEFINE_GUEST_HANDLE(void);
-#endif
#if defined(__i386__)
#include "arch-x86_32.h"
@@ -396,8 +380,8 @@
* a. relocated kernel image
* b. initial ram disk [mod_start, mod_len]
* c. list of allocated page frames [mfn_list, nr_pages]
- * d. bootstrap page tables [pt_base, CR3 (x86)]
- * e. start_info_t structure [register ESI (x86)]
+ * d. start_info_t structure [register ESI (x86)]
+ * e. bootstrap page tables [pt_base, CR3 (x86)]
* f. bootstrap stack [register ESP (x86)]
* 5. Bootstrap elements are packed together, but each is 4kB-aligned.
* 6. The initial ram disk may be omitted.
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/xen/console.h
--- a/xen/include/xen/console.h Mon Mar 6 16:09:18 2006
+++ b/xen/include/xen/console.h Mon Mar 6 17:21:35 2006
@@ -8,12 +8,13 @@
#define __CONSOLE_H__
#include <xen/spinlock.h>
+#include <xen/guest_access.h>
extern spinlock_t console_lock;
void set_printk_prefix(const char *prefix);
-long read_console_ring(char **, u32 *, int);
+long read_console_ring(GUEST_HANDLE(char), u32 *, int);
void init_console(void);
void console_endboot(int disable_vga);
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/xen/gdbstub.h
--- a/xen/include/xen/gdbstub.h Mon Mar 6 16:09:18 2006
+++ b/xen/include/xen/gdbstub.h Mon Mar 6 17:21:35 2006
@@ -20,6 +20,8 @@
#ifndef __XEN_GDBSTUB_H__
#define __XEN_GDBSTUB_H__
+
+#ifdef CRASH_DEBUG
/* value <-> char (de)serializers for arch specific gdb backends */
char hex2char(unsigned long x);
@@ -84,6 +86,14 @@
#define SIGALRM 14
#define SIGTERM 15
+void initialise_gdb(void);
+
+#else
+
+#define initialise_gdb() ((void)0)
+
+#endif
+
#endif /* __XEN_GDBSTUB_H__ */
/*
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/xen/guest_access.h
--- a/xen/include/xen/guest_access.h Mon Mar 6 16:09:18 2006
+++ b/xen/include/xen/guest_access.h Mon Mar 6 17:21:35 2006
@@ -7,64 +7,17 @@
#ifndef __XEN_GUEST_ACCESS_H__
#define __XEN_GUEST_ACCESS_H__
-#include <asm/uaccess.h>
+#include <asm/guest_access.h>
-/* Is the guest handle a NULL reference? */
-#define guest_handle_is_null(hnd) ((hnd).p == NULL)
-
-/* Offset the given guest handle into the array it refers to. */
-#define guest_handle_add_offset(hnd, nr) ((hnd).p += (nr))
-
-/* Cast a guest handle to the specified type of handle. */
-#define guest_handle_cast(hnd, type) ({ \
- type *_x = (hnd).p; \
- (GUEST_HANDLE(type)) { _x }; \
-})
-
-/*
- * Copy an array of objects to guest context via a guest handle.
- * Optionally specify an offset into the guest array.
- */
-#define copy_to_guest_offset(hnd, off, ptr, nr) ({ \
- const typeof(ptr) _x = (hnd).p; \
- const typeof(ptr) _y = (ptr); \
- copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \
-})
#define copy_to_guest(hnd, ptr, nr) \
copy_to_guest_offset(hnd, 0, ptr, nr)
-/*
- * Copy an array of objects from guest context via a guest handle.
- * Optionally specify an offset into the guest array.
- */
-#define copy_from_guest_offset(ptr, hnd, off, nr) ({ \
- const typeof(ptr) _x = (hnd).p; \
- const typeof(ptr) _y = (ptr); \
- copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \
-})
#define copy_from_guest(ptr, hnd, nr) \
copy_from_guest_offset(ptr, hnd, 0, nr)
-/*
- * Pre-validate a guest handle.
- * Allows use of faster __copy_* functions.
- */
-#define guest_handle_okay(hnd, nr) \
- array_access_ok((hnd).p, (nr), sizeof(*(hnd).p))
-
-#define __copy_to_guest_offset(hnd, off, ptr, nr) ({ \
- const typeof(ptr) _x = (hnd).p; \
- const typeof(ptr) _y = (ptr); \
- __copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \
-})
#define __copy_to_guest(hnd, ptr, nr) \
__copy_to_guest_offset(hnd, 0, ptr, nr)
-#define __copy_from_guest_offset(ptr, hnd, off, nr) ({ \
- const typeof(ptr) _x = (hnd).p; \
- const typeof(ptr) _y = (ptr); \
- __copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \
-})
#define __copy_from_guest(ptr, hnd, nr) \
__copy_from_guest_offset(ptr, hnd, 0, nr)
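The wrappers above simply fix the offset at zero; handlers that fill a guest-supplied array use the *_offset forms directly, stepping through the handle one element at a time. A hedged sketch of that pattern, shaped loosely after DOM0_GETDOMAININFOLIST (getinfo() and the straight domid increment are simplifying assumptions, not this patch's real enumeration logic):

    /* Sketch: copy up to max_domains records into the guest buffer handle,
     * one element per iteration, stopping at the first missing domain. */
    static long sketch_getdomaininfolist(dom0_getdomaininfolist_t *op)
    {
        dom0_getdomaininfo_t info;
        uint32_t i;

        for ( i = 0; i < op->max_domains; i++ )
        {
            if ( !getinfo(op->first_domain + i, &info) ) /* hypothetical lookup */
                break;
            if ( copy_to_guest_offset(op->buffer, i, &info, 1) )
                return -EFAULT;
        }
        op->num_domains = i;
        return 0;
    }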
diff -r ede16886f979 -r c4ac21dc3f16 tools/xenstore/hashtable.c
--- /dev/null Mon Mar 6 16:09:18 2006
+++ b/tools/xenstore/hashtable.c Mon Mar 6 17:21:35 2006
@@ -0,0 +1,276 @@
+/* Copyright (C) 2004 Christopher Clark <firstname.lastname@xxxxxxxxxxxx> */
+
+#include "hashtable.h"
+#include "hashtable_private.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include <stdint.h>
+
+/*
+Credit for primes table: Aaron Krowne
+ http://br.endernet.org/~akrowne/
+ http://planetmath.org/encyclopedia/GoodHashTablePrimes.html
+*/
+static const unsigned int primes[] = {
+53, 97, 193, 389,
+769, 1543, 3079, 6151,
+12289, 24593, 49157, 98317,
+196613, 393241, 786433, 1572869,
+3145739, 6291469, 12582917, 25165843,
+50331653, 100663319, 201326611, 402653189,
+805306457, 1610612741
+};
+const unsigned int prime_table_length = sizeof(primes)/sizeof(primes[0]);
+const unsigned int max_load_factor = 65; /* percentage */
+
+/*****************************************************************************/
+struct hashtable *
+create_hashtable(unsigned int minsize,
+ unsigned int (*hashf) (void*),
+ int (*eqf) (void*,void*))
+{
+ struct hashtable *h;
+ unsigned int pindex, size = primes[0];
+ /* Check requested hashtable isn't too large */
+ if (minsize > (1u << 30)) return NULL;
+ /* Enforce size as prime */
+ for (pindex=0; pindex < prime_table_length; pindex++) {
+ if (primes[pindex] > minsize) { size = primes[pindex]; break; }
+ }
+ h = (struct hashtable *)malloc(sizeof(struct hashtable));
+ if (NULL == h) return NULL; /*oom*/
+ h->table = (struct entry **)malloc(sizeof(struct entry*) * size);
+ if (NULL == h->table) { free(h); return NULL; } /*oom*/
+ memset(h->table, 0, size * sizeof(struct entry *));
+ h->tablelength = size;
+ h->primeindex = pindex;
+ h->entrycount = 0;
+ h->hashfn = hashf;
+ h->eqfn = eqf;
+ h->loadlimit = (unsigned int)(((uint64_t)size * max_load_factor) / 100);
+ return h;
+}
+
+/*****************************************************************************/
+unsigned int
+hash(struct hashtable *h, void *k)
+{
+ /* Aim to protect against poor hash functions by adding logic here
+ * - logic taken from java 1.4 hashtable source */
+ unsigned int i = h->hashfn(k);
+ i += ~(i << 9);
+ i ^= ((i >> 14) | (i << 18)); /* >>> */
+ i += (i << 4);
+ i ^= ((i >> 10) | (i << 22)); /* >>> */
+ return i;
+}
+
+/*****************************************************************************/
+static int
+hashtable_expand(struct hashtable *h)
+{
+ /* Double the size of the table to accommodate more entries */
+ struct entry **newtable;
+ struct entry *e;
+ struct entry **pE;
+ unsigned int newsize, i, index;
+ /* Check we're not hitting max capacity */
+ if (h->primeindex == (prime_table_length - 1)) return 0;
+ newsize = primes[++(h->primeindex)];
+
+ newtable = (struct entry **)malloc(sizeof(struct entry*) * newsize);
+ if (NULL != newtable)
+ {
+ memset(newtable, 0, newsize * sizeof(struct entry *));
+ /* This algorithm is not 'stable'. ie. it reverses the list
+ * when it transfers entries between the tables */
+ for (i = 0; i < h->tablelength; i++) {
+ while (NULL != (e = h->table[i])) {
+ h->table[i] = e->next;
+ index = indexFor(newsize,e->h);
+ e->next = newtable[index];
+ newtable[index] = e;
+ }
+ }
+ free(h->table);
+ h->table = newtable;
+ }
+ /* Plan B: realloc instead */
+ else
+ {
+ newtable = (struct entry **)
+ realloc(h->table, newsize * sizeof(struct entry *));
+ if (NULL == newtable) { (h->primeindex)--; return 0; }
+ h->table = newtable;
+ memset(&newtable[h->tablelength], 0, (newsize - h->tablelength) * sizeof(struct entry *));
+ for (i = 0; i < h->tablelength; i++) {
+ for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) {
+ index = indexFor(newsize,e->h);
+ if (index == i)
+ {
+ pE = &(e->next);
+ }
+ else
+ {
+ *pE = e->next;
+ e->next = newtable[index];
+ newtable[index] = e;
+ }
+ }
+ }
+ }
+ h->tablelength = newsize;
+ h->loadlimit = (unsigned int)
+ (((uint64_t)newsize * max_load_factor) / 100);
+ return -1;
+}
+
+/*****************************************************************************/
+unsigned int
+hashtable_count(struct hashtable *h)
+{
+ return h->entrycount;
+}
+
+/*****************************************************************************/
+int
+hashtable_insert(struct hashtable *h, void *k, void *v)
+{
+ /* This method allows duplicate keys - but they shouldn't be used */
+ unsigned int index;
+ struct entry *e;
+ if (++(h->entrycount) > h->loadlimit)
+ {
+ /* Ignore the return value. If expand fails, we should
+ * still try cramming just this value into the existing table
+ * -- we may not have memory for a larger table, but one more
+ * element may be ok. Next time we insert, we'll try expanding again.*/
+ hashtable_expand(h);
+ }
+ e = (struct entry *)malloc(sizeof(struct entry));
+ if (NULL == e) { --(h->entrycount); return 0; } /*oom*/
+ e->h = hash(h,k);
+ index = indexFor(h->tablelength,e->h);
+ e->k = k;
+ e->v = v;
+ e->next = h->table[index];
+ h->table[index] = e;
+ return -1;
+}
+
+/*****************************************************************************/
+void * /* returns value associated with key */
+hashtable_search(struct hashtable *h, void *k)
+{
+ struct entry *e;
+ unsigned int hashvalue, index;
+ hashvalue = hash(h,k);
+ index = indexFor(h->tablelength,hashvalue);
+ e = h->table[index];
+ while (NULL != e)
+ {
+ /* Check hash value to short circuit heavier comparison */
+ if ((hashvalue == e->h) && (h->eqfn(k, e->k))) return e->v;
+ e = e->next;
+ }
+ return NULL;
+}
+
+/*****************************************************************************/
+void * /* returns value associated with key */
+hashtable_remove(struct hashtable *h, void *k)
+{
+ /* TODO: consider compacting the table when the load factor drops enough,
+ * or provide a 'compact' method. */
+
+ struct entry *e;
+ struct entry **pE;
+ void *v;
+ unsigned int hashvalue, index;
+
+ hashvalue = hash(h,k);
+ index = indexFor(h->tablelength,hash(h,k));
+ pE = &(h->table[index]);
+ e = *pE;
+ while (NULL != e)
+ {
+ /* Check hash value to short circuit heavier comparison */
+ if ((hashvalue == e->h) && (h->eqfn(k, e->k)))
+ {
+ *pE = e->next;
+ h->entrycount--;
+ v = e->v;
+ freekey(e->k);
+ free(e);
+ return v;
+ }
+ pE = &(e->next);
+ e = e->next;
+ }
+ return NULL;
+}
+
+/*****************************************************************************/
+/* destroy */
+void
+hashtable_destroy(struct hashtable *h, int free_values)
+{
+ unsigned int i;
+ struct entry *e, *f;
+ struct entry **table = h->table;
+ if (free_values)
+ {
+ for (i = 0; i < h->tablelength; i++)
+ {
+ e = table[i];
+ while (NULL != e)
+ { f = e; e = e->next; freekey(f->k); free(f->v); free(f); }
+ }
+ }
+ else
+ {
+ for (i = 0; i < h->tablelength; i++)
+ {
+ e = table[i];
+ while (NULL != e)
+ { f = e; e = e->next; freekey(f->k); free(f); }
+ }
+ }
+ free(h->table);
+ free(h);
+}
+
+/*
+ * Copyright (c) 2002, Christopher Clark
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of the original author; nor the names of any contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
diff -r ede16886f979 -r c4ac21dc3f16 tools/xenstore/hashtable.h
--- /dev/null Mon Mar 6 16:09:18 2006
+++ b/tools/xenstore/hashtable.h Mon Mar 6 17:21:35 2006
@@ -0,0 +1,199 @@
+/* Copyright (C) 2002 Christopher Clark <firstname.lastname@xxxxxxxxxxxx> */
+
+#ifndef __HASHTABLE_CWC22_H__
+#define __HASHTABLE_CWC22_H__
+
+struct hashtable;
+
+/* Example of use:
+ *
+ * struct hashtable *h;
+ * struct some_key *k;
+ * struct some_value *v;
+ *
+ * static unsigned int hash_from_key_fn( void *k );
+ * static int keys_equal_fn ( void *key1, void *key2 );
+ *
+ * h = create_hashtable(16, hash_from_key_fn, keys_equal_fn);
+ * k = (struct some_key *) malloc(sizeof(struct some_key));
+ * v = (struct some_value *) malloc(sizeof(struct some_value));
+ *
+ * (initialise k and v to suitable values)
+ *
+ * if (! hashtable_insert(h,k,v) )
+ * { exit(-1); }
+ *
+ * if (NULL == (found = hashtable_search(h,k) ))
+ * { printf("not found!"); }
+ *
+ * if (NULL == (found = hashtable_remove(h,k) ))
+ * { printf("Not found\n"); }
+ *
+ */
+
+/* Macros may be used to define type-safe(r) hashtable access functions, with
+ * methods specialized to take known key and value types as parameters.
+ *
+ * Example:
+ *
+ * Insert this at the start of your file:
+ *
+ * DEFINE_HASHTABLE_INSERT(insert_some, struct some_key, struct some_value);
+ * DEFINE_HASHTABLE_SEARCH(search_some, struct some_key, struct some_value);
+ * DEFINE_HASHTABLE_REMOVE(remove_some, struct some_key, struct some_value);
+ *
+ * This defines the functions 'insert_some', 'search_some' and 'remove_some'.
+ * These operate just like hashtable_insert etc., with the same parameters,
+ * but their function signatures have 'struct some_key *' rather than
+ * 'void *', and hence can generate compile time errors if your program is
+ * supplying incorrect data as a key (and similarly for value).
+ *
+ * Note that the hash and key equality functions passed to create_hashtable
+ * still take 'void *' parameters instead of 'some key *'. This shouldn't be
+ * a difficult issue as they're only defined and passed once, and the other
+ * functions will ensure that only valid keys are supplied to them.
+ *
+ * The cost for this checking is increased code size and runtime overhead
+ * - if performance is important, it may be worth switching back to the
+ * unsafe methods once your program has been debugged with the safe methods.
+ * This just requires switching to some simple alternative defines - eg:
+ * #define insert_some hashtable_insert
+ *
+ */
+
+/*****************************************************************************
+ * create_hashtable
+
+ * @name create_hashtable
+ * @param minsize minimum initial size of hashtable
+ * @param hashfunction function for hashing keys
+ * @param key_eq_fn function for determining key equality
+ * @return newly created hashtable or NULL on failure
+ */
+
+struct hashtable *
+create_hashtable(unsigned int minsize,
+ unsigned int (*hashfunction) (void*),
+ int (*key_eq_fn) (void*,void*));
+
+/*****************************************************************************
+ * hashtable_insert
+
+ * @name hashtable_insert
+ * @param h the hashtable to insert into
+ * @param k the key - hashtable claims ownership and will free on removal
+ * @param v the value - does not claim ownership
+ * @return non-zero for successful insertion
+ *
+ * This function will cause the table to expand if the insertion would take
+ * the ratio of entries to table size over the maximum load factor.
+ *
+ * This function does not check for repeated insertions with a duplicate key.
+ * The value returned when using a duplicate key is undefined -- when
+ * the hashtable changes size, the order of retrieval of duplicate key
+ * entries is reversed.
+ * If in doubt, remove before insert.
+ */
+
+int
+hashtable_insert(struct hashtable *h, void *k, void *v);
+
+#define DEFINE_HASHTABLE_INSERT(fnname, keytype, valuetype) \
+int fnname (struct hashtable *h, keytype *k, valuetype *v) \
+{ \
+ return hashtable_insert(h,k,v); \
+}
+
+/*****************************************************************************
+ * hashtable_search
+
+ * @name hashtable_search
+ * @param h the hashtable to search
+ * @param k the key to search for - does not claim ownership
+ * @return the value associated with the key, or NULL if none found
+ */
+
+void *
+hashtable_search(struct hashtable *h, void *k);
+
+#define DEFINE_HASHTABLE_SEARCH(fnname, keytype, valuetype) \
+valuetype * fnname (struct hashtable *h, keytype *k) \
+{ \
+ return (valuetype *) (hashtable_search(h,k)); \
+}
+
+/*****************************************************************************
+ * hashtable_remove
+
+ * @name hashtable_remove
+ * @param h the hashtable to remove the item from
+ * @param k the key to search for - does not claim ownership
+ * @return the value associated with the key, or NULL if none found
+ */
+
+void * /* returns value */
+hashtable_remove(struct hashtable *h, void *k);
+
+#define DEFINE_HASHTABLE_REMOVE(fnname, keytype, valuetype) \
+valuetype * fnname (struct hashtable *h, keytype *k) \
+{ \
+ return (valuetype *) (hashtable_remove(h,k)); \
+}
+
+
+/*****************************************************************************
+ * hashtable_count
+
+ * @name hashtable_count
+ * @param h the hashtable
+ * @return the number of items stored in the hashtable
+ */
+unsigned int
+hashtable_count(struct hashtable *h);
+
+
+/*****************************************************************************
+ * hashtable_destroy
+
+ * @name hashtable_destroy
+ * @param h the hashtable
+ * @param free_values whether to call 'free' on the remaining values
+ */
+
+void
+hashtable_destroy(struct hashtable *h, int free_values);
+
+#endif /* __HASHTABLE_CWC22_H__ */
+
+/*
+ * Copyright (c) 2002, Christopher Clark
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of the original author; nor the names of any contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
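The usage comments in the header above cover the macro wrappers; for completeness, a small self-contained example of the plain interface with string keys might look like the following. The hash and equality helpers are illustrative only, not part of the patch.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include "hashtable.h"

    static unsigned int hash_string(void *k)
    {
        const char *s = k;
        unsigned int h = 5381;               /* djb2-style string hash */
        while (*s)
            h = h * 33 + (unsigned char)*s++;
        return h;
    }

    static int strings_equal(void *k1, void *k2)
    {
        return strcmp(k1, k2) == 0;
    }

    int main(void)
    {
        struct hashtable *h;
        char *found;

        h = create_hashtable(16, hash_string, strings_equal);
        if (h == NULL)
            return 1;

        /* The table takes ownership of the key (freekey == free in
         * hashtable_private.h), so keys must be heap allocated. */
        if (!hashtable_insert(h, strdup("backend"), strdup("/local/domain/0")))
            return 1;

        found = hashtable_search(h, "backend");
        printf("backend -> %s\n", found ? found : "(not found)");

        hashtable_destroy(h, 1 /* also free the remaining values */);
        return 0;
    }

Note that hashtable_insert() returns non-zero on success, so the check is inverted relative to the usual 0-on-success convention.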
diff -r ede16886f979 -r c4ac21dc3f16 tools/xenstore/hashtable_private.h
--- /dev/null Mon Mar 6 16:09:18 2006
+++ b/tools/xenstore/hashtable_private.h Mon Mar 6 17:21:35 2006
@@ -0,0 +1,85 @@
+/* Copyright (C) 2002, 2004 Christopher Clark <firstname.lastname@xxxxxxxxxxxx> */
+
+#ifndef __HASHTABLE_PRIVATE_CWC22_H__
+#define __HASHTABLE_PRIVATE_CWC22_H__
+
+#include "hashtable.h"
+
+/*****************************************************************************/
+struct entry
+{
+ void *k, *v;
+ unsigned int h;
+ struct entry *next;
+};
+
+struct hashtable {
+ unsigned int tablelength;
+ struct entry **table;
+ unsigned int entrycount;
+ unsigned int loadlimit;
+ unsigned int primeindex;
+ unsigned int (*hashfn) (void *k);
+ int (*eqfn) (void *k1, void *k2);
+};
+
+/*****************************************************************************/
+unsigned int
+hash(struct hashtable *h, void *k);
+
+/*****************************************************************************/
+/* indexFor */
+static inline unsigned int
+indexFor(unsigned int tablelength, unsigned int hashvalue) {
+ return (hashvalue % tablelength);
+};
+
+/* Only works if tablelength == 2^N */
+/*static inline unsigned int
+indexFor(unsigned int tablelength, unsigned int hashvalue)
+{
+ return (hashvalue & (tablelength - 1u));
+}
+*/
+
+/*****************************************************************************/
+#define freekey(X) free(X)
+/*define freekey(X) ; */
+
+
+/*****************************************************************************/
+
+#endif /* __HASHTABLE_PRIVATE_CWC22_H__*/
+
+/*
+ * Copyright (c) 2002, Christopher Clark
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of the original author; nor the names of any contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
diff -r ede16886f979 -r c4ac21dc3f16 tools/xenstore/xenstore_control.c
--- /dev/null Mon Mar 6 16:09:18 2006
+++ b/tools/xenstore/xenstore_control.c Mon Mar 6 17:21:35 2006
@@ -0,0 +1,35 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "xs.h"
+
+
+int main(int argc, char **argv)
+{
+ struct xs_handle * xsh;
+
+ if (argc < 2 ||
+ strcmp(argv[1], "check"))
+ {
+ fprintf(stderr,
+ "Usage:\n"
+ "\n"
+ " %s check\n"
+ "\n", argv[0]);
+ return 2;
+ }
+
+ xsh = xs_daemon_open();
+
+ if (xsh == NULL) {
+ fprintf(stderr, "Failed to contact Xenstored.\n");
+ return 1;
+ }
+
+ xs_debug_command(xsh, argv[1], NULL, 0);
+
+ xs_daemon_close(xsh);
+
+ return 0;
+}
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/asm-ia64/guest_access.h
--- /dev/null Mon Mar 6 16:09:18 2006
+++ b/xen/include/asm-ia64/guest_access.h Mon Mar 6 17:21:35 2006
@@ -0,0 +1,63 @@
+/******************************************************************************
+ * guest_access.h
+ *
+ * Copyright (c) 2006, K A Fraser
+ */
+
+#ifndef __ASM_IA64_GUEST_ACCESS_H__
+#define __ASM_IA64_GUEST_ACCESS_H__
+
+#include <asm/uaccess.h>
+
+/* Is the guest handle a NULL reference? */
+#define guest_handle_is_null(hnd) ((hnd).p == NULL)
+
+/* Offset the given guest handle into the array it refers to. */
+#define guest_handle_add_offset(hnd, nr) ((hnd).p += (nr))
+
+/* Cast a guest handle to the specified type of handle. */
+#define guest_handle_cast(hnd, type) ({ \
+ type *_x = (hnd).p; \
+ (GUEST_HANDLE(type)) { _x }; \
+})
+
+/*
+ * Copy an array of objects to guest context via a guest handle,
+ * specifying an offset into the guest array.
+ */
+#define copy_to_guest_offset(hnd, off, ptr, nr) ({ \
+ const typeof(ptr) _x = (hnd).p; \
+ const typeof(ptr) _y = (ptr); \
+ copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \
+})
+
+/*
+ * Copy an array of objects from guest context via a guest handle,
+ * specifying an offset into the guest array.
+ */
+#define copy_from_guest_offset(ptr, hnd, off, nr) ({ \
+ const typeof(ptr) _x = (hnd).p; \
+ const typeof(ptr) _y = (ptr); \
+ copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \
+})
+
+/*
+ * Pre-validate a guest handle.
+ * Allows use of faster __copy_* functions.
+ */
+#define guest_handle_okay(hnd, nr) \
+ array_access_ok((hnd).p, (nr), sizeof(*(hnd).p))
+
+#define __copy_to_guest_offset(hnd, off, ptr, nr) ({ \
+ const typeof(ptr) _x = (hnd).p; \
+ const typeof(ptr) _y = (ptr); \
+ __copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \
+})
+
+#define __copy_from_guest_offset(ptr, hnd, off, nr) ({ \
+ const typeof(ptr) _x = (hnd).p; \
+ const typeof(ptr) _y = (ptr); \
+ __copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \
+})
+
+#endif /* __ASM_IA64_GUEST_ACCESS_H__ */
diff -r ede16886f979 -r c4ac21dc3f16 xen/include/asm-x86/guest_access.h
--- /dev/null Mon Mar 6 16:09:18 2006
+++ b/xen/include/asm-x86/guest_access.h Mon Mar 6 17:21:35 2006
@@ -0,0 +1,63 @@
+/******************************************************************************
+ * guest_access.h
+ *
+ * Copyright (c) 2006, K A Fraser
+ */
+
+#ifndef __ASM_X86_GUEST_ACCESS_H__
+#define __ASM_X86_GUEST_ACCESS_H__
+
+#include <asm/uaccess.h>
+
+/* Is the guest handle a NULL reference? */
+#define guest_handle_is_null(hnd) ((hnd).p == NULL)
+
+/* Offset the given guest handle into the array it refers to. */
+#define guest_handle_add_offset(hnd, nr) ((hnd).p += (nr))
+
+/* Cast a guest handle to the specified type of handle. */
+#define guest_handle_cast(hnd, type) ({ \
+ type *_x = (hnd).p; \
+ (GUEST_HANDLE(type)) { _x }; \
+})
+
+/*
+ * Copy an array of objects to guest context via a guest handle,
+ * specifying an offset into the guest array.
+ */
+#define copy_to_guest_offset(hnd, off, ptr, nr) ({ \
+ const typeof(ptr) _x = (hnd).p; \
+ const typeof(ptr) _y = (ptr); \
+ copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \
+})
+
+/*
+ * Copy an array of objects from guest context via a guest handle,
+ * specifying an offset into the guest array.
+ */
+#define copy_from_guest_offset(ptr, hnd, off, nr) ({ \
+ const typeof(ptr) _x = (hnd).p; \
+ const typeof(ptr) _y = (ptr); \
+ copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \
+})
+
+/*
+ * Pre-validate a guest handle.
+ * Allows use of faster __copy_* functions.
+ */
+#define guest_handle_okay(hnd, nr) \
+ array_access_ok((hnd).p, (nr), sizeof(*(hnd).p))
+
+#define __copy_to_guest_offset(hnd, off, ptr, nr) ({ \
+ const typeof(ptr) _x = (hnd).p; \
+ const typeof(ptr) _y = (ptr); \
+ __copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \
+})
+
+#define __copy_from_guest_offset(ptr, hnd, off, nr) ({ \
+ const typeof(ptr) _x = (hnd).p; \
+ const typeof(ptr) _y = (ptr); \
+ __copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \
+})
+
+#endif /* __ASM_X86_GUEST_ACCESS_H__ */
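The __copy_* forms skip the access check on every call, so they are only safe once the whole range has been validated with guest_handle_okay(). A sketch of that fast path, under the assumption that consume() stands in for real per-element work:

    /* Sketch: validate a GUEST_HANDLE(ulong) once, then use the unchecked
     * copy in a loop; consume() is a hypothetical per-element consumer. */
    static long sketch_process_list(GUEST_HANDLE(ulong) list, unsigned long nr)
    {
        unsigned long val, i;

        if ( !guest_handle_okay(list, nr) )
            return -EFAULT;

        for ( i = 0; i < nr; i++ )
        {
            if ( __copy_from_guest_offset(&val, list, i, 1) )
                return -EFAULT;
            consume(val);
        }
        return 0;
    }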
diff -r ede16886f979 -r c4ac21dc3f16 linux-2.6-xen-sparse/include/xen/public/xenstored.h
--- a/linux-2.6-xen-sparse/include/xen/public/xenstored.h Mon Mar 6 16:09:18 2006
+++ /dev/null Mon Mar 6 17:21:35 2006
@@ -1,89 +0,0 @@
-/*
- * Simple prototyle Xen Store Daemon providing simple tree-like database.
- * Copyright (C) 2005 Rusty Russell IBM Corporation
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _XENSTORED_H
-#define _XENSTORED_H
-
-enum xsd_sockmsg_type
-{
- XS_DEBUG,
- XS_SHUTDOWN,
- XS_DIRECTORY,
- XS_READ,
- XS_GET_PERMS,
- XS_WATCH,
- XS_WATCH_ACK,
- XS_UNWATCH,
- XS_TRANSACTION_START,
- XS_TRANSACTION_END,
- XS_OP_READ_ONLY = XS_TRANSACTION_END,
- XS_INTRODUCE,
- XS_RELEASE,
- XS_GET_DOMAIN_PATH,
- XS_WRITE,
- XS_MKDIR,
- XS_RM,
- XS_SET_PERMS,
- XS_WATCH_EVENT,
- XS_ERROR,
-};
-
-#define XS_WRITE_NONE "NONE"
-#define XS_WRITE_CREATE "CREATE"
-#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
-
-/* We hand errors as strings, for portability. */
-struct xsd_errors
-{
- int errnum;
- const char *errstring;
-};
-#define XSD_ERROR(x) { x, #x }
-static struct xsd_errors xsd_errors[] __attribute__((unused)) = {
- XSD_ERROR(EINVAL),
- XSD_ERROR(EACCES),
- XSD_ERROR(EEXIST),
- XSD_ERROR(EISDIR),
- XSD_ERROR(ENOENT),
- XSD_ERROR(ENOMEM),
- XSD_ERROR(ENOSPC),
- XSD_ERROR(EIO),
- XSD_ERROR(ENOTEMPTY),
- XSD_ERROR(ENOSYS),
- XSD_ERROR(EROFS),
- XSD_ERROR(EBUSY),
- XSD_ERROR(EAGAIN),
- XSD_ERROR(EISCONN),
-};
-struct xsd_sockmsg
-{
- u32 type;
- u32 len; /* Length of data following this. */
-
- /* Generally followed by nul-terminated string(s). */
-};
-
-#endif /* _XENSTORED_H */