# HG changeset patch
# User djm@xxxxxxxxxxxxxxx
# Node ID cd914808acf1f5a32fc6fa575432cd96f9a4ad10
# Parent 903fb46f240eeafaeefbc32e31ea9a3b00fb906f
# Parent 5b30599761b3f0b85e6bb1768f0236294881e900
Merge
diff -r 903fb46f240e -r cd914808acf1 .hgignore
--- a/.hgignore Tue Jan 3 14:59:00 2006
+++ b/.hgignore Tue Jan 3 16:19:20 2006
@@ -181,6 +181,7 @@
^xen/TAGS$
^xen/arch/x86/asm-offsets\.s$
^xen/arch/x86/boot/mkelf32$
+^xen/arch/x86/xen\.lds$
^xen/ddb/.*$
^xen/include/asm$
^xen/include/asm-.*/asm-offsets\.h$
diff -r 903fb46f240e -r cd914808acf1 linux-2.6-xen-sparse/arch/xen/kernel/gnttab.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/gnttab.c Tue Jan 3 14:59:00 2006
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/gnttab.c Tue Jan 3 16:19:20 2006
@@ -389,6 +389,30 @@
return -ENOSYS;
}
+static int __init
+gnttab_proc_init(void)
+{
+ /*
+ * /proc/xen/grant : used by libxc to access grant tables
+ */
+ if ((grant_pde = create_xen_proc_entry("grant", 0600)) == NULL) {
+ WPRINTK("Unable to create grant xen proc entry\n");
+ return -1;
+ }
+
+ grant_file_ops.read = grant_pde->proc_fops->read;
+ grant_file_ops.write = grant_pde->proc_fops->write;
+
+ grant_pde->proc_fops = &grant_file_ops;
+
+ grant_pde->read_proc = &grant_read;
+ grant_pde->write_proc = &grant_write;
+
+ return 0;
+}
+
+device_initcall(gnttab_proc_init);
+
#endif /* CONFIG_PROC_FS */
int
@@ -446,29 +470,11 @@
gnttab_free_count = NR_GRANT_ENTRIES - NR_RESERVED_ENTRIES;
gnttab_free_head = NR_RESERVED_ENTRIES;
-#ifdef CONFIG_PROC_FS
- /*
- * /proc/xen/grant : used by libxc to access grant tables
- */
- if ((grant_pde = create_xen_proc_entry("grant", 0600)) == NULL) {
- WPRINTK("Unable to create grant xen proc entry\n");
- return -1;
- }
-
- grant_file_ops.read = grant_pde->proc_fops->read;
- grant_file_ops.write = grant_pde->proc_fops->write;
-
- grant_pde->proc_fops = &grant_file_ops;
-
- grant_pde->read_proc = &grant_read;
- grant_pde->write_proc = &grant_write;
-#endif
-
printk("Grant table initialized\n");
return 0;
}
-__initcall(gnttab_init);
+core_initcall(gnttab_init);
/*
* Local variables:
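Note on the gnttab.c change above: gnttab_init() moves from __initcall to core_initcall so the grant tables are set up before any device-level code runs, while the /proc/xen/grant interface is now registered separately from a device_initcall. A minimal sketch of that ordering, assuming a 2.6-era kernel build (the example_* functions are hypothetical, not part of the patch):

    /* Linux runs initcalls in level order: core_initcall() (level 1) runs
     * before device_initcall()/__initcall() (level 6). */
    #include <linux/init.h>

    static int __init example_core_setup(void)
    {
            /* early setup, analogous to gnttab_init() above */
            return 0;
    }

    static int __init example_proc_setup(void)
    {
            /* later setup, analogous to gnttab_proc_init() above */
            return 0;
    }

    core_initcall(example_core_setup);
    device_initcall(example_proc_setup);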
diff -r 903fb46f240e -r cd914808acf1 linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c Tue Jan 3 14:59:00 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c Tue Jan 3 16:19:20 2006
@@ -331,7 +331,12 @@
return;
}
- xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
+ err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
+ if (err) {
+ xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
+ info->xbdev->otherend);
+ return;
+ }
(void)xenbus_switch_state(info->xbdev, NULL, XenbusStateConnected);
diff -r 903fb46f240e -r cd914808acf1 linux-2.6-xen-sparse/drivers/xen/netback/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h Tue Jan 3 14:59:00 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h Tue Jan 3 16:19:20 2006
@@ -82,7 +82,7 @@
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
void netif_creditlimit(netif_t *netif);
-int netif_disconnect(netif_t *netif);
+void netif_disconnect(netif_t *netif);
netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN]);
void free_netif(netif_t *netif);
diff -r 903fb46f240e -r cd914808acf1 linux-2.6-xen-sparse/drivers/xen/netback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c Tue Jan 3 14:59:00 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c Tue Jan 3 16:19:20 2006
@@ -196,9 +196,13 @@
return 0;
netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
+ if (netif->tx_comms_area == NULL)
+ return -ENOMEM;
netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
- if (netif->tx_comms_area == NULL || netif->rx_comms_area == NULL)
+ if (netif->rx_comms_area == NULL) {
+ free_vm_area(netif->tx_comms_area);
return -ENOMEM;
+ }
err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
if (err) {
@@ -247,13 +251,9 @@
{
netif_t *netif = (netif_t *)arg;
- /* Already disconnected? */
- if (!netif->irq)
- return;
-
- unbind_from_irqhandler(netif->irq, netif);
- netif->irq = 0;
-
+ if (netif->irq)
+ unbind_from_irqhandler(netif->irq, netif);
+
unregister_netdev(netif->dev);
if (netif->tx.sring) {
@@ -290,10 +290,10 @@
#endif
}
-int netif_disconnect(netif_t *netif)
-{
-
- if (netif->status == CONNECTED) {
+void netif_disconnect(netif_t *netif)
+{
+ switch (netif->status) {
+ case CONNECTED:
rtnl_lock();
netif->status = DISCONNECTING;
wmb();
@@ -301,10 +301,14 @@
__netif_down(netif);
rtnl_unlock();
netif_put(netif);
- return 0; /* Caller should not send response message. */
- }
-
- return 1;
+ break;
+ case DISCONNECTED:
+ BUG_ON(atomic_read(&netif->refcnt) != 0);
+ free_netif(netif);
+ break;
+ default:
+ BUG();
+ }
}
/*
diff -r 903fb46f240e -r cd914808acf1 linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Tue Jan 3 14:59:00 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Tue Jan 3 16:19:20 2006
@@ -116,6 +116,8 @@
#define RX_MAX_TARGET NET_RX_RING_SIZE
int rx_min_target, rx_max_target, rx_target;
struct sk_buff_head rx_batch;
+
+ struct timer_list rx_refill_timer;
/*
* {tx,rx}_skbs store outstanding skbuffs. The first entry in each
@@ -517,6 +519,13 @@
}
+static void rx_refill_timeout(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ netif_rx_schedule(dev);
+}
+
+
static void network_alloc_rx_buffers(struct net_device *dev)
{
unsigned short id;
@@ -534,7 +543,7 @@
* Allocate skbuffs greedily, even though we batch updates to the
* receive ring. This creates a less bursty demand on the memory
* allocator, so should reduce the chance of failed allocation requests
- * both for ourself and for other kernel subsystems.
+ * both for ourself and for other kernel subsystems.
*/
batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
@@ -545,8 +554,15 @@
skb = alloc_xen_skb(
((PAGE_SIZE - sizeof(struct skb_shared_info)) &
(-SKB_DATA_ALIGN(1))) - 16);
- if (skb == NULL)
- break;
+ if (skb == NULL) {
+ /* Any skbuffs queued for refill? Force them out. */
+ if (i != 0)
+ goto refill;
+ /* Could not allocate any skbuffs. Try again later. */
+ mod_timer(&np->rx_refill_timer,
+ jiffies + (HZ/10));
+ return;
+ }
__skb_queue_tail(&np->rx_batch, skb);
}
@@ -554,6 +570,12 @@
if (i < (np->rx_target/2))
return;
+ /* Adjust our fill target if we risked running out of buffers. */
+ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
+ ((np->rx_target *= 2) > np->rx_max_target))
+ np->rx_target = np->rx_max_target;
+
+ refill:
for (i = 0; ; i++) {
if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
break;
@@ -608,11 +630,6 @@
/* Above is a suitable barrier to ensure backend will see requests. */
np->rx.req_prod_pvt = req_prod + i;
RING_PUSH_REQUESTS(&np->rx);
-
- /* Adjust our fill target if we risked running out of buffers. */
- if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
- ((np->rx_target *= 2) > np->rx_max_target))
- np->rx_target = np->rx_max_target;
}
@@ -1077,6 +1094,10 @@
np->rx_min_target = RX_MIN_TARGET;
np->rx_max_target = RX_MAX_TARGET;
+ init_timer(&np->rx_refill_timer);
+ np->rx_refill_timer.data = (unsigned long)netdev;
+ np->rx_refill_timer.function = rx_refill_timeout;
+
/* Initialise {tx,rx}_skbs as a free chain containing every entry. */
for (i = 0; i <= NET_TX_RING_SIZE; i++) {
np->tx_skbs[i] = (void *)((unsigned long) i+1);
@@ -1188,33 +1209,14 @@
DPRINTK("%s\n", dev->nodename);
- netif_free(info);
- kfree(info);
+ netif_disconnect_backend(info);
+ free_netdev(info->netdev);
return 0;
}
-static void netif_free(struct netfront_info *info)
-{
- netif_disconnect_backend(info);
- close_netdev(info);
-}
-
-
static void close_netdev(struct netfront_info *info)
-{
- if (info->netdev) {
-#ifdef CONFIG_PROC_FS
- xennet_proc_delif(info->netdev);
-#endif
- unregister_netdev(info->netdev);
- info->netdev = NULL;
- }
-}
-
-
-static void netif_disconnect_backend(struct netfront_info *info)
{
/* Stop old i/f to prevent errors whilst we rebuild the state. */
spin_lock_irq(&info->tx_lock);
@@ -1223,17 +1225,37 @@
/* info->backend_state = BEST_DISCONNECTED; */
spin_unlock(&info->rx_lock);
spin_unlock_irq(&info->tx_lock);
-
+
+#ifdef CONFIG_PROC_FS
+ xennet_proc_delif(info->netdev);
+#endif
+
+ if (info->irq)
+ unbind_from_irqhandler(info->irq, info->netdev);
+ info->evtchn = info->irq = 0;
+
+ del_timer_sync(&info->rx_refill_timer);
+
+ unregister_netdev(info->netdev);
+}
+
+
+static void netif_disconnect_backend(struct netfront_info *info)
+{
end_access(info->tx_ring_ref, info->tx.sring);
end_access(info->rx_ring_ref, info->rx.sring);
info->tx_ring_ref = GRANT_INVALID_REF;
info->rx_ring_ref = GRANT_INVALID_REF;
info->tx.sring = NULL;
info->rx.sring = NULL;
-
- if (info->irq)
- unbind_from_irqhandler(info->irq, info->netdev);
- info->evtchn = info->irq = 0;
+}
+
+
+static void netif_free(struct netfront_info *info)
+{
+ close_netdev(info);
+ netif_disconnect_backend(info);
+ free_netdev(info->netdev);
}
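Note on the netfront hunks above: rx_refill_timer retries the receive-buffer refill roughly 100 ms later when no skbuff at all could be allocated, and close_netdev() quiesces it with del_timer_sync(). A minimal sketch of this 2.6-era one-shot timer pattern (retry_fn, retry_timer_setup and arm_retry are hypothetical names, not from the patch):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static struct timer_list retry_timer;

    static void retry_fn(unsigned long data)
    {
            /* Re-attempt the failed work, e.g. call
             * netif_rx_schedule((struct net_device *)data). */
    }

    static void retry_timer_setup(unsigned long cookie)
    {
            init_timer(&retry_timer);
            retry_timer.data     = cookie;    /* handed back to retry_fn() */
            retry_timer.function = retry_fn;
    }

    static void arm_retry(void)
    {
            /* One-shot: fire roughly 100 ms from now; del_timer_sync() must
             * run before the owning structure is freed. */
            mod_timer(&retry_timer, jiffies + HZ / 10);
    }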
diff -r 903fb46f240e -r cd914808acf1 tools/examples/xmexample.vmx
--- a/tools/examples/xmexample.vmx Tue Jan 3 14:59:00 2006
+++ b/tools/examples/xmexample.vmx Tue Jan 3 16:19:20 2006
@@ -29,6 +29,9 @@
#-----------------------------------------------------------------------------
# the number of cpus guest platform has, default=1
vcpus=1
+
+# enable/disable vmx guest ACPI, default=0 (disabled)
+#acpi=0
# List of which CPUS this domain is allowed to use, default Xen picks
#cpus = "" # leave to Xen to pick
diff -r 903fb46f240e -r cd914808acf1 tools/firmware/vmxassist/Makefile
--- a/tools/firmware/vmxassist/Makefile Tue Jan 3 14:59:00 2006
+++ b/tools/firmware/vmxassist/Makefile Tue Jan 3 16:19:20 2006
@@ -24,7 +24,7 @@
# The emulator code lives in ROM space
TEXTADDR=0x000D0000
-DEFINES=-DDEBUG -D_ACPI_ -DTEXTADDR=$(TEXTADDR)
+DEFINES=-DDEBUG -DTEXTADDR=$(TEXTADDR)
XENINC=-I$(XEN_ROOT)/tools/libxc
LD = ld
diff -r 903fb46f240e -r cd914808acf1 tools/firmware/vmxassist/acpi_madt.c
--- a/tools/firmware/vmxassist/acpi_madt.c Tue Jan 3 14:59:00 2006
+++ b/tools/firmware/vmxassist/acpi_madt.c Tue Jan 3 16:19:20 2006
@@ -24,23 +24,75 @@
extern int puts(const char *s);
-#define VCPU_NR_PAGE 0x0009F000
-#define VCPU_NR_OFFSET 0x00000800
-#define VCPU_MAGIC 0x76637075 /* "vcpu" */
+#define HVM_INFO_PAGE 0x0009F000
+#define HVM_INFO_OFFSET 0x00000800
-/* xc_vmx_builder wrote vcpu block at 0x9F800. Return it. */
+struct hvm_info_table {
+ char signature[8]; /* "HVM INFO" */
+ uint32_t length;
+ uint8_t checksum;
+ uint8_t acpi_enabled;
+ uint8_t pad[2];
+ uint32_t nr_vcpus;
+};
+
+static struct hvm_info_table *table = NULL;
+
static int
+checksum_valid(uint8_t *ptr, int len)
+{
+ uint8_t sum=0;
+ int i;
+
+ for (i = 0; i < len; i++)
+ sum += ptr[i];
+
+ return (sum == 0);
+}
+
+/* xc_vmx_builder wrote hvm info at 0x9F800. Return it. */
+static struct hvm_info_table *
+get_hvm_info_table(void)
+{
+ struct hvm_info_table *t;
+ char signature[] = "HVM INFO";
+ int i;
+
+ if (table != NULL)
+ return table;
+
+ t = (struct hvm_info_table *)(HVM_INFO_PAGE + HVM_INFO_OFFSET);
+
+ /* strncmp(t->signature, "HVM INFO", 8) */
+ for (i = 0; i < 8; i++) {
+ if (signature[i] != t->signature[i]) {
+ puts("Bad hvm info signature\n");
+ return NULL;
+ }
+ }
+
+ if (!checksum_valid((uint8_t *)t, t->length)) {
+ puts("Bad hvm info checksum\n");
+ return NULL;
+ }
+
+ table = t;
+
+ return table;
+}
+
+int
get_vcpu_nr(void)
{
- unsigned int *vcpus;
+ struct hvm_info_table *t = get_hvm_info_table();
+ return (t ? t->nr_vcpus : 1); /* default 1 vcpu */
+}
- vcpus = (unsigned int *)(VCPU_NR_PAGE + VCPU_NR_OFFSET);
- if (vcpus[0] != VCPU_MAGIC) {
- puts("Bad vcpus magic, set vcpu number to 1 by default.\n");
- return 1;
- }
-
- return vcpus[1];
+int
+get_acpi_enabled(void)
+{
+ struct hvm_info_table *t = get_hvm_info_table();
+ return (t ? t->acpi_enabled : 0); /* default no acpi */
}
static void *
diff -r 903fb46f240e -r cd914808acf1 tools/firmware/vmxassist/vmxloader.c
--- a/tools/firmware/vmxassist/vmxloader.c Tue Jan 3 14:59:00 2006
+++ b/tools/firmware/vmxassist/vmxloader.c Tue Jan 3 16:19:20 2006
@@ -24,12 +24,10 @@
#include "machine.h"
#include "roms.h"
-#ifdef _ACPI_
#include "acpi.h"
#include "../acpi/acpi2_0.h" // for ACPI_PHYSICAL_ADDRESS
int acpi_madt_update(unsigned char* acpi_start);
-#endif
-
+int get_acpi_enabled(void);
/*
* C runtime start off
@@ -120,18 +118,17 @@
memcpy((void *)0xC0000,
vgabios_stdvga, sizeof(vgabios_stdvga));
}
-#ifdef _ACPI_
- puts("Loading ACPI ...\n");
- acpi_madt_update(acpi);
-
- if (ACPI_PHYSICAL_ADDRESS+sizeof(acpi) <= 0xF0000) {
- /* make sure acpi table does not overlap rombios
- * currently acpi less than 8K will be OK.
- */
- memcpy((void *)ACPI_PHYSICAL_ADDRESS, acpi, sizeof(acpi));
+ if (get_acpi_enabled() != 0) {
+ puts("Loading ACPI ...\n");
+ acpi_madt_update((unsigned char*)acpi);
+ if (ACPI_PHYSICAL_ADDRESS+sizeof(acpi) <= 0xF0000) {
+ /* make sure acpi table does not overlap rombios
+ * currently acpi less than 8K will be OK.
+ */
+ memcpy((void *)ACPI_PHYSICAL_ADDRESS, acpi, sizeof(acpi));
+ }
}
-#endif
puts("Loading VMXAssist ...\n");
memcpy((void *)TEXTADDR, vmxassist, sizeof(vmxassist));
diff -r 903fb46f240e -r cd914808acf1 tools/ioemu/vl.c
--- a/tools/ioemu/vl.c Tue Jan 3 14:59:00 2006
+++ b/tools/ioemu/vl.c Tue Jan 3 16:19:20 2006
@@ -2948,6 +2948,7 @@
case QEMU_OPTION_vcpus:
vcpus = atoi(optarg);
fprintf(logfile, "qemu: the number of cpus is %d\n", vcpus);
+ break;
case QEMU_OPTION_pci:
pci_enabled = 1;
break;
diff -r 903fb46f240e -r cd914808acf1 tools/libxc/Makefile
--- a/tools/libxc/Makefile Tue Jan 3 14:59:00 2006
+++ b/tools/libxc/Makefile Tue Jan 3 16:19:20 2006
@@ -27,6 +27,11 @@
ifeq ($(XEN_TARGET_ARCH),x86_32)
SRCS += xc_ptrace.c
SRCS += xc_ptrace_core.c
+SRCS += xc_pagetab.c
+endif
+
+ifeq ($(XEN_TARGET_ARCH),x86_64)
+SRCS += xc_pagetab.c
endif
BUILD_SRCS :=
diff -r 903fb46f240e -r cd914808acf1 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c Tue Jan 3 14:59:00 2006
+++ b/tools/libxc/xc_domain.c Tue Jan 3 16:19:20 2006
@@ -404,6 +404,38 @@
}
+int xc_domain_irq_permission(int xc_handle,
+ uint32_t domid,
+ uint8_t pirq,
+ uint8_t allow_access)
+{
+ dom0_op_t op;
+
+ op.cmd = DOM0_IRQ_PERMISSION;
+ op.u.irq_permission.domain = domid;
+ op.u.irq_permission.pirq = pirq;
+ op.u.irq_permission.allow_access = allow_access;
+
+ return do_dom0_op(xc_handle, &op);
+}
+
+int xc_domain_iomem_permission(int xc_handle,
+ uint32_t domid,
+ unsigned long first_pfn,
+ unsigned long nr_pfns,
+ uint8_t allow_access)
+{
+ dom0_op_t op;
+
+ op.cmd = DOM0_IOMEM_PERMISSION;
+ op.u.iomem_permission.domain = domid;
+ op.u.iomem_permission.first_pfn = first_pfn;
+ op.u.iomem_permission.nr_pfns = nr_pfns;
+ op.u.iomem_permission.allow_access = allow_access;
+
+ return do_dom0_op(xc_handle, &op);
+}
+
/*
* Local variables:
* mode: C
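Note: the two helpers added above wrap DOM0_IRQ_PERMISSION and DOM0_IOMEM_PERMISSION. A hedged usage sketch from a management tool, using the signatures declared in the xenctrl.h hunk further down (the domain ID, IRQ number and pfn range are made-up example values; grant_guest_hw_access is hypothetical):

    #include <stdio.h>
    #include "xenctrl.h"

    int grant_guest_hw_access(uint32_t domid)
    {
        int xc = xc_interface_open();
        if (xc < 0)
            return -1;

        /* Let the guest bind physical IRQ 9. */
        if (xc_domain_irq_permission(xc, domid, 9, 1) != 0)
            perror("xc_domain_irq_permission");

        /* Let the guest map 16 pages of I/O memory starting at pfn 0xf0000. */
        if (xc_domain_iomem_permission(xc, domid, 0xf0000, 16, 1) != 0)
            perror("xc_domain_iomem_permission");

        return xc_interface_close(xc);
    }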
diff -r 903fb46f240e -r cd914808acf1 tools/libxc/xc_vmx_build.c
--- a/tools/libxc/xc_vmx_build.c Tue Jan 3 14:59:00 2006
+++ b/tools/libxc/xc_vmx_build.c Tue Jan 3 16:19:20 2006
@@ -33,8 +33,17 @@
#define E820_MAP_NR_OFFSET 0x000001E8
#define E820_MAP_OFFSET 0x000002D0
-#define VCPU_NR_PAGE 0x0009F000
-#define VCPU_NR_OFFSET 0x00000800
+#define HVM_INFO_PAGE 0x0009F000
+#define HVM_INFO_OFFSET 0x00000800
+
+struct hvm_info_table {
+ char signature[8]; /* "HVM INFO" */
+ uint32_t length;
+ uint8_t checksum;
+ uint8_t acpi_enabled;
+ uint8_t pad[2];
+ uint32_t nr_vcpus;
+};
struct e820entry {
uint64_t addr;
@@ -119,26 +128,45 @@
return (*(((unsigned char *)e820_page) + E820_MAP_NR_OFFSET) = nr_map);
}
+static void
+set_hvm_info_checksum(struct hvm_info_table *t)
+{
+ uint8_t *ptr = (uint8_t *)t, sum = 0;
+ unsigned int i;
+
+ t->checksum = 0;
+
+ for (i = 0; i < t->length; i++)
+ sum += *ptr++;
+
+ t->checksum = -sum;
+}
+
/*
- * Use E820 reserved memory 0x9F800 to pass number of vcpus to vmxloader
- * vmxloader will use it to config ACPI MADT table
+ * Use E820 reserved memory 0x9F800 to pass HVM info to vmxloader
+ * vmxloader will use this info to set BIOS accordingly
*/
-#define VCPU_MAGIC 0x76637075 /* "vcpu" */
-static int set_vcpu_nr(int xc_handle, uint32_t dom,
- unsigned long *pfn_list, unsigned int vcpus)
-{
- char *va_map;
- unsigned int *va_vcpus;
+static int set_hvm_info(int xc_handle, uint32_t dom,
+ unsigned long *pfn_list, unsigned int vcpus,
+ unsigned int acpi)
+{
+ char *va_map;
+ struct hvm_info_table *va_hvm;
va_map = xc_map_foreign_range(xc_handle, dom,
PAGE_SIZE, PROT_READ|PROT_WRITE,
- pfn_list[VCPU_NR_PAGE >> PAGE_SHIFT]);
+ pfn_list[HVM_INFO_PAGE >> PAGE_SHIFT]);
if ( va_map == NULL )
return -1;
- va_vcpus = (unsigned int *)(va_map + VCPU_NR_OFFSET);
- va_vcpus[0] = VCPU_MAGIC;
- va_vcpus[1] = vcpus;
+ va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET);
+ memset(va_hvm, 0, sizeof(*va_hvm));
+ strncpy(va_hvm->signature, "HVM INFO", 8);
+ va_hvm->length = sizeof(struct hvm_info_table);
+ va_hvm->acpi_enabled = acpi;
+ va_hvm->nr_vcpus = vcpus;
+
+ set_hvm_info_checksum(va_hvm);
munmap(va_map, PAGE_SIZE);
@@ -281,6 +309,7 @@
unsigned int control_evtchn,
unsigned int lapic,
unsigned int vcpus,
+ unsigned int acpi,
unsigned int store_evtchn,
unsigned long *store_mfn)
{
@@ -490,8 +519,8 @@
goto error_out;
}
- if (set_vcpu_nr(xc_handle, dom, page_array, vcpus)) {
- fprintf(stderr, "Couldn't set vcpu number for VMX guest.\n");
+ if (set_hvm_info(xc_handle, dom, page_array, vcpus, acpi)) {
+ fprintf(stderr, "Couldn't set hvm info for VMX guest.\n");
goto error_out;
}
@@ -574,29 +603,6 @@
return -1;
}
-#define VMX_FEATURE_FLAG 0x20
-
-static int vmx_identify(void)
-{
- int eax, ecx;
-
- __asm__ __volatile__ (
-#if defined(__i386__)
- "push %%ebx; cpuid; pop %%ebx"
-#elif defined(__x86_64__)
- "push %%rbx; cpuid; pop %%rbx"
-#endif
- : "=a" (eax), "=c" (ecx)
- : "0" (1)
- : "dx");
-
- if (!(ecx & VMX_FEATURE_FLAG)) {
- return -1;
- }
-
- return 0;
-}
-
int xc_vmx_build(int xc_handle,
uint32_t domid,
int memsize,
@@ -604,6 +610,7 @@
unsigned int control_evtchn,
unsigned int lapic,
unsigned int vcpus,
+ unsigned int acpi,
unsigned int store_evtchn,
unsigned long *store_mfn)
{
@@ -613,10 +620,18 @@
unsigned long nr_pages;
char *image = NULL;
unsigned long image_size;
-
- if ( vmx_identify() < 0 )
- {
- PERROR("CPU doesn't support VMX Extensions");
+ xen_capabilities_info_t xen_caps;
+
+ if ( (rc = xc_version(xc_handle, XENVER_capabilities, &xen_caps)) != 0 )
+ {
+ PERROR("Failed to get xen version info");
+ goto error_out;
+ }
+
+ if ( !strstr(xen_caps, "hvm") )
+ {
+ PERROR("CPU doesn't support VMX Extensions or "
+ "CPU VMX Extensions are not turned on");
goto error_out;
}
@@ -659,7 +674,7 @@
if ( setup_guest(xc_handle, domid, memsize, image, image_size, nr_pages,
ctxt, op.u.getdomaininfo.shared_info_frame,
control_evtchn,
- lapic, vcpus, store_evtchn, store_mfn) < 0)
+ lapic, vcpus, acpi, store_evtchn, store_mfn) < 0)
{
ERROR("Error constructing guest OS");
goto error_out;
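Note: set_hvm_info_checksum() above stores checksum = -(sum of the other bytes), so a consumer such as vmxassist's checksum_valid() can sum every byte of the table, checksum included, and expect zero. A small self-contained sketch of that convention (hvm_info_valid is a hypothetical helper, not part of the patch; the struct layout is copied from the hunks):

    #include <stdint.h>
    #include <string.h>

    struct hvm_info_table {
        char     signature[8];   /* "HVM INFO" */
        uint32_t length;
        uint8_t  checksum;
        uint8_t  acpi_enabled;
        uint8_t  pad[2];
        uint32_t nr_vcpus;
    };

    static int hvm_info_valid(const struct hvm_info_table *t)
    {
        const uint8_t *p = (const uint8_t *)t;
        uint8_t sum = 0;
        uint32_t i;

        if (memcmp(t->signature, "HVM INFO", 8) != 0)
            return 0;
        for (i = 0; i < t->length; i++)
            sum += p[i];          /* includes t->checksum itself */
        return sum == 0;
    }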
diff -r 903fb46f240e -r cd914808acf1 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Tue Jan 3 14:59:00 2006
+++ b/tools/libxc/xenctrl.h Tue Jan 3 16:19:20 2006
@@ -380,6 +380,17 @@
uint32_t nr_ports,
uint32_t allow_access);
+int xc_domain_irq_permission(int xc_handle,
+ uint32_t domid,
+ uint8_t pirq,
+ uint8_t allow_access);
+
+int xc_domain_iomem_permission(int xc_handle,
+ uint32_t domid,
+ unsigned long first_pfn,
+ unsigned long nr_pfns,
+ uint8_t allow_access);
+
unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
unsigned long mfn);
@@ -415,6 +426,19 @@
void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
unsigned long *arr, int num );
+
+/**
+ * Translates a virtual address in the context of a given domain and
+ * vcpu returning the machine page frame number of the associated
+ * page.
+ *
+ * @parm xc_handle a handle on an open hypervisor interface
+ * @parm dom the domain to perform the translation in
+ * @parm vcpu the vcpu to perform the translation on
+ * @parm virt the virtual address to translate
+ */
+unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
+ int vcpu, unsigned long long virt);
int xc_get_pfn_list(int xc_handle, uint32_t domid, unsigned long *pfn_buf,
unsigned long max_pfns);
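Note: the new xc_translate_foreign_address() pairs naturally with xc_map_foreign_range(), which is how the xenctx tool added later in this patch uses it. A hedged sketch (peek_guest_virt is a hypothetical helper; error handling is minimal):

    #include <sys/mman.h>
    #include "xenctrl.h"

    /* Map the page backing a guest-virtual address read-only.
     * Caller must munmap() the returned page (XC_PAGE_SIZE bytes);
     * the data of interest sits at offset (virt & ~XC_PAGE_MASK). */
    void *peek_guest_virt(int xc_handle, uint32_t dom, int vcpu,
                          unsigned long long virt)
    {
        unsigned long mfn =
            xc_translate_foreign_address(xc_handle, dom, vcpu, virt);
        if (mfn == 0)
            return NULL;
        return xc_map_foreign_range(xc_handle, dom, XC_PAGE_SIZE,
                                    PROT_READ, mfn);
    }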
diff -r 903fb46f240e -r cd914808acf1 tools/libxc/xenguest.h
--- a/tools/libxc/xenguest.h Tue Jan 3 14:59:00 2006
+++ b/tools/libxc/xenguest.h Tue Jan 3 16:19:20 2006
@@ -58,6 +58,7 @@
unsigned int control_evtchn,
unsigned int lapic,
unsigned int vcpus,
+ unsigned int acpi,
unsigned int store_evtchn,
unsigned long *store_mfn);
diff -r 903fb46f240e -r cd914808acf1 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Tue Jan 3 14:59:00 2006
+++ b/tools/python/xen/lowlevel/xc/xc.c Tue Jan 3 16:19:20 2006
@@ -364,19 +364,20 @@
int control_evtchn, store_evtchn;
int vcpus = 1;
int lapic = 0;
+ int acpi = 0;
int memsize;
unsigned long store_mfn = 0;
static char *kwd_list[] = { "dom", "control_evtchn", "store_evtchn",
- "memsize", "image", "lapic", "vcpus", NULL };
-
- if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiiisii", kwd_list,
+ "memsize", "image", "lapic", "vcpus",
"acpi",NULL };
+
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiiisiii", kwd_list,
&dom, &control_evtchn, &store_evtchn,
- &memsize, &image, &lapic, &vcpus) )
+ &memsize, &image, &lapic, &vcpus,&acpi) )
return NULL;
if ( xc_vmx_build(self->xc_handle, dom, memsize, image, control_evtchn,
- lapic, vcpus, store_evtchn, &store_mfn) != 0 )
+ lapic, vcpus, acpi, store_evtchn, &store_mfn) != 0 )
return PyErr_SetFromErrno(xc_error);
return Py_BuildValue("{s:i}", "store_mfn", store_mfn);
@@ -774,6 +775,52 @@
return zero;
}
+static PyObject *pyxc_domain_irq_permission(PyObject *self,
+ PyObject *args,
+ PyObject *kwds)
+{
+ XcObject *xc = (XcObject *)self;
+ uint32_t dom;
+ int pirq, allow_access, ret;
+
+ static char *kwd_list[] = { "dom", "pirq", "allow_access", NULL };
+
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iii", kwd_list,
+ &dom, &pirq, &allow_access) )
+ return NULL;
+
+ ret = xc_domain_irq_permission(
+ xc->xc_handle, dom, pirq, allow_access);
+ if ( ret != 0 )
+ return PyErr_SetFromErrno(xc_error);
+
+ Py_INCREF(zero);
+ return zero;
+}
+
+static PyObject *pyxc_domain_iomem_permission(PyObject *self,
+ PyObject *args,
+ PyObject *kwds)
+{
+ XcObject *xc = (XcObject *)self;
+ uint32_t dom;
+ unsigned long first_pfn, nr_pfns, allow_access, ret;
+
+ static char *kwd_list[] = { "dom", "first_pfn", "nr_pfns", "allow_access", NULL };
+
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "illi", kwd_list,
+ &dom, &first_pfn, &nr_pfns, &allow_access) )
+ return NULL;
+
+ ret = xc_domain_iomem_permission(
+ xc->xc_handle, dom, first_pfn, nr_pfns, allow_access);
+ if ( ret != 0 )
+ return PyErr_SetFromErrno(xc_error);
+
+ Py_INCREF(zero);
+ return zero;
+}
+
static PyObject *dom_op(XcObject *self, PyObject *args,
int (*fn)(int, uint32_t))
@@ -1067,6 +1114,25 @@
" dom [int]: Identifier of domain to be allowed access.\n"
" first_port [int]: First IO port\n"
" nr_ports [int]: Number of IO ports\n"
+ " allow_access [int]: Non-zero means enable access; else disable
access\n\n"
+ "Returns: [int] 0 on success; -1 on error.\n" },
+
+ { "domain_irq_permission",
+ (PyCFunction)pyxc_domain_irq_permission,
+ METH_VARARGS | METH_KEYWORDS, "\n"
+ "Allow a domain access to a physical IRQ\n"
+ " dom [int]: Identifier of domain to be allowed access.\n"
+ " pirq [int]: The Physical IRQ\n"
+ " allow_access [int]: Non-zero means enable access; else disable
access\n\n"
+ "Returns: [int] 0 on success; -1 on error.\n" },
+
+ { "domain_iomem_permission",
+ (PyCFunction)pyxc_domain_iomem_permission,
+ METH_VARARGS | METH_KEYWORDS, "\n"
+ "Allow a domain access to a range of IO memory pages\n"
+ " dom [int]: Identifier of domain to be allowed access.\n"
+ " first_pfn [long]: First page of I/O Memory\n"
+ " nr_pfns [long]: Number of pages of I/O Memory (>0)\n"
" allow_access [int]: Non-zero means enable access; else disable
access\n\n"
"Returns: [int] 0 on success; -1 on error.\n" },
diff -r 903fb46f240e -r cd914808acf1 tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py Tue Jan 3 14:59:00 2006
+++ b/tools/python/xen/xend/image.py Tue Jan 3 16:19:20 2006
@@ -189,11 +189,16 @@
def configure(self, imageConfig, deviceConfig):
ImageHandler.configure(self, imageConfig, deviceConfig)
+ info = xc.xeninfo()
+ if not 'hvm' in info['xen_caps']:
+ raise VmError("vmx: not an Intel VT platform, we stop creating!")
+
self.dmargs = self.parseDeviceModelArgs(imageConfig, deviceConfig)
self.device_model = sxp.child_value(imageConfig, 'device_model')
if not self.device_model:
raise VmError("vmx: missing device model")
self.display = sxp.child_value(imageConfig, 'display')
+ self.xauthority = sxp.child_value(imageConfig, 'xauthority')
self.vm.storeVm(("image/dmargs", " ".join(self.dmargs)),
("image/device-model", self.device_model),
@@ -209,6 +214,8 @@
if not lapic is None:
self.lapic = int(lapic)
+ self.acpi = int(sxp.child_value(imageConfig, 'acpi', 0))
+
def buildDomain(self):
# Create an event channel
self.device_channel = xc.evtchn_alloc_unbound(dom=self.vm.getDomid(),
@@ -224,6 +231,7 @@
log.debug("memsize = %d", self.vm.getMemoryTarget() / 1024)
log.debug("lapic = %d", self.lapic)
log.debug("vcpus = %d", self.vm.getVCpuCount())
+ log.debug("acpi = %d", self.acpi)
return xc.vmx_build(dom = self.vm.getDomid(),
image = self.kernel,
@@ -231,8 +239,8 @@
store_evtchn = store_evtchn,
memsize = self.vm.getMemoryTarget() / 1024,
lapic = self.lapic,
+ acpi = self.acpi,
vcpus = self.vm.getVCpuCount())
-
# Return a list of cmd line args to the device models based on the
# xm config file
@@ -264,44 +272,44 @@
nics = 0
for (name, info) in deviceConfig:
if name == 'vbd':
- uname = sxp.child_value(info, 'uname')
- typedev = sxp.child_value(info, 'dev')
- (_, vbdparam) = string.split(uname, ':', 1)
- if re.match('^ioemu:', typedev):
- (emtype, vbddev) = string.split(typedev, ':', 1)
- else:
- emtype = 'vbd'
- vbddev = typedev
- if emtype != 'ioemu':
- continue;
- vbddev_list = ['hda', 'hdb', 'hdc', 'hdd']
- if vbddev not in vbddev_list:
- raise VmError("vmx: for qemu vbd type=file&dev=hda~hdd")
- ret.append("-%s" % vbddev)
- ret.append("%s" % vbdparam)
+ uname = sxp.child_value(info, 'uname')
+ typedev = sxp.child_value(info, 'dev')
+ (_, vbdparam) = string.split(uname, ':', 1)
+ if 'ioemu:' in typedev:
+ (emtype, vbddev) = string.split(typedev, ':', 1)
+ else:
+ emtype = 'vbd'
+ vbddev = typedev
+ if emtype == 'vbd':
+ continue;
+ vbddev_list = ['hda', 'hdb', 'hdc', 'hdd']
+ if vbddev not in vbddev_list:
+ raise VmError("vmx: for qemu vbd type=file&dev=hda~hdd")
+ ret.append("-%s" % vbddev)
+ ret.append("%s" % vbdparam)
if name == 'vif':
- type = sxp.child_value(info, 'type')
- if type != 'ioemu':
- continue
- nics += 1
- if mac != None:
- continue
- mac = sxp.child_value(info, 'mac')
- bridge = sxp.child_value(info, 'bridge')
- if mac == None:
- mac = randomMAC()
- if bridge == None:
- bridge = 'xenbr0'
- ret.append("-macaddr")
- ret.append("%s" % mac)
- ret.append("-bridge")
- ret.append("%s" % bridge)
+ type = sxp.child_value(info, 'type')
+ if type != 'ioemu':
+ continue
+ nics += 1
+ if mac != None:
+ continue
+ mac = sxp.child_value(info, 'mac')
+ bridge = sxp.child_value(info, 'bridge')
+ if mac == None:
+ mac = randomMAC()
+ if bridge == None:
+ bridge = 'xenbr0'
+ ret.append("-macaddr")
+ ret.append("%s" % mac)
+ ret.append("-bridge")
+ ret.append("%s" % bridge)
if name == 'vtpm':
- instance = sxp.child_value(info, 'pref_instance')
- ret.append("-instance")
- ret.append("%s" % instance)
+ instance = sxp.child_value(info, 'pref_instance')
+ ret.append("-instance")
+ ret.append("%s" % instance)
ret.append("-nics")
- ret.append("%d" % nics)
+ ret.append("%d" % nics)
return ret
def configVNC(self, config):
@@ -340,6 +348,8 @@
env = dict(os.environ)
if self.display:
env['DISPLAY'] = self.display
+ if self.xauthority:
+ env['XAUTHORITY'] = self.xauthority
log.info("spawning device models: %s %s", self.device_model, args)
self.pid = os.spawnve(os.P_NOWAIT, self.device_model, args, env)
log.info("device model pid: %d", self.pid)
diff -r 903fb46f240e -r cd914808acf1 tools/python/xen/xend/server/blkif.py
--- a/tools/python/xen/xend/server/blkif.py Tue Jan 3 14:59:00 2006
+++ b/tools/python/xen/xend/server/blkif.py Tue Jan 3 16:19:20 2006
@@ -31,7 +31,7 @@
"""Block device interface controller. Handles all block devices
for a domain.
"""
-
+
def __init__(self, vm):
"""Create a block device controller.
"""
@@ -40,9 +40,9 @@
def getDeviceDetails(self, config):
"""@see DevController.getDeviceDetails"""
-
+
dev = sxp.child_value(config, 'dev')
- if re.match('^ioemu:', dev):
+ if 'ioemu:' in dev:
return (None,{},{})
devid = blkif.blkdev_name_to_number(dev)
diff -r 903fb46f240e -r cd914808acf1 tools/python/xen/xm/create.py
--- a/tools/python/xen/xm/create.py Tue Jan 3 14:59:00 2006
+++ b/tools/python/xen/xm/create.py Tue Jan 3 16:19:20 2006
@@ -164,6 +164,10 @@
fn=set_int, default=0,
use="Disable or enable local APIC of VMX domain.")
+gopts.var('acpi', val='ACPI',
+ fn=set_int, default=0,
+ use="Disable or enable ACPI of VMX domain.")
+
gopts.var('vcpus', val='VCPUS',
fn=set_int, default=1,
use="# of Virtual CPUS in domain.")
@@ -387,6 +391,10 @@
gopts.var('display', val='DISPLAY',
fn=set_value, default=None,
use="X11 display to use")
+
+gopts.var('xauthority', val='XAUTHORITY',
+ fn=set_value, default=None,
+ use="X11 Authority to use")
def err(msg):
@@ -526,7 +534,8 @@
"""
args = [ 'device_model', 'vcpus', 'cdrom', 'boot', 'fda', 'fdb',
'localtime', 'serial', 'stdvga', 'isa', 'nographic', 'audio',
- 'vnc', 'vncviewer', 'sdl', 'display', 'ne2000', 'lapic']
+ 'vnc', 'vncviewer', 'sdl', 'display', 'ne2000', 'lapic',
+ 'xauthority', 'acpi' ]
for a in args:
if (vals.__dict__[a]):
config_image.append([a, vals.__dict__[a]])
@@ -801,6 +810,9 @@
if not gopts.vals.display:
gopts.vals.display = os.getenv("DISPLAY")
+ if not gopts.vals.xauthority:
+ gopts.vals.xauthority = os.getenv("XAUTHORITY")
+
# Process remaining args as config variables.
for arg in args:
if '=' in arg:
diff -r 903fb46f240e -r cd914808acf1 tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py Tue Jan 3 14:59:00 2006
+++ b/tools/python/xen/xm/main.py Tue Jan 3 16:19:20 2006
@@ -75,7 +75,7 @@
vcpu_set_help = """vcpu-set <DomId> <VCPUs> Set the number of VCPUs
for a domain"""
vcpu_list_help = "vcpu-list <DomId> List the VCPUs for a domain
(or all domains)"
vcpu_pin_help = "vcpu-pin <DomId> <VCPU> <CPUs> Set which cpus a VCPU can
use"
-dmesg_help = "dmesg [--clear] Read or clear Xen's message
buffer"
+dmesg_help = "dmesg [-c|--clear] Read or clear Xen's message
buffer"
info_help = "info Get information about the xen
host"
rename_help = "rename <DomId> <New Name> Rename a domain"
log_help = "log Print the xend log"
@@ -672,7 +672,7 @@
server.xend_node_clear_dmesg()
def xm_log(args):
- arg_check(args, 'xm-log', 0)
+ arg_check(args, "log", 0)
from xen.xend.XendClient import server
print server.xend_node_log()
@@ -845,8 +845,8 @@
"balloon": "mem-set",
"set-vcpus": "vcpu-set",
"vif-list": "network-list",
- "vbd-create": "block-create",
- "vbd-destroy": "block-destroy",
+ "vbd-create": "block-attach",
+ "vbd-destroy": "block-detach",
"vbd-list": "block-list",
}
diff -r 903fb46f240e -r cd914808acf1 tools/vtpm_manager/README
--- a/tools/vtpm_manager/README Tue Jan 3 14:59:00 2006
+++ b/tools/vtpm_manager/README Tue Jan 3 16:19:20 2006
@@ -53,11 +53,6 @@
MANUAL_DM_LAUNCH -> Must manually launch & kill VTPMs
-WELL_KNOWN_SRK_AUTH -> Rather than randomly generating the password for the SRK,
- use a well known value. This is necessary for sharing use
- of the SRK across applications. Such as VTPM and Dom0
- measurement software.
-
WELL_KNOWN_OWNER_AUTH -> Rather than randomly generating the password for the owner,
use a well known value. This is useful for debugging and for
poor bios which do not support clearing TPM if OwnerAuth is
diff -r 903fb46f240e -r cd914808acf1 tools/vtpm_manager/Rules.mk
--- a/tools/vtpm_manager/Rules.mk Tue Jan 3 14:59:00 2006
+++ b/tools/vtpm_manager/Rules.mk Tue Jan 3 16:19:20 2006
@@ -56,8 +56,7 @@
# Do not have manager launch DMs.
#CFLAGS += -DMANUAL_DM_LAUNCH
-# Fixed SRK
-CFLAGS += -DWELL_KNOWN_SRK_AUTH
+# Fixed OwnerAuth
#CFLAGS += -DWELL_KNOWN_OWNER_AUTH
# TPM Hardware Device or TPM Simulator
diff -r 903fb46f240e -r cd914808acf1 tools/vtpm_manager/manager/securestorage.c
--- a/tools/vtpm_manager/manager/securestorage.c Tue Jan 3 14:59:00 2006
+++ b/tools/vtpm_manager/manager/securestorage.c Tue Jan 3 16:19:20 2006
@@ -65,7 +65,7 @@
UINT32 i;
struct pack_constbuf_t symkey_cipher32, data_cipher32;
- vtpmloginfo(VTPM_LOG_VTPM_DEEP, "Enveloping[%d]: 0x", buffer_len(inbuf));
+ vtpmloginfo(VTPM_LOG_VTPM_DEEP, "Enveloping Input[%d]: 0x",
buffer_len(inbuf));
for (i=0; i< buffer_len(inbuf); i++)
vtpmloginfomore(VTPM_LOG_VTPM_DEEP, "%x ", inbuf->bytes[i]);
vtpmloginfomore(VTPM_LOG_VTPM_DEEP, "\n");
@@ -94,6 +94,12 @@
BSG_TPM_SIZE32_DATA, &data_cipher32);
vtpmloginfo(VTPM_LOG_VTPM, "Saved %d bytes of E(symkey) + %d bytes of
E(data)\n", buffer_len(&symkey_cipher), buffer_len(&data_cipher));
+
+ vtpmloginfo(VTPM_LOG_VTPM_DEEP, "Enveloping Output[%d]: 0x",
buffer_len(sealed_data));
+ for (i=0; i< buffer_len(sealed_data); i++)
+ vtpmloginfomore(VTPM_LOG_VTPM_DEEP, "%x ", sealed_data->bytes[i]);
+ vtpmloginfomore(VTPM_LOG_VTPM_DEEP, "\n");
+
goto egress;
abort_egress:
@@ -125,7 +131,7 @@
memset(&symkey, 0, sizeof(symkey_t));
- vtpmloginfo(VTPM_LOG_VTPM_DEEP, "envelope decrypting[%ld]: 0x", cipher_size);
+ vtpmloginfo(VTPM_LOG_VTPM_DEEP, "Envelope Decrypt Input[%ld]: 0x",
cipher_size);
for (i=0; i< cipher_size; i++)
vtpmloginfomore(VTPM_LOG_VTPM_DEEP, "%x ", cipher[i]);
vtpmloginfomore(VTPM_LOG_VTPM_DEEP, "\n");
@@ -155,6 +161,11 @@
// Decrypt State
TPMTRY(TPM_DECRYPT_ERROR, Crypto_symcrypto_decrypt (&symkey, &data_cipher,
unsealed_data) );
+
+ vtpmloginfo(VTPM_LOG_VTPM_DEEP, "Envelope Decrypte Output[%d]: 0x",
buffer_len(unsealed_data));
+ for (i=0; i< buffer_len(unsealed_data); i++)
+ vtpmloginfomore(VTPM_LOG_VTPM_DEEP, "%x ", unsealed_data->bytes[i]);
+ vtpmloginfomore(VTPM_LOG_VTPM_DEEP, "\n");
goto egress;
@@ -291,124 +302,175 @@
return status;
}
+
TPM_RESULT VTPM_SaveService(void) {
TPM_RESULT status=TPM_SUCCESS;
int fh, dmis=-1;
-
- BYTE *flat_global;
- int flat_global_size, bytes_written;
+
+ BYTE *flat_boot_key, *flat_dmis, *flat_enc;
+ buffer_t clear_flat_global, enc_flat_global;
UINT32 storageKeySize = buffer_len(&vtpm_globals->storageKeyWrap);
+ UINT32 bootKeySize = buffer_len(&vtpm_globals->bootKeyWrap);
struct pack_buf_t storage_key_pack = {storageKeySize, vtpm_globals->storageKeyWrap.bytes};
-
+ struct pack_buf_t boot_key_pack = {bootKeySize, vtpm_globals->bootKeyWrap.bytes};
+
struct hashtable_itr *dmi_itr;
VTPM_DMI_RESOURCE *dmi_res;
-
- UINT32 flat_global_full_size;
-
- // Global Values needing to be saved
- flat_global_full_size = 3*sizeof(TPM_DIGEST) + // Auths
- sizeof(UINT32) + // storagekeysize
- storageKeySize + // storage key
- hashtable_count(vtpm_globals->dmi_map) * // num DMIS
- (sizeof(UINT32) + 2*sizeof(TPM_DIGEST)); // Per DMI info
-
-
- flat_global = (BYTE *) malloc( flat_global_full_size);
-
- flat_global_size = BSG_PackList(flat_global, 4,
- BSG_TPM_AUTHDATA,
&vtpm_globals->owner_usage_auth,
- BSG_TPM_AUTHDATA,
&vtpm_globals->srk_usage_auth,
- BSG_TPM_SECRET,
&vtpm_globals->storage_key_usage_auth,
- BSG_TPM_SIZE32_DATA, &storage_key_pack);
-
+
+ UINT32 boot_key_size, flat_dmis_size;
+
+ // Initially fill these with buffer sizes for each data type. Later fill
+ // in actual size, once flattened.
+ boot_key_size = sizeof(UINT32) + // bootkeysize
+ bootKeySize; // boot key
+
+ TPMTRYRETURN(buffer_init(&clear_flat_global, 3*sizeof(TPM_DIGEST) + // Auths
+ sizeof(UINT32) +// storagekeysize
+ storageKeySize, NULL) ); // storage key
+
+ flat_dmis_size = (hashtable_count(vtpm_globals->dmi_map) - 1) * // num DMIS (-1 for Dom0)
+ (sizeof(UINT32) + 2*sizeof(TPM_DIGEST)); // Per DMI info
+
+ flat_boot_key = (BYTE *) malloc( boot_key_size );
+ flat_enc = (BYTE *) malloc( sizeof(UINT32) );
+ flat_dmis = (BYTE *) malloc( flat_dmis_size );
+
+ boot_key_size = BSG_PackList(flat_boot_key, 1,
+ BSG_TPM_SIZE32_DATA, &boot_key_pack);
+
+ BSG_PackList(clear_flat_global.bytes, 3,
+ BSG_TPM_AUTHDATA, &vtpm_globals->owner_usage_auth,
+ BSG_TPM_SECRET, &vtpm_globals->storage_key_usage_auth,
+ BSG_TPM_SIZE32_DATA, &storage_key_pack);
+
+ TPMTRYRETURN(envelope_encrypt(&clear_flat_global,
+ &vtpm_globals->bootKey,
+ &enc_flat_global) );
+
+ BSG_PackConst(buffer_len(&enc_flat_global), 4, flat_enc);
+
// Per DMI values to be saved
if (hashtable_count(vtpm_globals->dmi_map) > 0) {
-
+
dmi_itr = hashtable_iterator(vtpm_globals->dmi_map);
do {
dmi_res = (VTPM_DMI_RESOURCE *) hashtable_iterator_value(dmi_itr);
dmis++;
// No need to save dmi0.
- if (dmi_res->dmi_id == 0)
- continue;
-
-
- flat_global_size += BSG_PackList( flat_global + flat_global_size, 3,
- BSG_TYPE_UINT32, &dmi_res->dmi_id,
- BSG_TPM_DIGEST, &dmi_res->NVM_measurement,
- BSG_TPM_DIGEST, &dmi_res->DMI_measurement);
-
+ if (dmi_res->dmi_id == 0)
+ continue;
+
+
+ flat_dmis_size += BSG_PackList( flat_dmis + flat_dmis_size, 3,
+ BSG_TYPE_UINT32, &dmi_res->dmi_id,
+ BSG_TPM_DIGEST, &dmi_res->NVM_measurement,
+ BSG_TPM_DIGEST, &dmi_res->DMI_measurement);
+
} while (hashtable_iterator_advance(dmi_itr));
}
-
- //FIXME: Once we have a way to protect a TPM key, we should use it to
- // encrypt this blob. BUT, unless there is a way to ensure the key is
- // not used by other apps, this encryption is useless.
+
fh = open(STATE_FILE, O_WRONLY | O_CREAT, S_IREAD | S_IWRITE);
if (fh == -1) {
vtpmlogerror(VTPM_LOG_VTPM, "Unable to open %s file for write.\n",
STATE_FILE);
status = TPM_IOERROR;
goto abort_egress;
}
-
- if ( (bytes_written = write(fh, flat_global, flat_global_size)) != flat_global_size ) {
- vtpmlogerror(VTPM_LOG_VTPM, "Failed to save service data. %d/%d bytes written.\n", bytes_written, flat_global_size);
- status = TPM_IOERROR;
- goto abort_egress;
- }
- vtpm_globals->DMI_table_dirty = FALSE;
-
+
+ if ( ( write(fh, flat_boot_key, boot_key_size) != boot_key_size ) ||
+ ( write(fh, flat_enc, sizeof(UINT32)) != sizeof(UINT32) ) ||
+ ( write(fh, enc_flat_global.bytes, buffer_len(&enc_flat_global)) != buffer_len(&enc_flat_global) ) ||
+ ( write(fh, flat_dmis, flat_dmis_size) != flat_dmis_size ) ) {
+ vtpmlogerror(VTPM_LOG_VTPM, "Failed to completely write service data.\n");
+ status = TPM_IOERROR;
+ goto abort_egress;
+ }
+
+ vtpm_globals->DMI_table_dirty = FALSE;
+
goto egress;
-
+
abort_egress:
egress:
-
- free(flat_global);
+
+ free(flat_boot_key);
+ free(flat_enc);
+ buffer_free(&enc_flat_global);
+ free(flat_dmis);
close(fh);
-
+
vtpmloginfo(VTPM_LOG_VTPM, "Saved VTPM Service state (status = %d, dmis =
%d)\n", (int) status, dmis);
return status;
}
TPM_RESULT VTPM_LoadService(void) {
-
+
TPM_RESULT status=TPM_SUCCESS;
int fh, stat_ret, dmis=0;
long fh_size = 0, step_size;
- BYTE *flat_global=NULL;
- struct pack_buf_t storage_key_pack;
- UINT32 *dmi_id_key;
-
+ BYTE *flat_table=NULL;
+ buffer_t unsealed_data;
+ struct pack_buf_t storage_key_pack, boot_key_pack;
+ UINT32 *dmi_id_key, enc_size;
+
VTPM_DMI_RESOURCE *dmi_res;
struct stat file_stat;
-
+
+ TPM_HANDLE boot_key_handle;
+ TPM_AUTHDATA boot_usage_auth;
+ memset(&boot_usage_auth, 0, sizeof(TPM_AUTHDATA));
+
fh = open(STATE_FILE, O_RDONLY );
stat_ret = fstat(fh, &file_stat);
- if (stat_ret == 0)
+ if (stat_ret == 0)
fh_size = file_stat.st_size;
else {
status = TPM_IOERROR;
goto abort_egress;
}
-
- flat_global = (BYTE *) malloc(fh_size);
-
- if ((long) read(fh, flat_global, fh_size) != fh_size ) {
- status = TPM_IOERROR;
- goto abort_egress;
- }
-
+
+ flat_table = (BYTE *) malloc(fh_size);
+
+ if ((long) read(fh, flat_table, fh_size) != fh_size ) {
+ status = TPM_IOERROR;
+ goto abort_egress;
+ }
+
+ // Read Boot Key
+ step_size = BSG_UnpackList( flat_table, 2,
+ BSG_TPM_SIZE32_DATA, &boot_key_pack,
+ BSG_TYPE_UINT32, &enc_size);
+
+ TPMTRYRETURN(buffer_init(&vtpm_globals->bootKeyWrap, 0, 0) );
+ TPMTRYRETURN(buffer_append_raw(&vtpm_globals->bootKeyWrap, boot_key_pack.size, boot_key_pack.data) );
+
+ //Load Boot Key
+ TPMTRYRETURN( VTSP_LoadKey( vtpm_globals->manager_tcs_handle,
+ TPM_SRK_KEYHANDLE,
+ &vtpm_globals->bootKeyWrap,
+ &SRK_AUTH,
+ &boot_key_handle,
+ &vtpm_globals->keyAuth,
+ &vtpm_globals->bootKey,
+ FALSE) );
+
+ TPMTRYRETURN( envelope_decrypt(enc_size,
+ flat_table + step_size,
+ vtpm_globals->manager_tcs_handle,
+ boot_key_handle,
+ (const TPM_AUTHDATA*) &boot_usage_auth,
+ &unsealed_data) );
+ step_size += enc_size;
+
// Global Values needing to be saved
- step_size = BSG_UnpackList( flat_global, 4,
- BSG_TPM_AUTHDATA, &vtpm_globals->owner_usage_auth,
- BSG_TPM_AUTHDATA, &vtpm_globals->srk_usage_auth,
- BSG_TPM_SECRET, &vtpm_globals->storage_key_usage_auth,
- BSG_TPM_SIZE32_DATA, &storage_key_pack);
-
+ BSG_UnpackList( unsealed_data.bytes, 3,
+ BSG_TPM_AUTHDATA, &vtpm_globals->owner_usage_auth,
+ BSG_TPM_SECRET, &vtpm_globals->storage_key_usage_auth,
+ BSG_TPM_SIZE32_DATA, &storage_key_pack);
+
TPMTRYRETURN(buffer_init(&vtpm_globals->storageKeyWrap, 0, 0) );
TPMTRYRETURN(buffer_append_raw(&vtpm_globals->storageKeyWrap, storage_key_pack.size, storage_key_pack.data) );
-
+
// Per DMI values to be saved
while ( step_size < fh_size ){
if (fh_size - step_size < (long) (sizeof(UINT32) + 2*sizeof(TPM_DIGEST))) {
@@ -417,35 +479,38 @@
} else {
dmi_res = (VTPM_DMI_RESOURCE *) malloc(sizeof(VTPM_DMI_RESOURCE));
dmis++;
-
+
dmi_res->connected = FALSE;
-
- step_size += BSG_UnpackList(flat_global + step_size, 3,
- BSG_TYPE_UINT32, &dmi_res->dmi_id,
- BSG_TPM_DIGEST, &dmi_res->NVM_measurement,
- BSG_TPM_DIGEST, &dmi_res->DMI_measurement);
-
+
+ step_size += BSG_UnpackList(flat_table + step_size, 3,
+ BSG_TYPE_UINT32, &dmi_res->dmi_id,
+ BSG_TPM_DIGEST, &dmi_res->NVM_measurement,
+ BSG_TPM_DIGEST, &dmi_res->DMI_measurement);
+
// install into map
dmi_id_key = (UINT32 *) malloc (sizeof(UINT32));
*dmi_id_key = dmi_res->dmi_id;
if (!hashtable_insert(vtpm_globals->dmi_map, dmi_id_key, dmi_res)) {
- status = TPM_FAIL;
- goto abort_egress;
+ status = TPM_FAIL;
+ goto abort_egress;
}
-
+
}
-
- }
-
+
+ }
+
vtpmloginfo(VTPM_LOG_VTPM, "Loaded saved state (dmis = %d).\n", dmis);
goto egress;
-
+
abort_egress:
vtpmlogerror(VTPM_LOG_VTPM, "Failed to load service data with error = %s\n",
tpm_get_error_name(status));
egress:
-
- free(flat_global);
+
+ free(flat_table);
close(fh);
-
+
+ // TODO: Would be nice to evict BootKey here. (Need to add EvictKey to VTSP.)
+
return status;
}
+
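Note: after this change the vtpm_manager state file is no longer one cleartext blob. VTPM_SaveService() above writes, in order, the TPM-wrapped boot key, the size of the encrypted globals, the envelope-encrypted auth/storage-key block, and the per-DMI records; VTPM_LoadService() consumes the same sequence. A comment-only sketch of that on-disk layout, reconstructed from the code above (field names follow the patch):

    /*
     * vtpm_manager STATE_FILE layout (BSG-packed):
     *
     *   UINT32      boot_key_size;
     *   BYTE        boot_key[boot_key_size];   // SRK-wrapped boot key blob
     *   UINT32      enc_size;
     *   BYTE        enc_global[enc_size];      // envelope_encrypt()ed owner auth,
     *                                          // storage-key auth and wrapped storage key
     *   per DMI (Dom0 excluded):
     *     UINT32      dmi_id;
     *     TPM_DIGEST  NVM_measurement;
     *     TPM_DIGEST  DMI_measurement;
     */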
diff -r 903fb46f240e -r cd914808acf1 tools/vtpm_manager/manager/vtpm_manager.c
--- a/tools/vtpm_manager/manager/vtpm_manager.c Tue Jan 3 14:59:00 2006
+++ b/tools/vtpm_manager/manager/vtpm_manager.c Tue Jan 3 16:19:20 2006
@@ -74,16 +74,15 @@
#endif
// --------------------------- Well Known Auths --------------------------
-#ifdef WELL_KNOWN_SRK_AUTH
-static BYTE FIXED_SRK_AUTH[20] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+const TPM_AUTHDATA SRK_AUTH = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-#endif
#ifdef WELL_KNOWN_OWNER_AUTH
static BYTE FIXED_OWNER_AUTH[20] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
#endif
-
+
+
// -------------------------- Hash table functions --------------------
static unsigned int hashfunc32(void *ky) {
@@ -100,13 +99,7 @@
TPM_RESULT status = TPM_SUCCESS;
- // Generate Auth's for SRK & Owner
-#ifdef WELL_KNOWN_SRK_AUTH
- memcpy(vtpm_globals->srk_usage_auth, FIXED_SRK_AUTH, sizeof(TPM_AUTHDATA));
-#else
- Crypto_GetRandom(vtpm_globals->srk_usage_auth, sizeof(TPM_AUTHDATA) );
-#endif
-
+ // Generate Auth for Owner
#ifdef WELL_KNOWN_OWNER_AUTH
memcpy(vtpm_globals->owner_usage_auth, FIXED_OWNER_AUTH, sizeof(TPM_AUTHDATA));
#else
@@ -116,14 +109,14 @@
// Take Owership of TPM
CRYPTO_INFO ek_cryptoInfo;
- vtpmloginfo(VTPM_LOG_VTPM, "Attempting Pubek Read. NOTE: Failure is ok.\n");
status = VTSP_ReadPubek(vtpm_globals->manager_tcs_handle, &ek_cryptoInfo);
// If we can read PubEK then there is no owner and we should take it.
if (status == TPM_SUCCESS) {
+ vtpmloginfo(VTPM_LOG_VTPM, "Failed to readEK meaning TPM has an owner.
Creating Keys off existing SRK.\n");
TPMTRYRETURN(VTSP_TakeOwnership(vtpm_globals->manager_tcs_handle,
- (const TPM_AUTHDATA*)&vtpm_globals->owner_usage_auth,
- (const TPM_AUTHDATA*)&vtpm_globals->srk_usage_auth,
+ &SRK_AUTH,
&ek_cryptoInfo,
&vtpm_globals->keyAuth));
@@ -142,7 +135,7 @@
TPMTRYRETURN( VTSP_OSAP(vtpm_globals->manager_tcs_handle,
TPM_ET_KEYHANDLE,
TPM_SRK_KEYHANDLE,
- (const TPM_AUTHDATA*)&vtpm_globals->srk_usage_auth,
+ &SRK_AUTH,
&sharedsecret,
&osap) );
@@ -157,8 +150,43 @@
&vtpm_globals->storageKeyWrap,
&osap) );
- vtpm_globals->keyAuth.fContinueAuthSession = TRUE;
-
+ // Generate boot key's auth
+ Crypto_GetRandom( &vtpm_globals->storage_key_usage_auth,
+ sizeof(TPM_AUTHDATA) );
+
+ TPM_AUTHDATA bootKeyWrapAuth;
+ memset(&bootKeyWrapAuth, 0, sizeof(bootKeyWrapAuth));
+
+ TPMTRYRETURN( VTSP_OSAP(vtpm_globals->manager_tcs_handle,
+ TPM_ET_KEYHANDLE,
+ TPM_SRK_KEYHANDLE,
+ &SRK_AUTH,
+ &sharedsecret,
+ &osap) );
+
+ osap.fContinueAuthSession = FALSE;
+
+ // FIXME: This key protects the global secrets on disk. It should use TPM
+ // PCR bindings to limit its use to legit configurations.
+ // Current binds are open, implying a Trusted VM contains this code.
+ // If this VM is not Trusted, use measurement and PCR bindings.
+ TPMTRYRETURN( VTSP_CreateWrapKey( vtpm_globals->manager_tcs_handle,
+ TPM_KEY_BIND,
+ (const TPM_AUTHDATA*)&bootKeyWrapAuth,
+ TPM_SRK_KEYHANDLE,
+ (const TPM_AUTHDATA*)&sharedsecret,
+ &vtpm_globals->bootKeyWrap,
+ &osap) );
+
+ // Populate CRYPTO_INFO vtpm_globals->bootKey. This does not load it into the TPM
+ TPMTRYRETURN( VTSP_LoadKey( vtpm_globals->manager_tcs_handle,
+ TPM_SRK_KEYHANDLE,
+ &vtpm_globals->bootKeyWrap,
+ NULL,
+ NULL,
+ NULL,
+ &vtpm_globals->bootKey,
+ TRUE ) );
goto egress;
abort_egress:
@@ -278,24 +306,26 @@
#endif
// Check status of rx_fh. If necessary attempt to re-open it.
+ char* s = NULL;
if (*rx_fh < 0) {
#ifdef VTPM_MULTI_VM
- *rx_fh = open(VTPM_BE_DEV, O_RDWR);
+ s = VTPM_BE_DEV;
#else
if (threadType == BE_LISTENER_THREAD)
#ifdef DUMMY_BACKEND
- *rx_fh = open("/tmp/in.fifo", O_RDWR);
+ s = "/tmp/in.fifo";
#else
- *rx_fh = open(VTPM_BE_DEV, O_RDWR);
+ s = VTPM_BE_DEV;
#endif
else // DMI Listener
- *rx_fh = open(VTPM_RX_FIFO, O_RDWR);
+ s = VTPM_RX_FIFO;
+ *rx_fh = open(s, O_RDWR);
#endif
}
// Respond to failures to open rx_fh
if (*rx_fh < 0) {
- vtpmhandlerlogerror(VTPM_LOG_VTPM, "Can't open inbound fh.\n");
+ vtpmhandlerlogerror(VTPM_LOG_VTPM, "Can't open inbound fh for %s.\n", s);
#ifdef VTPM_MULTI_VM
return TPM_IOERROR;
#else
@@ -713,7 +743,7 @@
///////////////////////////////////////////////////////////////////////////////
TPM_RESULT VTPM_Init_Service() {
- TPM_RESULT status = TPM_FAIL;
+ TPM_RESULT status = TPM_FAIL, serviceStatus;
BYTE *randomsead;
UINT32 randomsize;
@@ -737,7 +767,7 @@
// Create new TCS Object
vtpm_globals->manager_tcs_handle = 0;
-
+
TPMTRYRETURN(TCS_create());
// Create TCS Context for service
@@ -756,17 +786,24 @@
vtpm_globals->keyAuth.fContinueAuthSession = TRUE;
// If failed, create new Service.
- if (VTPM_LoadService() != TPM_SUCCESS)
+ serviceStatus = VTPM_LoadService();
+ if (serviceStatus == TPM_IOERROR) {
+ vtpmloginfo(VTPM_LOG_VTPM, "Failed to read service file. Assuming first
time initialization.\n");
TPMTRYRETURN( VTPM_Create_Service() );
+ } else if (serviceStatus != TPM_SUCCESS) {
+ vtpmlogerror(VTPM_LOG_VTPM, "Failed to read existing service file");
+ exit(1);
+ }
//Load Storage Key
TPMTRYRETURN( VTSP_LoadKey( vtpm_globals->manager_tcs_handle,
TPM_SRK_KEYHANDLE,
&vtpm_globals->storageKeyWrap,
- (const TPM_AUTHDATA*)&vtpm_globals->srk_usage_auth,
+ &SRK_AUTH,
&vtpm_globals->storageKeyHandle,
&vtpm_globals->keyAuth,
- &vtpm_globals->storageKey) );
+ &vtpm_globals->storageKey,
+ FALSE ) );
// Create entry for Dom0 for control messages
TPMTRYRETURN( VTPM_Handle_New_DMI(NULL) );
@@ -797,12 +834,11 @@
free (dmi_itr);
}
-
+ if ( (vtpm_globals->DMI_table_dirty) && (VTPM_SaveService() != TPM_SUCCESS) )
+ vtpmlogerror(VTPM_LOG_VTPM, "Unable to save manager data.\n");
+
TCS_CloseContext(vtpm_globals->manager_tcs_handle);
-
- if ( (vtpm_globals->DMI_table_dirty) &&
- (VTPM_SaveService() != TPM_SUCCESS) )
- vtpmlogerror(VTPM_LOG_VTPM, "Unable to save manager data.\n");
+ TCS_destroy();
hashtable_destroy(vtpm_globals->dmi_map, 1);
free(vtpm_globals);
diff -r 903fb46f240e -r cd914808acf1 tools/vtpm_manager/manager/vtpmpriv.h
--- a/tools/vtpm_manager/manager/vtpmpriv.h Tue Jan 3 14:59:00 2006
+++ b/tools/vtpm_manager/manager/vtpmpriv.h Tue Jan 3 16:19:20 2006
@@ -108,6 +108,7 @@
TCS_CONTEXT_HANDLE manager_tcs_handle; // TCS Handle used by manager
TPM_HANDLE storageKeyHandle; // Key used by persistent store
CRYPTO_INFO storageKey; // For software encryption
+ CRYPTO_INFO bootKey; // For saving table
TCS_AUTH keyAuth; // OIAP session for storageKey
BOOL DMI_table_dirty; // Indicates that a command
// has updated the DMI table
@@ -115,15 +116,17 @@
// Persistent Data
TPM_AUTHDATA owner_usage_auth; // OwnerAuth of real TPM
- TPM_AUTHDATA srk_usage_auth; // SRK Auth of real TPM
buffer_t storageKeyWrap; // Wrapped copy of storageKey
+ TPM_AUTHDATA srk_usage_auth;
+ TPM_AUTHDATA storage_key_usage_auth;
- TPM_AUTHDATA storage_key_usage_auth;
-
+ buffer_t bootKeyWrap; // Wrapped copy of boot key
+
}VTPM_GLOBALS;
-//Global dmi map
-extern VTPM_GLOBALS *vtpm_globals;
+// --------------------------- Global Values --------------------------
+extern VTPM_GLOBALS *vtpm_globals; // Key info and DMI states
+extern const TPM_AUTHDATA SRK_AUTH; // SRK Well Known Auth Value
// ********************** Command Handler Prototypes ***********************
TPM_RESULT VTPM_Handle_Load_NVM( VTPM_DMI_RESOURCE *myDMI,
diff -r 903fb46f240e -r cd914808acf1 tools/vtpm_manager/manager/vtsp.c
--- a/tools/vtpm_manager/manager/vtsp.c Tue Jan 3 14:59:00 2006
+++ b/tools/vtpm_manager/manager/vtsp.c Tue Jan 3 16:19:20 2006
@@ -563,63 +563,69 @@
const TPM_AUTHDATA *parentAuth,
TPM_HANDLE *newKeyHandle,
TCS_AUTH *auth,
- CRYPTO_INFO *cryptoinfo /*= NULL*/) {
-
-
- vtpmloginfo(VTPM_LOG_VTSP, "Loading Key.\n%s","");
+ CRYPTO_INFO *cryptoinfo,
+ const BOOL skipTPMLoad) {
+
+
+ vtpmloginfo(VTPM_LOG_VTSP, "Loading Key %s.\n", (!skipTPMLoad ? "into TPM" :
"only into memory"));
TPM_RESULT status = TPM_SUCCESS;
TPM_COMMAND_CODE command = TPM_ORD_LoadKey;
-
- BYTE *paramText; // Digest to make Auth.
+
+ BYTE *paramText=NULL; // Digest to make Auth.
UINT32 paramTextSize;
-
- if ((rgbWrappedKeyBlob == NULL) || (parentAuth == NULL) ||
- (newKeyHandle==NULL) || (auth==NULL)) {
- status = TPM_BAD_PARAMETER;
- goto abort_egress;
- }
-
- // Generate Extra TCS Parameters
- TPM_HANDLE phKeyHMAC;
-
- // Generate HMAC
- Crypto_GetRandom(&auth->NonceOdd, sizeof(TPM_NONCE) );
-
- paramText = (BYTE *) malloc(sizeof(BYTE) * TCPA_MAX_BUFFER_LENGTH);
-
- paramTextSize = BSG_PackList(paramText, 1,
- BSG_TPM_COMMAND_CODE, &command);
-
- memcpy(paramText + paramTextSize, rgbWrappedKeyBlob->bytes, buffer_len(rgbWrappedKeyBlob));
- paramTextSize += buffer_len(rgbWrappedKeyBlob);
-
- TPMTRYRETURN( GenerateAuth( paramText, paramTextSize,
+
+ // SkipTPMLoad stops key from being loaded into TPM, but still generates CRYPTO_INFO for it
+ if (! skipTPMLoad) {
+
+ if ((rgbWrappedKeyBlob == NULL) || (parentAuth == NULL) ||
+ (newKeyHandle==NULL) || (auth==NULL)) {
+ status = TPM_BAD_PARAMETER;
+ goto abort_egress;
+ }
+
+ // Generate Extra TCS Parameters
+ TPM_HANDLE phKeyHMAC;
+
+ // Generate HMAC
+ Crypto_GetRandom(&auth->NonceOdd, sizeof(TPM_NONCE) );
+
+ paramText = (BYTE *) malloc(sizeof(BYTE) * TCPA_MAX_BUFFER_LENGTH);
+
+ paramTextSize = BSG_PackList(paramText, 1,
+ BSG_TPM_COMMAND_CODE, &command);
+
+ memcpy(paramText + paramTextSize, rgbWrappedKeyBlob->bytes, buffer_len(rgbWrappedKeyBlob));
+ paramTextSize += buffer_len(rgbWrappedKeyBlob);
+
+ TPMTRYRETURN( GenerateAuth( paramText, paramTextSize,
parentAuth, auth) );
- // Call TCS
- TPMTRYRETURN( TCSP_LoadKeyByBlob( hContext,
- hUnwrappingKey,
- buffer_len(rgbWrappedKeyBlob),
- rgbWrappedKeyBlob->bytes,
- auth,
- newKeyHandle,
- &phKeyHMAC) );
-
- // Verify Auth
- paramTextSize = BSG_PackList(paramText, 3,
- BSG_TPM_RESULT, &status,
- BSG_TPM_COMMAND_CODE, &command,
- BSG_TPM_HANDLE, newKeyHandle);
-
- TPMTRYRETURN( VerifyAuth( paramText, paramTextSize,
- parentAuth, auth,
- hContext) );
-
- // Unpack/return key structure
+ // Call TCS
+ TPMTRYRETURN( TCSP_LoadKeyByBlob( hContext,
+ hUnwrappingKey,
+ buffer_len(rgbWrappedKeyBlob),
+ rgbWrappedKeyBlob->bytes,
+ auth,
+ newKeyHandle,
+ &phKeyHMAC) );
+
+ // Verify Auth
+ paramTextSize = BSG_PackList(paramText, 3,
+ BSG_TPM_RESULT, &status,
+ BSG_TPM_COMMAND_CODE, &command,
+ BSG_TPM_HANDLE, newKeyHandle);
+
+ TPMTRYRETURN( VerifyAuth( paramText, paramTextSize,
+ parentAuth, auth,
+ hContext) );
+ }
+
+ // Build cryptoinfo structure for software crypto function.
if (cryptoinfo != NULL) {
TPM_KEY newKey;
+ // Unpack/return key structure
BSG_Unpack(BSG_TPM_KEY, rgbWrappedKeyBlob->bytes , &newKey);
TPM_RSA_KEY_PARMS rsaKeyParms;
diff -r 903fb46f240e -r cd914808acf1 tools/vtpm_manager/manager/vtsp.h
--- a/tools/vtpm_manager/manager/vtsp.h Tue Jan 3 14:59:00 2006
+++ b/tools/vtpm_manager/manager/vtsp.h Tue Jan 3 16:19:20 2006
@@ -86,7 +86,8 @@
const TPM_AUTHDATA *parentAuth,
TPM_HANDLE *newKeyHandle,
TCS_AUTH *pAuth,
- CRYPTO_INFO *cryptoinfo);
+ CRYPTO_INFO *cryptoinfo,
+ const BOOL skipTPMLoad);
TPM_RESULT VTSP_Unbind( const TCS_CONTEXT_HANDLE hContext,
const TPM_KEY_HANDLE key_handle,
diff -r 903fb46f240e -r cd914808acf1 tools/xentrace/Makefile
--- a/tools/xentrace/Makefile Tue Jan 3 14:59:00 2006
+++ b/tools/xentrace/Makefile Tue Jan 3 16:19:20 2006
@@ -15,25 +15,37 @@
OBJS = $(patsubst %.c,%.o,$(wildcard *.c))
BIN = xentrace tbctl setsize
+LIBBIN =
SCRIPTS = xentrace_format
MAN1 = $(wildcard *.1)
MAN8 = $(wildcard *.8)
+ifeq ($(XEN_TARGET_ARCH),x86_32)
+LIBBIN += xenctx
+endif
+
+ifeq ($(XEN_TARGET_ARCH),x86_64)
+LIBBIN += xenctx
+endif
+
all: build
-build: $(BIN)
+build: $(BIN) $(LIBBIN)
install: build
[ -d $(DESTDIR)/usr/bin ] || $(INSTALL_DIR) $(DESTDIR)/usr/bin
+ [ -z "$(LIBBIN)"] || [ -d $(DESTDIR)/usr/$(LIBDIR)/xen/bin ] || \
+ $(INSTALL_DIR) $(DESTDIR)/usr/$(LIBDIR)/xen/bin
[ -d $(DESTDIR)/usr/share/man/man1 ] || \
$(INSTALL_DIR) $(DESTDIR)/usr/share/man/man1
[ -d $(DESTDIR)/usr/share/man/man8 ] || \
$(INSTALL_DIR) $(DESTDIR)/usr/share/man/man8
$(INSTALL_PROG) $(BIN) $(SCRIPTS) $(DESTDIR)/usr/bin
+ [ -z "$(LIBBIN)"] || $(INSTALL_PROG) $(LIBBIN)
$(DESTDIR)/usr/$(LIBDIR)/xen/bin
$(INSTALL_DATA) $(MAN1) $(DESTDIR)/usr/share/man/man1
$(INSTALL_DATA) $(MAN8) $(DESTDIR)/usr/share/man/man8
clean:
- $(RM) *.a *.so *.o *.rpm $(BIN)
+ $(RM) *.a *.so *.o *.rpm $(BIN) $(LIBBIN)
%: %.c $(HDRS) Makefile
$(CC) $(CFLAGS) -o $@ $< -L$(XEN_LIBXC) -lxenctrl
diff -r 903fb46f240e -r cd914808acf1 tools/xentrace/xenctx.c
--- a/tools/xentrace/xenctx.c Tue Jan 3 14:59:00 2006
+++ b/tools/xentrace/xenctx.c Tue Jan 3 16:19:20 2006
@@ -20,15 +20,184 @@
#include <errno.h>
#include <argp.h>
#include <signal.h>
+#include <string.h>
+#include <getopt.h>
#include "xenctrl.h"
+
+int xc_handle = 0;
+int domid = 0;
+int frame_ptrs = 0;
+int stack_trace = 0;
+
+#if defined (__i386__)
+#define FMT_SIZE_T "%08x"
+#define STACK_POINTER(regs) (regs->esp)
+#define FRAME_POINTER(regs) (regs->ebp)
+#define INSTR_POINTER(regs) (regs->eip)
+#define STACK_ROWS 4
+#define STACK_COLS 8
+#elif defined (__x86_64__)
+#define FMT_SIZE_T "%016lx"
+#define STACK_POINTER(regs) (regs->rsp)
+#define FRAME_POINTER(regs) (regs->rbp)
+#define INSTR_POINTER(regs) (regs->rip)
+#define STACK_ROWS 4
+#define STACK_COLS 4
+#endif
+
+struct symbol {
+ size_t address;
+ char type;
+ char *name;
+ struct symbol *next;
+} *symbol_table = NULL;
+
+size_t kernel_stext, kernel_etext, kernel_sinittext, kernel_einittext;
+
+int is_kernel_text(size_t addr)
+{
+#if defined (__i386__)
+ if (symbol_table == NULL)
+ return (addr > 0xc0000000);
+#elif defined (__x86_64__)
+ if (symbol_table == NULL)
+ return (addr > 0xffffffff80000000UL);
+#endif
+
+ if (addr >= kernel_stext &&
+ addr <= kernel_etext)
+ return 1;
+ if (addr >= kernel_sinittext &&
+ addr <= kernel_einittext)
+ return 1;
+ return 0;
+}
+
+void free_symbol(struct symbol *symbol)
+{
+ if (symbol == NULL)
+ return;
+ if (symbol->name)
+ free(symbol->name);
+ free(symbol);
+}
+
+void insert_symbol(struct symbol *symbol)
+{
+ static struct symbol *prev = NULL;
+ struct symbol *s = symbol_table;
+
+ if (s == NULL) {
+ symbol_table = symbol;
+ symbol->next = NULL;
+ return;
+ }
+
+ /* The System.map is usually already sorted... */
+ if (prev
+ && prev->address < symbol->address
+ && (!prev->next || prev->next->address > symbol->address)) {
+ s = prev;
+ } else {
+ /* ... otherwise do crappy/slow search for the correct place */
+ while(s && s->next && s->next->address < symbol->address)
+ s = s->next;
+ }
+
+ symbol->next = s->next;
+ s->next = symbol;
+ prev = symbol;
+}
+
+struct symbol *lookup_symbol(size_t address)
+{
+ struct symbol *s = symbol_table;
+
+ while(s && s->next && s->next->address < address)
+ s = s->next;
+
+ if (s && s->address < address)
+ return s;
+
+ return NULL;
+}
+
+void print_symbol(size_t addr)
+{
+ struct symbol *s;
+
+ if (!is_kernel_text(addr))
+ return;
+
+ s = lookup_symbol(addr);
+
+ if (s==NULL)
+ return;
+
+ if (addr==s->address)
+ printf("%s", s->name);
+ else
+ printf("%s+%#x", s->name, (unsigned int)(addr - s->address));
+}
+
+void read_symbol_table(const char *symtab)
+{
+ char line[256];
+ char *p;
+ struct symbol *symbol;
+ FILE *f;
+
+ f = fopen(symtab, "r");
+ if(f == NULL) {
+ fprintf(stderr, "failed to open symbol table %s\n", symtab);
+ exit(-1);
+ }
+
+ while(!feof(f)) {
+ if(fgets(line,256,f)==NULL)
+ break;
+
+ symbol = malloc(sizeof(*symbol));
+
+ /* need more checks for syntax here... */
+ symbol->address = strtoull(line, &p, 16);
+ p++;
+ symbol->type = *p++;
+ p++;
+
+ /* in the future we should handle the module name
+ * being appended here, this would allow us to use
+ * /proc/kallsyms as our symbol table
+ */
+ if (p[strlen(p)-1] == '\n')
+ p[strlen(p)-1] = '\0';
+ symbol->name = strdup(p);
+
+ insert_symbol(symbol);
+
+ if (strcmp(symbol->name, "_stext") == 0)
+ kernel_stext = symbol->address;
+ else if (strcmp(symbol->name, "_etext") == 0)
+ kernel_etext = symbol->address;
+ else if (strcmp(symbol->name, "_sinittext") == 0)
+ kernel_sinittext = symbol->address;
+ else if (strcmp(symbol->name, "_einittext") == 0)
+ kernel_einittext = symbol->address;
+ }
+
+ fclose(f);
+}
#ifdef __i386__
void print_ctx(vcpu_guest_context_t *ctx1)
{
struct cpu_user_regs *regs = &ctx1->user_regs;
- printf("eip: %08x\t", regs->eip);
+ printf("eip: %08x ", regs->eip);
+ print_symbol(regs->eip);
+ printf("\n");
+
printf("esp: %08x\n", regs->esp);
printf("eax: %08x\t", regs->eax);
@@ -51,7 +220,9 @@
{
struct cpu_user_regs *regs = &ctx1->user_regs;
- printf("rip: %08lx\t", regs->rip);
+ printf("rip: %08lx ", regs->rip);
+ print_symbol(regs->rip);
+ printf("\n");
printf("rsp: %08lx\n", regs->rsp);
printf("rax: %08lx\t", regs->rax);
@@ -63,8 +234,8 @@
printf("rdi: %08lx\t", regs->rdi);
printf("rbp: %08lx\n", regs->rbp);
- printf("r8: %08lx\t", regs->r8);
- printf("r9: %08lx\t", regs->r9);
+ printf(" r8: %08lx\t", regs->r8);
+ printf(" r9: %08lx\t", regs->r9);
printf("r10: %08lx\t", regs->r10);
printf("r11: %08lx\n", regs->r11);
@@ -81,35 +252,238 @@
}
#endif
-void dump_ctx(uint32_t domid, uint32_t vcpu)
+void *map_page(vcpu_guest_context_t *ctx, int vcpu, size_t virt)
+{
+ static unsigned long previous_mfn = 0;
+ static void *mapped = NULL;
+
+ unsigned long mfn = xc_translate_foreign_address(xc_handle, domid, vcpu, virt);
+ unsigned long offset = virt & ~XC_PAGE_MASK;
+
+ if (mapped && mfn == previous_mfn)
+ goto out;
+
+ if (mapped)
+ munmap(mapped, XC_PAGE_SIZE);
+
+ previous_mfn = mfn;
+
+ mapped = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE, PROT_READ, mfn);
+
+ if (mapped == NULL) {
+ fprintf(stderr, "failed to map page.\n");
+ exit(-1);
+ }
+
+ out:
+ return (void *)(mapped + offset);
+}
+
+void print_stack(vcpu_guest_context_t *ctx, int vcpu)
+{
+ struct cpu_user_regs *regs = &ctx->user_regs;
+ size_t stack = STACK_POINTER(regs);
+ size_t stack_limit = (STACK_POINTER(regs) & XC_PAGE_MASK) + XC_PAGE_SIZE;
+ size_t frame;
+ size_t instr;
+ size_t *p;
+ int i;
+
+ printf("\n");
+ printf("Stack:\n");
+ for (i=1; i<STACK_ROWS+1 && stack < stack_limit; i++) {
+ while(stack < stack_limit && stack < STACK_POINTER(regs) + i*STACK_COLS*sizeof(stack)) {
+ p = map_page(ctx, vcpu, stack);
+ printf(" " FMT_SIZE_T, *p);
+ stack += sizeof(stack);
+ }
+ printf("\n");
+ }
+ printf("\n");
+
+ printf("Code:\n");
+ instr = INSTR_POINTER(regs) - 21;
+ for(i=0; i<32; i++) {
+ unsigned char *c = map_page(ctx, vcpu, instr+i);
+ if (instr+i == INSTR_POINTER(regs))
+ printf("<%02x> ", *c);
+ else
+ printf("%02x ", *c);
+ }
+ printf("\n");
+
+ printf("\n");
+
+ if(stack_trace)
+ printf("Stack Trace:\n");
+ else
+ printf("Call Trace:\n");
+ printf("%c [<" FMT_SIZE_T ">] ", stack_trace ? '*' : ' ',
INSTR_POINTER(regs));
+
+ print_symbol(INSTR_POINTER(regs));
+ printf(" <--\n");
+ if (frame_ptrs) {
+ stack = STACK_POINTER(regs);
+ frame = FRAME_POINTER(regs);
+ while(frame && stack < stack_limit) {
+ if (stack_trace) {
+ while (stack < frame) {
+ p = map_page(ctx, vcpu, stack);
+ printf("| " FMT_SIZE_T " ", *p);
+ printf("\n");
+ stack += sizeof(*p);
+ }
+ } else {
+ stack = frame;
+ }
+
+ p = map_page(ctx, vcpu, stack);
+ frame = *p;
+ if (stack_trace)
+ printf("|-- " FMT_SIZE_T "\n", *p);
+ stack += sizeof(*p);
+
+ if (frame) {
+ p = map_page(ctx, vcpu, stack);
+ printf("%c [<" FMT_SIZE_T ">] ", stack_trace ? '|' : ' ', *p);
+ print_symbol(*p);
+ printf("\n");
+ stack += sizeof(*p);
+ }
+ }
+ } else {
+ stack = STACK_POINTER(regs);
+ while(stack < stack_limit) {
+ p = map_page(ctx, vcpu, stack);
+ if (is_kernel_text(*p)) {
+ printf(" [<" FMT_SIZE_T ">] ", *p);
+ print_symbol(*p);
+ printf("\n");
+ } else if (stack_trace) {
+ printf(" " FMT_SIZE_T "\n", *p);
+ }
+ stack += sizeof(*p);
+ }
+ }
+}
+
+void dump_ctx(int vcpu)
{
int ret;
vcpu_guest_context_t ctx;
- int xc_handle = xc_interface_open(); /* for accessing control interface */
+ xc_handle = xc_interface_open(); /* for accessing control interface */
+
+ ret = xc_domain_pause(xc_handle, domid);
+ if (ret < 0) {
+ perror("xc_domain_pause");
+ exit(-1);
+ }
ret = xc_domain_get_vcpu_context(xc_handle, domid, vcpu, &ctx);
- if (ret != 0) {
+ if (ret < 0) {
+ xc_domain_unpause(xc_handle, domid);
perror("xc_domain_get_vcpu_context");
exit(-1);
}
+
print_ctx(&ctx);
+ if (is_kernel_text(ctx.user_regs.eip))
+ print_stack(&ctx, vcpu);
+
+ ret = xc_domain_unpause(xc_handle, domid);
+ if (ret < 0) {
+ perror("xc_domain_unpause");
+ exit(-1);
+ }
+
- xc_interface_close(xc_handle);
+ ret = xc_interface_close(xc_handle);
+ if (ret < 0) {
+ perror("xc_interface_close");
+ exit(-1);
+ }
+}
+
+void usage(void)
+{
+ printf("usage:\n\n");
+
+ printf(" xenctx [options] <DOMAIN> [VCPU]\n\n");
+
+ printf("options:\n");
+ printf(" -f, --frame-pointers\n");
+ printf(" assume the kernel was compiled with\n");
+ printf(" frame pointers.\n");
+ printf(" -s SYMTAB, --symbol-table=SYMTAB\n");
+ printf(" read symbol table from SYMTAB.\n");
+ printf(" --stack-trace print a complete stack trace.\n");
}
int main(int argc, char **argv)
{
+ int ch;
+ const char *sopts = "fs:h";
+ const struct option lopts[] = {
+ {"stack-trace", 0, NULL, 'S'},
+ {"symbol-table", 1, NULL, 's'},
+ {"frame-pointers", 0, NULL, 'f'},
+ {"help", 0, NULL, 'h'},
+ {0, 0, 0, 0}
+ };
+ const char *symbol_table = NULL;
+
int vcpu = 0;
- if (argc < 2) {
- printf("usage: xenctx <domid> <optional vcpu>\n");
- exit(-1);
- }
-
- if (argc == 3)
- vcpu = atoi(argv[2]);
-
- dump_ctx(atoi(argv[1]), vcpu);
+ while ((ch = getopt_long(argc, argv, sopts, lopts, NULL)) != -1) {
+ switch(ch) {
+ case 'f':
+ frame_ptrs = 1;
+ break;
+ case 's':
+ symbol_table = optarg;
+ break;
+ case 'S':
+ stack_trace = 1;
+ break;
+ case 'h':
+ usage();
+ exit(-1);
+ case '?':
+ fprintf(stderr, "%s --help for more options\n", argv[0]);
+ exit(-1);
+ }
+ }
+
+ argv += optind; argc -= optind;
+
+ if (argc < 1 || argc > 2) {
+ printf("usage: xenctx [options] <domid> <optional vcpu>\n");
+ exit(-1);
+ }
+
+ domid = atoi(argv[0]);
+ if (domid==0) {
+ fprintf(stderr, "cannot trace dom0\n");
+ exit(-1);
+ }
+
+ if (argc == 2)
+ vcpu = atoi(argv[1]);
+
+ if (symbol_table)
+ read_symbol_table(symbol_table);
+
+ dump_ctx(vcpu);
return 0;
}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r 903fb46f240e -r cd914808acf1 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Tue Jan 3 14:59:00 2006
+++ b/xen/arch/ia64/xen/domain.c Tue Jan 3 16:19:20 2006
@@ -181,7 +181,7 @@
memset(v->arch._thread.fph,0,sizeof(struct ia64_fpreg)*96);
}
-void arch_do_createdomain(struct vcpu *v)
+int arch_do_createdomain(struct vcpu *v)
{
struct domain *d = v->domain;
struct thread_info *ti = alloc_thread_info(v);
@@ -248,7 +248,9 @@
}
} else
d->arch.mm = NULL;
- printf ("arch_do_create_domain: domain=%p\n", d);
+ printf ("arch_do_create_domain: domain=%p\n", d);
+
+ return 0;
}
void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c)
@@ -754,7 +756,10 @@
*/
void physdev_init_dom0(struct domain *d)
{
- set_bit(_DOMF_physdev_access, &d->domain_flags);
+ if (iomem_permit_access(d, 0UL, ~0UL))
+ BUG();
+ if (irqs_permit_access(d, 0, NR_PIRQS-1))
+ BUG();
}
unsigned int vmx_dom0 = 0;
diff -r 903fb46f240e -r cd914808acf1 xen/arch/ia64/xen/irq.c
--- a/xen/arch/ia64/xen/irq.c Tue Jan 3 14:59:00 2006
+++ b/xen/arch/ia64/xen/irq.c Tue Jan 3 16:19:20 2006
@@ -1377,9 +1377,6 @@
irq_guest_action_t *action;
unsigned long flags;
int rc = 0;
-
- if ( !IS_CAPABLE_PHYSDEV(d->domain) )
- return -EPERM;
spin_lock_irqsave(&desc->lock, flags);
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/Makefile
--- a/xen/arch/x86/Makefile Tue Jan 3 14:59:00 2006
+++ b/xen/arch/x86/Makefile Tue Jan 3 16:19:20 2006
@@ -29,6 +29,7 @@
endif
OBJS := $(subst $(TARGET_SUBARCH)/asm-offsets.o,,$(OBJS))
+OBJS := $(subst $(TARGET_SUBARCH)/xen.lds.o,,$(OBJS))
ifneq ($(crash_debug),y)
OBJS := $(patsubst cdb%.o,,$(OBJS))
@@ -43,21 +44,24 @@
$(CURDIR)/arch.o: $(OBJS)
$(LD) $(LDFLAGS) -r -o $@ $(OBJS)
-$(TARGET)-syms: boot/$(TARGET_SUBARCH).o $(ALL_OBJS) $(TARGET_SUBARCH)/xen.lds
- $(LD) $(LDFLAGS) -T $(TARGET_SUBARCH)/xen.lds -N \
+$(TARGET)-syms: boot/$(TARGET_SUBARCH).o $(ALL_OBJS) xen.lds
+ $(LD) $(LDFLAGS) -T xen.lds -N \
boot/$(TARGET_SUBARCH).o $(ALL_OBJS) -o $@
$(NM) -n $@ | $(BASEDIR)/tools/symbols >$(BASEDIR)/xen-syms.S
$(MAKE) $(BASEDIR)/xen-syms.o
- $(LD) $(LDFLAGS) -T $(TARGET_SUBARCH)/xen.lds -N \
+ $(LD) $(LDFLAGS) -T xen.lds -N \
boot/$(TARGET_SUBARCH).o $(ALL_OBJS) $(BASEDIR)/xen-syms.o -o $@
$(NM) -n $@ | $(BASEDIR)/tools/symbols >$(BASEDIR)/xen-syms.S
$(MAKE) $(BASEDIR)/xen-syms.o
- $(LD) $(LDFLAGS) -T $(TARGET_SUBARCH)/xen.lds -N \
+ $(LD) $(LDFLAGS) -T xen.lds -N \
boot/$(TARGET_SUBARCH).o $(ALL_OBJS) $(BASEDIR)/xen-syms.o -o $@
rm -f $(BASEDIR)/xen-syms.S $(BASEDIR)/xen-syms.o
asm-offsets.s: $(TARGET_SUBARCH)/asm-offsets.c $(HDRS)
$(CC) $(CFLAGS) -S -o $@ $<
+
+xen.lds: $(TARGET_SUBARCH)/xen.lds.S $(HDRS)
+ $(CC) $(CFLAGS) -P -E -Ui386 -D__ASSEMBLY__ -o $@ $<
boot/mkelf32: boot/mkelf32.c
$(HOSTCC) $(HOSTCFLAGS) -o $@ $<
@@ -73,5 +77,6 @@
rm -f dm/*.o dm/*~ dm/core
rm -f genapic/*.o genapic/*~ genapic/core
rm -f cpu/*.o cpu/*~ cpu/core
+ rm -f xen.lds
.PHONY: default clean
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/boot/x86_32.S
--- a/xen/arch/x86/boot/x86_32.S Tue Jan 3 14:59:00 2006
+++ b/xen/arch/x86/boot/x86_32.S Tue Jan 3 16:19:20 2006
@@ -1,5 +1,6 @@
#include <xen/config.h>
#include <public/xen.h>
+#include <asm/asm_defns.h>
#include <asm/desc.h>
#include <asm/page.h>
#include <asm/msr.h>
@@ -53,6 +54,7 @@
mov %ecx,%gs
ljmp $(__HYPERVISOR_CS),$(1f)-__PAGE_OFFSET
1: lss stack_start-__PAGE_OFFSET,%esp
+ add $(STACK_SIZE-CPUINFO_sizeof-__PAGE_OFFSET),%esp
/* Reset EFLAGS (subsumes CLI and CLD). */
pushl $0
@@ -189,7 +191,7 @@
/*** STACK LOCATION ***/
ENTRY(stack_start)
- .long cpu0_stack + STACK_SIZE - 200 - __PAGE_OFFSET
+ .long cpu0_stack
.long __HYPERVISOR_DS
/*** DESCRIPTOR TABLES ***/
@@ -256,10 +258,6 @@
.fill 1*PAGE_SIZE,1,0
#endif
-#if (STACK_ORDER == 0)
-.section ".bss.page_aligned","w"
-#else
-.section ".bss.twopage_aligned","w"
-#endif
+.section ".bss.stack_aligned","w"
ENTRY(cpu0_stack)
.fill STACK_SIZE,1,0
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/boot/x86_64.S
--- a/xen/arch/x86/boot/x86_64.S Tue Jan 3 14:59:00 2006
+++ b/xen/arch/x86/boot/x86_64.S Tue Jan 3 16:19:20 2006
@@ -1,5 +1,6 @@
#include <xen/config.h>
#include <public/xen.h>
+#include <asm/asm_defns.h>
#include <asm/desc.h>
#include <asm/page.h>
#include <asm/msr.h>
@@ -121,7 +122,8 @@
mov %rcx,%cr4
mov stack_start(%rip),%rsp
-
+ or $(STACK_SIZE-CPUINFO_sizeof),%rsp
+
/* Reset EFLAGS (subsumes CLI and CLD). */
pushq $0
popf
@@ -140,7 +142,7 @@
mov %ecx,%ss
lidt idt_descr(%rip)
-
+
cmp $(SECONDARY_CPU_FLAG),%ebx
je start_secondary
@@ -219,7 +221,7 @@
.quad idt_table
ENTRY(stack_start)
- .quad cpu0_stack + STACK_SIZE - 200
+ .quad cpu0_stack
high_start:
.quad __high_start
@@ -265,10 +267,6 @@
.org 0x4000 + PAGE_SIZE
.code64
-#if (STACK_ORDER == 0)
-.section ".bss.page_aligned","w"
-#else
-.section ".bss.twopage_aligned","w"
-#endif
+.section ".bss.stack_aligned","w"
ENTRY(cpu0_stack)
.fill STACK_SIZE,1,0
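
The boot-code change above replaces the old "cpu0_stack + STACK_SIZE - 200" magic with an explicit STACK_SIZE - CPUINFO_sizeof offset, so the initial stack pointer lands directly below the struct cpu_info kept at the top of the per-CPU stack. A minimal C sketch of the same arithmetic (the helper name is illustrative, not part of the patch):

#include <stddef.h>

/* Initial stack pointer: top of the stack area minus the space reserved
 * for struct cpu_info (CPUINFO_sizeof in the assembly above). */
static inline unsigned long initial_stack_pointer(unsigned long stack_base,
                                                  unsigned long stack_size,
                                                  size_t cpuinfo_size)
{
    return stack_base + stack_size - cpuinfo_size;
}
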
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/dom0_ops.c
--- a/xen/arch/x86/dom0_ops.c Tue Jan 3 14:59:00 2006
+++ b/xen/arch/x86/dom0_ops.c Tue Jan 3 16:19:20 2006
@@ -17,6 +17,7 @@
#include <asm/msr.h>
#include <xen/trace.h>
#include <xen/console.h>
+#include <xen/iocap.h>
#include <asm/shadow.h>
#include <asm/irq.h>
#include <asm/processor.h>
@@ -141,7 +142,6 @@
struct domain *d;
unsigned int fp = op->u.ioport_permission.first_port;
unsigned int np = op->u.ioport_permission.nr_ports;
- unsigned int p;
ret = -EINVAL;
if ( (fp + np) > 65536 )
@@ -152,26 +152,12 @@
op->u.ioport_permission.domain)) == NULL) )
break;
- ret = -ENOMEM;
- if ( d->arch.iobmp_mask != NULL )
- {
- if ( (d->arch.iobmp_mask = xmalloc_array(
- u8, IOBMP_BYTES)) == NULL )
- {
- put_domain(d);
- break;
- }
- memset(d->arch.iobmp_mask, 0xFF, IOBMP_BYTES);
- }
-
- ret = 0;
- for ( p = fp; p < (fp + np); p++ )
- {
- if ( op->u.ioport_permission.allow_access )
- clear_bit(p, d->arch.iobmp_mask);
- else
- set_bit(p, d->arch.iobmp_mask);
- }
+ if ( np == 0 )
+ ret = 0;
+ else if ( op->u.ioport_permission.allow_access )
+ ret = ioports_permit_access(d, fp, fp + np - 1);
+ else
+ ret = ioports_deny_access(d, fp, fp + np - 1);
put_domain(d);
}
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Tue Jan 3 14:59:00 2006
+++ b/xen/arch/x86/domain.c Tue Jan 3 16:19:20 2006
@@ -20,6 +20,7 @@
#include <xen/delay.h>
#include <xen/softirq.h>
#include <xen/grant_table.h>
+#include <xen/iocap.h>
#include <asm/regs.h>
#include <asm/mc146818rtc.h>
#include <asm/system.h>
@@ -35,9 +36,7 @@
#include <xen/console.h>
#include <xen/elf.h>
#include <asm/vmx.h>
-#include <asm/vmx_vmcs.h>
#include <asm/msr.h>
-#include <asm/physdev.h>
#include <xen/kernel.h>
#include <xen/multicall.h>
@@ -98,7 +97,7 @@
cpu_set(smp_processor_id(), v->domain->cpumask);
v->arch.schedule_tail = continue_idle_task;
- idle_loop();
+ reset_stack_and_jump(idle_loop);
}
static long no_idt[2];
@@ -185,11 +184,17 @@
{
struct pfn_info *page;
- if ( d->tot_pages < 10 )
+ printk("Memory pages belonging to domain %u:\n", d->domain_id);
+
+ if ( d->tot_pages >= 10 )
+ {
+ printk(" DomPage list too long to display\n");
+ }
+ else
{
list_for_each_entry ( page, &d->page_list, list )
{
- printk("Page %p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
+ printk(" DomPage %p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
_p(page_to_phys(page)), _p(page_to_pfn(page)),
page->count_info, page->u.inuse.type_info);
}
@@ -197,15 +202,10 @@
list_for_each_entry ( page, &d->xenpage_list, list )
{
- printk("XenPage %p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
+ printk(" XenPage %p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
_p(page_to_phys(page)), _p(page_to_pfn(page)),
page->count_info, page->u.inuse.type_info);
}
-
- page = virt_to_page(d->shared_info);
- printk("Shared_info@%p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
- _p(page_to_phys(page)), _p(page_to_pfn(page)), page->count_info,
- page->u.inuse.type_info);
}
struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
@@ -250,21 +250,34 @@
#endif
}
-void arch_do_createdomain(struct vcpu *v)
+int arch_do_createdomain(struct vcpu *v)
{
struct domain *d = v->domain;
l1_pgentry_t gdt_l1e;
- int vcpuid, pdpt_order;
+ int vcpuid, pdpt_order, rc;
#ifdef __x86_64__
int i;
#endif
if ( is_idle_task(d) )
- return;
+ return 0;
+
+ d->arch.ioport_caps =
+ rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
+ if ( d->arch.ioport_caps == NULL )
+ return -ENOMEM;
+
+ if ( (d->shared_info = alloc_xenheap_page()) == NULL )
+ return -ENOMEM;
+
+ if ( (rc = ptwr_init(d)) != 0 )
+ {
+ free_xenheap_page(d->shared_info);
+ return rc;
+ }
v->arch.schedule_tail = continue_nonidle_task;
- d->shared_info = alloc_xenheap_page();
memset(d->shared_info, 0, PAGE_SIZE);
v->vcpu_info = &d->shared_info->vcpu_info[v->vcpu_id];
v->cpumap = CPUMAP_RUNANYWHERE;
@@ -308,10 +321,10 @@
__PAGE_HYPERVISOR);
#endif
- (void)ptwr_init(d);
-
shadow_lock_init(d);
INIT_LIST_HEAD(&d->arch.free_shadow_frames);
+
+ return 0;
}
void vcpu_migrate_cpu(struct vcpu *v, int newcpu)
@@ -348,6 +361,8 @@
((c->user_regs.ss & 3) == 0) )
return -EINVAL;
}
+ else if ( !hvm_enabled )
+ return -EINVAL;
clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
if ( c->flags & VGCF_I387_VALID )
@@ -953,8 +968,6 @@
BUG_ON(!cpus_empty(d->cpumask));
- physdev_destroy_state(d);
-
ptwr_destroy(d);
/* Drop the in-use references to page-table bases. */
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c Tue Jan 3 14:59:00 2006
+++ b/xen/arch/x86/domain_build.c Tue Jan 3 16:19:20 2006
@@ -16,13 +16,13 @@
#include <xen/kernel.h>
#include <xen/domain.h>
#include <xen/compile.h>
+#include <xen/iocap.h>
#include <asm/regs.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/i387.h>
-#include <asm/physdev.h>
#include <asm/shadow.h>
static long dom0_nrpages;
@@ -94,9 +94,9 @@
return page;
}
-static void process_dom0_ioports_disable()
+static void process_dom0_ioports_disable(void)
{
- unsigned long io_from, io_to, io_nr;
+ unsigned long io_from, io_to;
char *t, *u, *s = opt_dom0_ioports_disable;
if ( *s == '\0' )
@@ -126,8 +126,8 @@
printk("Disabling dom0 access to ioport range %04lx-%04lx\n",
io_from, io_to);
- io_nr = io_to - io_from + 1;
- physdev_modify_ioport_access_range(dom0, 0, io_from, io_nr);
+ if ( ioports_deny_access(dom0, io_from, io_to) != 0 )
+ BUG();
}
}
@@ -183,7 +183,6 @@
/* Machine address of next candidate page-table page. */
unsigned long mpt_alloc;
- extern void physdev_init_dom0(struct domain *);
extern void translate_l2pgtable(
struct domain *d, l1_pgentry_t *p2m, unsigned long l2mfn);
@@ -692,9 +691,6 @@
zap_low_mappings(l2start);
zap_low_mappings(idle_pg_table_l2);
#endif
-
- /* DOM0 gets access to everything. */
- physdev_init_dom0(d);
init_domain_time(d);
@@ -746,19 +742,28 @@
printk("dom0: shadow setup done\n");
}
+ i = 0;
+
+ /* DOM0 is permitted full I/O capabilities. */
+ i |= ioports_permit_access(dom0, 0, 0xFFFF);
+ i |= iomem_permit_access(dom0, 0UL, ~0UL);
+ i |= irqs_permit_access(dom0, 0, NR_PIRQS-1);
+
/*
* Modify I/O port access permissions.
*/
/* Master Interrupt Controller (PIC). */
- physdev_modify_ioport_access_range(dom0, 0, 0x20, 2);
+ i |= ioports_deny_access(dom0, 0x20, 0x21);
/* Slave Interrupt Controller (PIC). */
- physdev_modify_ioport_access_range(dom0, 0, 0xA0, 2);
+ i |= ioports_deny_access(dom0, 0xA0, 0xA1);
/* Interval Timer (PIT). */
- physdev_modify_ioport_access_range(dom0, 0, 0x40, 4);
+ i |= ioports_deny_access(dom0, 0x40, 0x43);
/* PIT Channel 2 / PC Speaker Control. */
- physdev_modify_ioport_access_range(dom0, 0, 0x61, 1);
- /* Command-line passed i/o ranges */
+ i |= ioports_deny_access(dom0, 0x61, 0x61);
+ /* Command-line I/O ranges. */
process_dom0_ioports_disable();
+
+ BUG_ON(i != 0);
return 0;
}
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c Tue Jan 3 14:59:00 2006
+++ b/xen/arch/x86/irq.c Tue Jan 3 16:19:20 2006
@@ -199,15 +199,11 @@
int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
{
unsigned int vector = irq_to_vector(irq);
- struct domain *d = v->domain;
irq_desc_t *desc = &irq_desc[vector];
irq_guest_action_t *action;
unsigned long flags;
int rc = 0;
cpumask_t cpumask = CPU_MASK_NONE;
-
- if ( !IS_CAPABLE_PHYSDEV(d) )
- return -EPERM;
if ( vector == 0 )
return -EBUSY;
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Jan 3 14:59:00 2006
+++ b/xen/arch/x86/mm.c Tue Jan 3 16:19:20 2006
@@ -96,6 +96,7 @@
#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <xen/event.h>
+#include <xen/iocap.h>
#include <asm/shadow.h>
#include <asm/page.h>
#include <asm/flushtlb.h>
@@ -437,7 +438,6 @@
unsigned long mfn = l1e_get_pfn(l1e);
struct pfn_info *page = pfn_to_page(mfn);
int okay;
- extern int domain_iomem_in_pfn(struct domain *d, unsigned long pfn);
if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
return 1;
@@ -455,8 +455,7 @@
if ( d == dom_io )
d = current->domain;
- if ( (!IS_PRIV(d)) &&
- (!IS_CAPABLE_PHYSDEV(d) || !domain_iomem_in_pfn(d, mfn)) )
+ if ( !iomem_access_permitted(d, mfn, mfn) )
{
MEM_LOG("Non-privileged attempt to map I/O space %08lx", mfn);
return 0;
@@ -1887,7 +1886,7 @@
break;
case MMUEXT_FLUSH_CACHE:
- if ( unlikely(!IS_CAPABLE_PHYSDEV(d)) )
+ if ( unlikely(!cache_flush_permitted(d)) )
{
MEM_LOG("Non-physdev domain tried to FLUSH_CACHE.");
okay = 0;
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c Tue Jan 3 14:59:00 2006
+++ b/xen/arch/x86/physdev.c Tue Jan 3 16:19:20 2006
@@ -13,27 +13,6 @@
extern int ioapic_guest_read(int apicid, int address, u32 *pval);
extern int ioapic_guest_write(int apicid, int address, u32 pval);
-
-void physdev_modify_ioport_access_range(
- struct domain *d, int enable, int port, int num)
-{
- int i;
- for ( i = port; i < (port + num); i++ )
- (enable ? clear_bit : set_bit)(i, d->arch.iobmp_mask);
-}
-
-void physdev_destroy_state(struct domain *d)
-{
- xfree(d->arch.iobmp_mask);
- d->arch.iobmp_mask = NULL;
-}
-
-/* Check if a domain controls a device with IO memory within frame @pfn.
- * Returns: 1 if the domain should be allowed to map @pfn, 0 otherwise. */
-int domain_iomem_in_pfn(struct domain *p, unsigned long pfn)
-{
- return 0;
-}
/*
* Demuxing hypercall.
@@ -120,18 +99,6 @@
return ret;
}
-/* Domain 0 has read access to all devices. */
-void physdev_init_dom0(struct domain *d)
-{
- /* Access to all I/O ports. */
- d->arch.iobmp_mask = xmalloc_array(u8, IOBMP_BYTES);
- BUG_ON(d->arch.iobmp_mask == NULL);
- memset(d->arch.iobmp_mask, 0, IOBMP_BYTES);
-
- set_bit(_DOMF_physdev_access, &d->domain_flags);
-}
-
-
/*
* Local variables:
* mode: C
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c Tue Jan 3 14:59:00 2006
+++ b/xen/arch/x86/setup.c Tue Jan 3 16:19:20 2006
@@ -138,131 +138,19 @@
(*call)();
}
-static void __init start_of_day(void)
-{
- int i;
- unsigned long vgdt, gdt_pfn;
-
- early_cpu_init();
-
- paging_init();
-
- /* Unmap the first page of CPU0's stack. */
- memguard_guard_stack(cpu0_stack);
-
- open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);
-
- if ( opt_watchdog )
- nmi_watchdog = NMI_LOCAL_APIC;
-
- sort_exception_tables();
-
- arch_do_createdomain(current);
-
- /*
- * Map default GDT into its final positions in the idle page table. As
- * noted in arch_do_createdomain(), we must map for every possible VCPU#.
- */
- vgdt = GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE;
- gdt_pfn = virt_to_phys(gdt_table) >> PAGE_SHIFT;
- for ( i = 0; i < MAX_VIRT_CPUS; i++ )
- {
- map_pages_to_xen(vgdt, gdt_pfn, 1, PAGE_HYPERVISOR);
- vgdt += 1 << PDPT_VCPU_VA_SHIFT;
- }
-
- find_smp_config();
-
- smp_alloc_memory();
-
- dmi_scan_machine();
-
- generic_apic_probe();
-
- acpi_boot_table_init();
- acpi_boot_init();
-
- if ( smp_found_config )
- get_smp_config();
-
- init_apic_mappings();
-
- init_IRQ();
-
- trap_init();
-
- ac_timer_init();
-
- early_time_init();
-
- arch_init_memory();
-
- scheduler_init();
-
- identify_cpu(&boot_cpu_data);
- if ( cpu_has_fxsr )
- set_in_cr4(X86_CR4_OSFXSR);
- if ( cpu_has_xmm )
- set_in_cr4(X86_CR4_OSXMMEXCPT);
-
- if ( opt_nosmp )
- {
- max_cpus = 0;
- smp_num_siblings = 1;
- boot_cpu_data.x86_num_cores = 1;
- }
-
- smp_prepare_cpus(max_cpus);
-
- /* We aren't hotplug-capable yet. */
- BUG_ON(!cpus_empty(cpu_present_map));
- for_each_cpu ( i )
- cpu_set(i, cpu_present_map);
-
- /*
- * Initialise higher-level timer functions. We do this fairly late
- * (post-SMP) because the time bases and scale factors need to be updated
- * regularly, and SMP initialisation can cause a long delay with
- * interrupts not yet enabled.
- */
- init_xen_time();
-
- initialize_keytable();
-
- serial_init_postirq();
-
- BUG_ON(!local_irq_is_enabled());
-
- for_each_present_cpu ( i )
- {
- if ( num_online_cpus() >= max_cpus )
- break;
- if ( !cpu_online(i) )
- __cpu_up(i);
- }
-
- printk("Brought up %ld CPUs\n", (long)num_online_cpus());
- smp_cpus_done(max_cpus);
-
- do_initcalls();
-
- schedulers_start();
-
- watchdog_enable();
-}
-
#define EARLY_FAIL() for ( ; ; ) __asm__ __volatile__ ( "hlt" )
static struct e820entry e820_raw[E820MAX];
void __init __start_xen(multiboot_info_t *mbi)
{
+ unsigned long vgdt, gdt_pfn;
char *cmdline;
+ unsigned long _initrd_start = 0, _initrd_len = 0;
+ unsigned int initrdidx = 1;
module_t *mod = (module_t *)__va(mbi->mods_addr);
unsigned long nr_pages, modules_length;
unsigned long initial_images_start, initial_images_end;
- unsigned long _initrd_start = 0, _initrd_len = 0;
- unsigned int initrdidx = 1;
physaddr_t s, e;
int i, e820_warn = 0, e820_raw_nr = 0, bytes = 0;
struct ns16550_defaults ns16550 = {
@@ -486,7 +374,113 @@
early_boot = 0;
- start_of_day();
+ early_cpu_init();
+
+ paging_init();
+
+ /* Unmap the first page of CPU0's stack. */
+ memguard_guard_stack(cpu0_stack);
+
+ open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);
+
+ if ( opt_watchdog )
+ nmi_watchdog = NMI_LOCAL_APIC;
+
+ sort_exception_tables();
+
+ if ( arch_do_createdomain(current) != 0 )
+ BUG();
+
+ /*
+ * Map default GDT into its final positions in the idle page table. As
+ * noted in arch_do_createdomain(), we must map for every possible VCPU#.
+ */
+ vgdt = GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE;
+ gdt_pfn = virt_to_phys(gdt_table) >> PAGE_SHIFT;
+ for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+ {
+ map_pages_to_xen(vgdt, gdt_pfn, 1, PAGE_HYPERVISOR);
+ vgdt += 1 << PDPT_VCPU_VA_SHIFT;
+ }
+
+ find_smp_config();
+
+ smp_alloc_memory();
+
+ dmi_scan_machine();
+
+ generic_apic_probe();
+
+ acpi_boot_table_init();
+ acpi_boot_init();
+
+ if ( smp_found_config )
+ get_smp_config();
+
+ init_apic_mappings();
+
+ init_IRQ();
+
+ trap_init();
+
+ ac_timer_init();
+
+ early_time_init();
+
+ arch_init_memory();
+
+ scheduler_init();
+
+ identify_cpu(&boot_cpu_data);
+ if ( cpu_has_fxsr )
+ set_in_cr4(X86_CR4_OSFXSR);
+ if ( cpu_has_xmm )
+ set_in_cr4(X86_CR4_OSXMMEXCPT);
+
+ if ( opt_nosmp )
+ {
+ max_cpus = 0;
+ smp_num_siblings = 1;
+ boot_cpu_data.x86_num_cores = 1;
+ }
+
+ smp_prepare_cpus(max_cpus);
+
+ /* We aren't hotplug-capable yet. */
+ BUG_ON(!cpus_empty(cpu_present_map));
+ for_each_cpu ( i )
+ cpu_set(i, cpu_present_map);
+
+ /*
+ * Initialise higher-level timer functions. We do this fairly late
+ * (post-SMP) because the time bases and scale factors need to be updated
+ * regularly, and SMP initialisation can cause a long delay with
+ * interrupts not yet enabled.
+ */
+ init_xen_time();
+
+ initialize_keytable();
+
+ serial_init_postirq();
+
+ BUG_ON(!local_irq_is_enabled());
+
+ for_each_present_cpu ( i )
+ {
+ if ( num_online_cpus() >= max_cpus )
+ break;
+ if ( !cpu_online(i) )
+ __cpu_up(i);
+ }
+
+ printk("Brought up %ld CPUs\n", (long)num_online_cpus());
+ smp_cpus_done(max_cpus);
+
+ do_initcalls();
+
+ schedulers_start();
+
+ watchdog_enable();
shadow_mode_init();
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c Tue Jan 3 14:59:00 2006
+++ b/xen/arch/x86/smpboot.c Tue Jan 3 16:19:20 2006
@@ -763,7 +763,6 @@
{
struct domain *idle;
struct vcpu *v;
- void *stack;
unsigned long boot_error;
int timeout, cpu;
unsigned long start_eip;
@@ -786,16 +785,10 @@
/* So we see what's up */
printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
- stack = alloc_xenheap_pages(STACK_ORDER);
-#if defined(__i386__)
- stack_start.esp = (void *)__pa(stack);
-#elif defined(__x86_64__)
- stack_start.esp = stack;
-#endif
- stack_start.esp += STACK_SIZE - sizeof(struct cpu_info);
+ stack_start.esp = alloc_xenheap_pages(STACK_ORDER);
/* Debug build: detect stack overflow by setting up a guard page. */
- memguard_guard_stack(stack);
+ memguard_guard_stack(stack_start.esp);
/*
* This grunge runs the startup process for
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Tue Jan 3 14:59:00 2006
+++ b/xen/arch/x86/traps.c Tue Jan 3 16:19:20 2006
@@ -41,6 +41,7 @@
#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <xen/symbols.h>
+#include <xen/iocap.h>
#include <asm/shadow.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -192,7 +193,8 @@
/* Bounds for range of valid frame pointer. */
low = (unsigned long)(ESP_BEFORE_EXCEPTION(regs) - 2);
- high = (low & ~(STACK_SIZE - 1)) + (STACK_SIZE - sizeof(struct cpu_info));
+ high = (low & ~(STACK_SIZE - 1)) +
+ (STACK_SIZE - sizeof(struct cpu_info) - 2*sizeof(unsigned long));
/* The initial frame pointer. */
next = regs->ebp;
@@ -200,14 +202,14 @@
for ( ; ; )
{
/* Valid frame pointer? */
- if ( (next < low) || (next > high) )
+ if ( (next < low) || (next >= high) )
{
/*
* Exception stack frames have a different layout, denoted by an
* inverted frame pointer.
*/
next = ~next;
- if ( (next < low) || (next > high) )
+ if ( (next < low) || (next >= high) )
break;
frame = (unsigned long *)next;
next = frame[0];
@@ -621,17 +623,7 @@
unsigned int port, unsigned int bytes,
struct vcpu *v, struct cpu_user_regs *regs)
{
- struct domain *d = v->domain;
- u16 x;
-
- if ( d->arch.iobmp_mask != NULL )
- {
- x = *(u16 *)(d->arch.iobmp_mask + (port >> 3));
- if ( (x & (((1<<bytes)-1) << (port&7))) == 0 )
- return 1;
- }
-
- return 0;
+ return ioports_access_permitted(v->domain, port, port + bytes - 1);
}
/* Check admin limits. Silently fail the access if it is disallowed. */
@@ -871,7 +863,7 @@
case 0x09: /* WBINVD */
/* Ignore the instruction if unprivileged. */
- if ( !IS_CAPABLE_PHYSDEV(v->domain) )
+ if ( !cache_flush_permitted(v->domain) )
DPRINTK("Non-physdev domain attempted WBINVD.\n");
else
wbinvd();
@@ -885,7 +877,8 @@
switch ( modrm_reg )
{
case 0: /* Read CR0 */
- *reg = v->arch.guest_context.ctrlreg[0];
+ *reg = (read_cr0() & ~X86_CR0_TS) |
+ v->arch.guest_context.ctrlreg[0];
break;
case 2: /* Read CR2 */
@@ -927,6 +920,11 @@
switch ( modrm_reg )
{
case 0: /* Write CR0 */
+ if ( (*reg ^ read_cr0()) & ~X86_CR0_TS )
+ {
+ DPRINTK("Attempt to change unmodifiable CR0 flags.\n");
+ goto fail;
+ }
(void)do_fpu_taskswitch(!!(*reg & X86_CR0_TS));
break;
@@ -939,6 +937,14 @@
LOCK_BIGLOCK(v->domain);
(void)new_guest_cr3(*reg);
UNLOCK_BIGLOCK(v->domain);
+ break;
+
+ case 4:
+ if ( *reg != (read_cr4() & ~(X86_CR4_PGE|X86_CR4_PSE)) )
+ {
+ DPRINTK("Attempt to change CR4 flags.\n");
+ goto fail;
+ }
break;
default:
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/x86_emulate.c
--- a/xen/arch/x86/x86_emulate.c Tue Jan 3 14:59:00 2006
+++ b/xen/arch/x86/x86_emulate.c Tue Jan 3 16:19:20 2006
@@ -371,6 +371,21 @@
(_type)_x; \
})
+/* Access/update address held in a register, based on addressing mode. */
+#define register_address(sel, reg) \
+ ((ad_bytes == sizeof(unsigned long)) ? (reg) : \
+ ((mode == X86EMUL_MODE_REAL) ? /* implies ad_bytes == 2 */ \
+ (((unsigned long)(sel) << 4) + ((reg) & 0xffff)) : \
+ ((reg) & ((1UL << (ad_bytes << 3)) - 1))))
+#define register_address_increment(reg, inc) \
+do { \
+ if ( ad_bytes == sizeof(unsigned long) ) \
+ (reg) += (inc); \
+ else \
+ (reg) = ((reg) & ~((1UL << (ad_bytes << 3)) - 1)) | \
+ (((reg) + (inc)) & ((1UL << (ad_bytes << 3)) - 1)); \
+} while (0)
+
void *
decode_register(
uint8_t modrm_reg, struct cpu_user_regs *regs, int highbyte_regs)
@@ -420,32 +435,64 @@
{
uint8_t b, d, sib, twobyte = 0, rex_prefix = 0;
uint8_t modrm, modrm_mod = 0, modrm_reg = 0, modrm_rm = 0;
- unsigned int op_bytes = (mode == 8) ? 4 : mode, ad_bytes = mode;
- unsigned int lock_prefix = 0, rep_prefix = 0, i;
+ uint16_t *seg = NULL; /* override segment */
+ unsigned int op_bytes, ad_bytes, lock_prefix = 0, rep_prefix = 0, i;
int rc = 0;
struct operand src, dst;
/* Shadow copy of register state. Committed on successful emulation. */
struct cpu_user_regs _regs = *regs;
+ switch ( mode )
+ {
+ case X86EMUL_MODE_REAL:
+ case X86EMUL_MODE_PROT16:
+ op_bytes = ad_bytes = 2;
+ break;
+ case X86EMUL_MODE_PROT32:
+ op_bytes = ad_bytes = 4;
+ break;
+#ifdef __x86_64__
+ case X86EMUL_MODE_PROT64:
+ op_bytes = 4;
+ ad_bytes = 8;
+ break;
+#endif
+ default:
+ return -1;
+ }
+
/* Legacy prefixes. */
for ( i = 0; i < 8; i++ )
{
switch ( b = insn_fetch(uint8_t, 1, _regs.eip) )
{
case 0x66: /* operand-size override */
- op_bytes ^= 6; /* switch between 2/4 bytes */
+ op_bytes ^= 6; /* switch between 2/4 bytes */
break;
case 0x67: /* address-size override */
- ad_bytes ^= (mode == 8) ? 12 : 6; /* switch between 2/4/8 bytes */
+ if ( mode == X86EMUL_MODE_PROT64 )
+ ad_bytes ^= 12; /* switch between 4/8 bytes */
+ else
+ ad_bytes ^= 6; /* switch between 2/4 bytes */
break;
case 0x2e: /* CS override */
+ seg = &_regs.cs;
+ break;
case 0x3e: /* DS override */
+ seg = &_regs.ds;
+ break;
case 0x26: /* ES override */
+ seg = &_regs.es;
+ break;
case 0x64: /* FS override */
+ seg = &_regs.fs;
+ break;
case 0x65: /* GS override */
+ seg = &_regs.gs;
+ break;
case 0x36: /* SS override */
- DPRINTF("Warning: ignoring a segment override.\n");
+ seg = &_regs.ss;
break;
case 0xf0: /* LOCK */
lock_prefix = 1;
@@ -461,8 +508,12 @@
}
done_prefixes:
+ /* Not quite the same as 80386 real mode, but hopefully good enough. */
+ if ( (mode == X86EMUL_MODE_REAL) && (ad_bytes != 2) )
+ goto cannot_emulate;
+
/* REX prefix. */
- if ( (mode == 8) && ((b & 0xf0) == 0x40) )
+ if ( (mode == X86EMUL_MODE_PROT64) && ((b & 0xf0) == 0x40) )
{
rex_prefix = b;
if ( b & 8 )
@@ -674,7 +725,7 @@
emulate_2op_SrcV("cmp", src, dst, _regs.eflags);
break;
case 0x63: /* movsxd */
- if ( mode != 8 ) /* x86/64 long mode only */
+ if ( mode != X86EMUL_MODE_PROT64 )
goto cannot_emulate;
dst.val = (int32_t)src.val;
break;
@@ -721,12 +772,13 @@
dst.val = src.val;
break;
case 0x8f: /* pop (sole member of Grp1a) */
- /* 64-bit mode: POP defaults to 64-bit operands. */
- if ( (mode == 8) && (dst.bytes == 4) )
+ /* 64-bit mode: POP always pops a 64-bit operand. */
+ if ( mode == X86EMUL_MODE_PROT64 )
dst.bytes = 8;
- if ( (rc = ops->read_std(_regs.esp, &dst.val, dst.bytes)) != 0 )
+ if ( (rc = ops->read_std(register_address(_regs.ss, _regs.esp),
+ &dst.val, dst.bytes)) != 0 )
goto done;
- _regs.esp += dst.bytes;
+ register_address_increment(_regs.esp, dst.bytes);
break;
case 0xc0 ... 0xc1: grp2: /* Grp2 */
switch ( modrm_reg )
@@ -797,16 +849,17 @@
emulate_1op("dec", dst, _regs.eflags);
break;
case 6: /* push */
- /* 64-bit mode: PUSH defaults to 64-bit operands. */
- if ( (mode == 8) && (dst.bytes == 4) )
+ /* 64-bit mode: PUSH always pushes a 64-bit operand. */
+ if ( mode == X86EMUL_MODE_PROT64 )
{
dst.bytes = 8;
if ( (rc = ops->read_std((unsigned long)dst.ptr,
&dst.val, 8)) != 0 )
goto done;
}
- _regs.esp -= dst.bytes;
- if ( (rc = ops->write_std(_regs.esp, dst.val, dst.bytes)) != 0 )
+ register_address_increment(_regs.esp, -dst.bytes);
+ if ( (rc = ops->write_std(register_address(_regs.ss, _regs.esp),
+ dst.val, dst.bytes)) != 0 )
goto done;
dst.val = dst.orig_val; /* skanky: disable writeback */
break;
@@ -873,19 +926,22 @@
{
/* Write fault: destination is special memory. */
dst.ptr = (unsigned long *)cr2;
- if ( (rc = ops->read_std(_regs.esi - _regs.edi + cr2,
+ if ( (rc = ops->read_std(register_address(seg ? *seg : _regs.ds,
+ _regs.esi),
&dst.val, dst.bytes)) != 0 )
goto done;
}
else
{
/* Read fault: source is special memory. */
- dst.ptr = (unsigned long *)(_regs.edi - _regs.esi + cr2);
+ dst.ptr = (unsigned long *)register_address(_regs.es, _regs.edi);
if ( (rc = ops->read_emulated(cr2, &dst.val, dst.bytes)) != 0 )
goto done;
}
- _regs.esi += (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes;
- _regs.edi += (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes;
+ register_address_increment(
+ _regs.esi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
+ register_address_increment(
+ _regs.edi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
break;
case 0xa6 ... 0xa7: /* cmps */
DPRINTF("Urk! I don't handle CMPS.\n");
@@ -895,7 +951,8 @@
dst.bytes = (d & ByteOp) ? 1 : op_bytes;
dst.ptr = (unsigned long *)cr2;
dst.val = _regs.eax;
- _regs.edi += (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes;
+ register_address_increment(
+ _regs.edi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
break;
case 0xac ... 0xad: /* lods */
dst.type = OP_REG;
@@ -903,7 +960,8 @@
dst.ptr = (unsigned long *)&_regs.eax;
if ( (rc = ops->read_emulated(cr2, &dst.val, dst.bytes)) != 0 )
goto done;
- _regs.esi += (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes;
+ register_address_increment(
+ _regs.esi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
break;
case 0xae ... 0xaf: /* scas */
DPRINTF("Urk! I don't handle SCAS.\n");
diff -r 903fb46f240e -r cd914808acf1 xen/common/dom0_ops.c
--- a/xen/common/dom0_ops.c Tue Jan 3 14:59:00 2006
+++ b/xen/common/dom0_ops.c Tue Jan 3 16:19:20 2006
@@ -16,6 +16,7 @@
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
+#include <xen/iocap.h>
#include <asm/current.h>
#include <public/dom0_ops.h>
#include <public/sched_ctl.h>
@@ -582,6 +583,7 @@
}
}
break;
+
case DOM0_SETDEBUGGING:
{
struct domain *d;
@@ -596,6 +598,53 @@
put_domain(d);
ret = 0;
}
+ }
+ break;
+
+ case DOM0_IRQ_PERMISSION:
+ {
+ struct domain *d;
+ unsigned int pirq = op->u.irq_permission.pirq;
+
+ ret = -EINVAL;
+ if ( pirq >= NR_PIRQS )
+ break;
+
+ ret = -ESRCH;
+ d = find_domain_by_id(op->u.irq_permission.domain);
+ if ( d == NULL )
+ break;
+
+ if ( op->u.irq_permission.allow_access )
+ ret = irq_permit_access(d, pirq);
+ else
+ ret = irq_deny_access(d, pirq);
+
+ put_domain(d);
+ }
+ break;
+
+ case DOM0_IOMEM_PERMISSION:
+ {
+ struct domain *d;
+ unsigned long pfn = op->u.iomem_permission.first_pfn;
+ unsigned long nr_pfns = op->u.iomem_permission.nr_pfns;
+
+ ret = -EINVAL;
+ if ( (pfn + nr_pfns - 1) < pfn ) /* wrap? */
+ break;
+
+ ret = -ESRCH;
+ d = find_domain_by_id(op->u.iomem_permission.domain);
+ if ( d == NULL )
+ break;
+
+ if ( op->u.iomem_permission.allow_access )
+ ret = iomem_permit_access(d, pfn, pfn + nr_pfns - 1);
+ else
+ ret = iomem_deny_access(d, pfn, pfn + nr_pfns - 1);
+
+ put_domain(d);
}
break;
diff -r 903fb46f240e -r cd914808acf1 xen/common/domain.c
--- a/xen/common/domain.c Tue Jan 3 14:59:00 2006
+++ b/xen/common/domain.c Tue Jan 3 16:19:20 2006
@@ -16,6 +16,7 @@
#include <xen/console.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
+#include <xen/rangeset.h>
#include <asm/debugger.h>
#include <public/dom0_ops.h>
#include <public/sched.h>
@@ -52,22 +53,21 @@
if ( !is_idle_task(d) &&
((evtchn_init(d) != 0) || (grant_table_create(d) != 0)) )
- {
- evtchn_destroy(d);
- free_domain(d);
- return NULL;
- }
+ goto fail1;
if ( (v = alloc_vcpu(d, 0, cpu)) == NULL )
- {
- grant_table_destroy(d);
- evtchn_destroy(d);
- free_domain(d);
- return NULL;
- }
-
- arch_do_createdomain(v);
-
+ goto fail2;
+
+ rangeset_domain_initialise(d);
+
+ d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
+ d->irq_caps = rangeset_new(d, "Interrupts", 0);
+
+ if ( (d->iomem_caps == NULL) ||
+ (d->irq_caps == NULL) ||
+ (arch_do_createdomain(v) != 0) )
+ goto fail3;
+
if ( !is_idle_task(d) )
{
write_lock(&domlist_lock);
@@ -83,6 +83,15 @@
}
return d;
+
+ fail3:
+ rangeset_domain_destroy(d);
+ fail2:
+ grant_table_destroy(d);
+ fail1:
+ evtchn_destroy(d);
+ free_domain(d);
+ return NULL;
}
@@ -271,6 +280,8 @@
*pd = d->next_in_hashbucket;
write_unlock(&domlist_lock);
+ rangeset_domain_destroy(d);
+
evtchn_destroy(d);
grant_table_destroy(d);
diff -r 903fb46f240e -r cd914808acf1 xen/common/event_channel.c
--- a/xen/common/event_channel.c Tue Jan 3 14:59:00 2006
+++ b/xen/common/event_channel.c Tue Jan 3 16:19:20 2006
@@ -22,6 +22,7 @@
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
+#include <xen/iocap.h>
#include <asm/current.h>
#include <public/xen.h>
@@ -241,6 +242,9 @@
if ( pirq >= ARRAY_SIZE(d->pirq_to_evtchn) )
return -EINVAL;
+
+ if ( !irq_access_permitted(d, pirq) )
+ return -EPERM;
spin_lock(&d->evtchn_lock);
diff -r 903fb46f240e -r cd914808acf1 xen/common/keyhandler.c
--- a/xen/common/keyhandler.c Tue Jan 3 14:59:00 2006
+++ b/xen/common/keyhandler.c Tue Jan 3 16:19:20 2006
@@ -11,6 +11,7 @@
#include <xen/sched.h>
#include <xen/softirq.h>
#include <xen/domain.h>
+#include <xen/rangeset.h>
#include <asm/debugger.h>
#define KEY_MAX 256
@@ -109,31 +110,32 @@
for_each_domain ( d )
{
- printk("Xen: DOM %u, flags=%lx refcnt=%d nr_pages=%d "
- "xenheap_pages=%d\n", d->domain_id, d->domain_flags,
- atomic_read(&d->refcnt), d->tot_pages, d->xenheap_pages);
- /* The handle is printed according to the OSF DCE UUID spec., even
- though it is not necessarily such a thing, for ease of use when it
- _is_ one of those. */
- printk(" handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
+ printk("General information for domain %u:\n", d->domain_id);
+ printk(" flags=%lx refcnt=%d nr_pages=%d xenheap_pages=%d\n",
+ d->domain_flags, atomic_read(&d->refcnt),
+ d->tot_pages, d->xenheap_pages);
+ printk(" handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
"%02x%02x-%02x%02x%02x%02x%02x%02x\n",
d->handle[ 0], d->handle[ 1], d->handle[ 2], d->handle[ 3],
d->handle[ 4], d->handle[ 5], d->handle[ 6], d->handle[ 7],
d->handle[ 8], d->handle[ 9], d->handle[10], d->handle[11],
d->handle[12], d->handle[13], d->handle[14], d->handle[15]);
+ rangeset_domain_printk(d);
+
dump_pageframe_info(d);
+ printk("VCPU information and callbacks for domain %u:\n",
+ d->domain_id);
for_each_vcpu ( d, v ) {
- printk("Guest: %p CPU %d [has=%c] flags=%lx "
- "upcall_pend = %02x, upcall_mask = %02x\n", v,
- v->processor,
+ printk(" VCPU%d: CPU%d [has=%c] flags=%lx "
+ "upcall_pend = %02x, upcall_mask = %02x\n",
+ v->vcpu_id, v->processor,
test_bit(_VCPUF_running, &v->vcpu_flags) ? 'T':'F',
v->vcpu_flags,
v->vcpu_info->evtchn_upcall_pending,
v->vcpu_info->evtchn_upcall_mask);
- printk("Notifying guest... %d/%d\n", d->domain_id, v->vcpu_id);
- printk("port %d/%d stat %d %d %d\n",
+ printk(" Notifying guest (virq %d, port %d, stat %d/%d/%d)\n",
VIRQ_DEBUG, v->virq_to_evtchn[VIRQ_DEBUG],
test_bit(v->virq_to_evtchn[VIRQ_DEBUG],
&d->shared_info->evtchn_pending[0]),
diff -r 903fb46f240e -r cd914808acf1 xen/common/memory.c
--- a/xen/common/memory.c Tue Jan 3 14:59:00 2006
+++ b/xen/common/memory.c Tue Jan 3 16:19:20 2006
@@ -15,6 +15,7 @@
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/shadow.h>
+#include <xen/iocap.h>
#include <asm/current.h>
#include <asm/hardirq.h>
#include <public/memory.h>
@@ -35,7 +36,8 @@
!array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
return 0;
- if ( (extent_order != 0) && !IS_CAPABLE_PHYSDEV(current->domain) )
+ if ( (extent_order != 0) &&
+ !multipage_allocation_permitted(current->domain) )
{
DPRINTK("Only I/O-capable domains may allocate multi-page extents.\n");
return 0;
diff -r 903fb46f240e -r cd914808acf1 xen/drivers/char/ns16550.c
--- a/xen/drivers/char/ns16550.c Tue Jan 3 14:59:00 2006
+++ b/xen/drivers/char/ns16550.c Tue Jan 3 16:19:20 2006
@@ -13,6 +13,7 @@
#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/serial.h>
+#include <xen/iocap.h>
#include <asm/io.h>
/*
@@ -233,11 +234,11 @@
}
#ifdef CONFIG_X86
-#include <asm/physdev.h>
static void ns16550_endboot(struct serial_port *port)
{
struct ns16550 *uart = port->uart;
- physdev_modify_ioport_access_range(dom0, 0, uart->io_base, 8);
+ if ( ioports_deny_access(dom0, uart->io_base, uart->io_base + 7) != 0 )
+ BUG();
}
#else
#define ns16550_endboot NULL
diff -r 903fb46f240e -r cd914808acf1 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h Tue Jan 3 14:59:00 2006
+++ b/xen/include/asm-ia64/domain.h Tue Jan 3 16:19:20 2006
@@ -10,7 +10,7 @@
#include <asm/vmx_platform.h>
#include <xen/list.h>
-extern void arch_do_createdomain(struct vcpu *);
+extern int arch_do_createdomain(struct vcpu *);
extern void domain_relinquish_resources(struct domain *);
diff -r 903fb46f240e -r cd914808acf1 xen/include/asm-x86/current.h
--- a/xen/include/asm-x86/current.h Tue Jan 3 14:59:00 2006
+++ b/xen/include/asm-x86/current.h Tue Jan 3 16:19:20 2006
@@ -49,7 +49,7 @@
#define reset_stack_and_jump(__fn) \
__asm__ __volatile__ ( \
"mov %0,%%"__OP"sp; jmp "STR(__fn) \
- : : "r" (guest_cpu_user_regs()) )
+ : : "r" (guest_cpu_user_regs()) : "memory" )
#define schedule_tail(_ed) (((_ed)->arch.schedule_tail)(_ed))
diff -r 903fb46f240e -r cd914808acf1 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h Tue Jan 3 14:59:00 2006
+++ b/xen/include/asm-x86/domain.h Tue Jan 3 16:19:20 2006
@@ -24,8 +24,8 @@
/* Writable pagetables. */
struct ptwr_info ptwr[2];
- /* I/O-port access bitmap mask. */
- u8 *iobmp_mask; /* Address of IO bitmap mask, or NULL. */
+ /* I/O-port admin-specified access capabilities. */
+ struct rangeset *ioport_caps;
/* Shadow mode status and controls. */
struct shadow_ops *ops;
diff -r 903fb46f240e -r cd914808acf1 xen/include/asm-x86/msr.h
--- a/xen/include/asm-x86/msr.h Tue Jan 3 14:59:00 2006
+++ b/xen/include/asm-x86/msr.h Tue Jan 3 16:19:20 2006
@@ -12,7 +12,7 @@
__asm__ __volatile__("rdmsr" \
: "=a" (a__), "=d" (b__) \
: "c" (msr)); \
- val = a__ | (b__<<32); \
+ val = a__ | ((u64)b__<<32); \
} while(0);
#define wrmsr(msr,val1,val2) \
diff -r 903fb46f240e -r cd914808acf1 xen/include/asm-x86/x86_emulate.h
--- a/xen/include/asm-x86/x86_emulate.h Tue Jan 3 14:59:00 2006
+++ b/xen/include/asm-x86/x86_emulate.h Tue Jan 3 16:19:20 2006
@@ -141,6 +141,12 @@
struct cpu_user_regs;
+/* Current execution mode, passed to the emulator. */
+#define X86EMUL_MODE_REAL 0
+#define X86EMUL_MODE_PROT16 2
+#define X86EMUL_MODE_PROT32 4
+#define X86EMUL_MODE_PROT64 8
+
/*
* x86_emulate_memop: Emulate an instruction that faulted attempting to
* read/write a 'special' memory area.
@@ -149,6 +155,8 @@
* @ops: Interface to access special memory.
* @mode: Current execution mode, represented by the default size of memory
* addresses, in bytes. Valid values are 2, 4 and 8 (x86/64 only).
+ * Alternatively use the appropriate X86EMUL_MODE value (which also
+ * includes a value for emulating real mode).
*/
extern int
x86_emulate_memop(
diff -r 903fb46f240e -r cd914808acf1 xen/include/public/dom0_ops.h
--- a/xen/include/public/dom0_ops.h Tue Jan 3 14:59:00 2006
+++ b/xen/include/public/dom0_ops.h Tue Jan 3 16:19:20 2006
@@ -410,6 +410,21 @@
uint8_t enable;
} dom0_setdebugging_t;
+#define DOM0_IRQ_PERMISSION 46
+typedef struct {
+ domid_t domain; /* domain to be affected */
+ uint8_t pirq;
+ uint8_t allow_access; /* flag to specify enable/disable of IRQ access */
+} dom0_irq_permission_t;
+
+#define DOM0_IOMEM_PERMISSION 47
+typedef struct {
+ domid_t domain; /* domain to be affected */
+ unsigned long first_pfn; /* first page (physical page number) in range */
+ unsigned long nr_pfns; /* number of pages in range (>0) */
+ uint8_t allow_access; /* allow (!0) or deny (0) access to range? */
+} dom0_iomem_permission_t;
+
typedef struct {
uint32_t cmd;
uint32_t interface_version; /* DOM0_INTERFACE_VERSION */
@@ -448,6 +463,8 @@
dom0_max_vcpus_t max_vcpus;
dom0_setdomainhandle_t setdomainhandle;
dom0_setdebugging_t setdebugging;
+ dom0_irq_permission_t irq_permission;
+ dom0_iomem_permission_t iomem_permission;
uint8_t pad[128];
} u;
} dom0_op_t;
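
The two new dom0 ops give the control tools a way to grant or revoke IRQ and I/O-memory ranges per domain. A sketch of how a caller might fill in the IRQ variant, based only on the structure above (the hypercall plumbing through libxc is omitted here):

#include <string.h>

static void build_irq_permission_op(dom0_op_t *op, domid_t dom,
                                    uint8_t pirq, int allow)
{
    memset(op, 0, sizeof(*op));
    op->cmd = DOM0_IRQ_PERMISSION;
    op->interface_version = DOM0_INTERFACE_VERSION;
    op->u.irq_permission.domain = dom;
    op->u.irq_permission.pirq = pirq;
    op->u.irq_permission.allow_access = allow ? 1 : 0;
}
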
diff -r 903fb46f240e -r cd914808acf1 xen/include/xen/compiler.h
--- a/xen/include/xen/compiler.h Tue Jan 3 14:59:00 2006
+++ b/xen/include/xen/compiler.h Tue Jan 3 16:19:20 2006
@@ -19,4 +19,10 @@
#define __attribute_used__ __attribute__((__unused__))
#endif
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
+#define __must_check __attribute__((warn_unused_result))
+#else
+#define __must_check
+#endif
+
#endif /* __LINUX_COMPILER_H */
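
__must_check is the usual warn_unused_result wrapper: on GCC 3.4 or newer the compiler warns whenever a caller drops the return value of a function declared with it. For example (an illustrative declaration, not one added by this patch):

int __must_check setup_io_capabilities(struct domain *d);
/* Ignoring the return value at a call site now triggers
 * "warning: ignoring return value of 'setup_io_capabilities'". */
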
diff -r 903fb46f240e -r cd914808acf1 xen/include/xen/domain.h
--- a/xen/include/xen/domain.h Tue Jan 3 14:59:00 2006
+++ b/xen/include/xen/domain.h Tue Jan 3 16:19:20 2006
@@ -13,9 +13,9 @@
extern void free_vcpu_struct(struct vcpu *v);
-extern void arch_do_createdomain(struct vcpu *v);
+extern int arch_do_createdomain(struct vcpu *v);
-extern int arch_set_info_guest(
+extern int arch_set_info_guest(
struct vcpu *v, struct vcpu_guest_context *c);
extern void vcpu_migrate_cpu(struct vcpu *v, int newcpu);
diff -r 903fb46f240e -r cd914808acf1 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Tue Jan 3 14:59:00 2006
+++ b/xen/include/xen/sched.h Tue Jan 3 16:19:20 2006
@@ -11,6 +11,7 @@
#include <xen/time.h>
#include <xen/ac_timer.h>
#include <xen/grant_table.h>
+#include <xen/rangeset.h>
#include <asm/domain.h>
extern unsigned long volatile jiffies;
@@ -110,6 +111,9 @@
struct domain *next_in_list;
struct domain *next_in_hashbucket;
+ struct list_head rangesets;
+ spinlock_t rangesets_lock;
+
/* Event channel information. */
struct evtchn *evtchn[NR_EVTCHN_BUCKETS];
spinlock_t evtchn_lock;
@@ -124,6 +128,10 @@
#define NR_PIRQS 256 /* Put this somewhere sane! */
u16 pirq_to_evtchn[NR_PIRQS];
u32 pirq_mask[NR_PIRQS/32];
+
+ /* I/O capabilities (access to IRQs and memory-mapped I/O). */
+ struct rangeset *iomem_caps;
+ struct rangeset *irq_caps;
unsigned long domain_flags;
unsigned long vm_assist;
@@ -378,23 +386,20 @@
/* Is this domain privileged? */
#define _DOMF_privileged 1
#define DOMF_privileged (1UL<<_DOMF_privileged)
- /* May this domain do IO to physical devices? */
-#define _DOMF_physdev_access 2
-#define DOMF_physdev_access (1UL<<_DOMF_physdev_access)
/* Guest shut itself down for some reason. */
-#define _DOMF_shutdown 3
+#define _DOMF_shutdown 2
#define DOMF_shutdown (1UL<<_DOMF_shutdown)
/* Guest is in process of shutting itself down (becomes DOMF_shutdown). */
-#define _DOMF_shuttingdown 4
+#define _DOMF_shuttingdown 3
#define DOMF_shuttingdown (1UL<<_DOMF_shuttingdown)
/* Death rattle. */
-#define _DOMF_dying 5
+#define _DOMF_dying 4
#define DOMF_dying (1UL<<_DOMF_dying)
/* Domain is paused by controller software. */
-#define _DOMF_ctrl_pause 6
+#define _DOMF_ctrl_pause 5
#define DOMF_ctrl_pause (1UL<<_DOMF_ctrl_pause)
/* Domain is being debugged by controller software. */
-#define _DOMF_debugging 7
+#define _DOMF_debugging 6
#define DOMF_debugging (1UL<<_DOMF_debugging)
@@ -422,8 +427,6 @@
#define IS_PRIV(_d) \
(test_bit(_DOMF_privileged, &(_d)->domain_flags))
-#define IS_CAPABLE_PHYSDEV(_d) \
- (test_bit(_DOMF_physdev_access, &(_d)->domain_flags))
#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
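
With the per-domain rangesets in place, the old DOMF_physdev_access flag disappears and every privileged operation checks the relevant rangeset instead. The pattern used throughout this patch looks like the following sketch (the function itself is illustrative; the accessors are the xen/iocap.h ones used above):

static int example_map_mmio_frame(struct domain *d, unsigned long mfn)
{
    /* Granted earlier via DOM0_IOMEM_PERMISSION / iomem_permit_access(). */
    if ( !iomem_access_permitted(d, mfn, mfn) )
        return -EPERM;

    /* ... safe to create the mapping for this frame ... */
    return 0;
}
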
diff -r 903fb46f240e -r cd914808acf1 tools/libxc/xc_pagetab.c
--- /dev/null Tue Jan 3 14:59:00 2006
+++ b/tools/libxc/xc_pagetab.c Tue Jan 3 16:19:20 2006
@@ -0,0 +1,192 @@
+/******************************************************************************
+ * xc_pagetab.c
+ *
+ * Function to translate virtual to physical addresses.
+ */
+#include "xc_private.h"
+
+#if defined(__i386__)
+
+#define L1_PAGETABLE_SHIFT_PAE 12
+#define L2_PAGETABLE_SHIFT_PAE 21
+#define L3_PAGETABLE_SHIFT_PAE 30
+
+#define L1_PAGETABLE_SHIFT 12
+#define L2_PAGETABLE_SHIFT 22
+
+#define L0_PAGETABLE_MASK_PAE 0x0000000ffffff000ULL
+#define L1_PAGETABLE_MASK_PAE 0x1ffULL
+#define L2_PAGETABLE_MASK_PAE 0x1ffULL
+#define L3_PAGETABLE_MASK_PAE 0x3ULL
+
+#define L0_PAGETABLE_MASK 0xfffff000ULL
+#define L1_PAGETABLE_MASK 0x3ffULL
+#define L2_PAGETABLE_MASK 0x3ffULL
+
+#elif defined(__x86_64__)
+
+#define L1_PAGETABLE_SHIFT_PAE 12
+#define L2_PAGETABLE_SHIFT_PAE 21
+#define L3_PAGETABLE_SHIFT_PAE 30
+#define L4_PAGETABLE_SHIFT_PAE 39
+
+#define L1_PAGETABLE_SHIFT L1_PAGETABLE_SHIFT_PAE
+#define L2_PAGETABLE_SHIFT L2_PAGETABLE_SHIFT_PAE
+
+#define L0_PAGETABLE_MASK_PAE 0x000000fffffff000ULL
+#define L1_PAGETABLE_MASK_PAE 0x1ffULL
+#define L2_PAGETABLE_MASK_PAE 0x1ffULL
+#define L3_PAGETABLE_MASK_PAE 0x1ffULL
+#define L4_PAGETABLE_MASK_PAE 0x1ffULL
+
+#define L0_PAGETABLE_MASK L0_PAGETABLE_MASK_PAE
+#define L1_PAGETABLE_MASK L1_PAGETABLE_MASK_PAE
+#define L2_PAGETABLE_MASK L2_PAGETABLE_MASK_PAE
+
+#endif
+
+unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
+ int vcpu, unsigned long long virt )
+{
+ vcpu_guest_context_t ctx;
+ unsigned long long cr3;
+ void *pd, *pt, *pdppage = NULL, *pdp, *pml = NULL;
+ unsigned long long pde, pte, pdpe, pmle;
+ unsigned long mfn = 0;
+#if defined (__i386__)
+ static int pt_levels = 0;
+
+ if (pt_levels == 0) {
+ xen_capabilities_info_t xen_caps = "";
+
+ if (xc_version(xc_handle, XENVER_capabilities, &xen_caps) != 0)
+ goto out;
+ if (strstr(xen_caps, "xen-3.0-x86_64"))
+ pt_levels = 4;
+ else if (strstr(xen_caps, "xen-3.0-x86_32p"))
+ pt_levels = 3;
+ else if (strstr(xen_caps, "xen-3.0-x86_32"))
+ pt_levels = 2;
+ else
+ goto out;
+ }
+#elif defined (__x86_64__)
+#define pt_levels 4
+#endif
+
+ if (xc_domain_get_vcpu_context(xc_handle, dom, vcpu, &ctx) != 0) {
+ fprintf(stderr, "failed to retreive vcpu context\n");
+ goto out;
+ }
+ cr3 = ctx.ctrlreg[3];
+
+ /* Page Map Level 4 */
+
+#if defined(__i386__)
+ pmle = cr3;
+#elif defined(__x86_64__)
+ pml = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ, cr3 >> PAGE_SHIFT);
+ if (pml == NULL) {
+ fprintf(stderr, "failed to map PML4\n");
+ goto out;
+ }
+ pmle = *(unsigned long long *)(pml + 8 * ((virt >> L4_PAGETABLE_SHIFT_PAE) & L4_PAGETABLE_MASK_PAE));
+ if((pmle & 1) == 0) {
+ fprintf(stderr, "page entry not present in PML4\n");
+ goto out_unmap_pml;
+ }
+#endif
+
+ /* Page Directory Pointer Table */
+
+ if (pt_levels >= 3) {
+ pdppage = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ, pmle >> PAGE_SHIFT);
+ if (pdppage == NULL) {
+ fprintf(stderr, "failed to map PDP\n");
+ goto out_unmap_pml;
+ }
+ if (pt_levels >= 4)
+ pdp = pdppage;
+ else
+ /* The PDPT is only 32-byte aligned with 3-level page tables */
+ pdp = pdppage + (pmle & ~(XC_PAGE_MASK | 0x1f));
+
+ pdpe = *(unsigned long long *)(pdp + 8 * ((virt >> L3_PAGETABLE_SHIFT_PAE) & L3_PAGETABLE_MASK_PAE));
+
+ if((pdpe & 1) == 0) {
+ fprintf(stderr, "page entry not present in PDP\n");
+ goto out_unmap_pdp;
+ }
+ } else {
+ pdpe = pmle;
+ }
+
+ /* Page Directory */
+
+ pd = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ, pdpe >> PAGE_SHIFT);
+ if (pd == NULL) {
+ fprintf(stderr, "failed to map PD\n");
+ goto out_unmap_pdp;
+ }
+
+ if (pt_levels >= 3)
+ pde = *(unsigned long long *)(pd + 8 * ((virt >> L2_PAGETABLE_SHIFT_PAE) & L2_PAGETABLE_MASK_PAE));
+ else
+ pde = *(unsigned long long *)(pd + 4 * ((virt >> L2_PAGETABLE_SHIFT) & L2_PAGETABLE_MASK));
+
+ if ((pde & 1) == 0) {
+ fprintf(stderr, "page entry not present in PD\n");
+ goto out_unmap_pd;
+ }
+
+ /* Page Table */
+
+ if (pde & 0x00000008) { /* 4M page (or 2M in PAE mode) */
+ fprintf(stderr, "Cannot currently cope with 2/4M pages\n");
+ exit(-1);
+ } else { /* 4k page */
+ pt = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ,
+ pde >> PAGE_SHIFT);
+
+ if (pt == NULL) {
+ fprintf(stderr, "failed to map PT\n");
+ goto out_unmap_pd;
+ }
+
+ if (pt_levels >= 3)
+ pte = *(unsigned long long *)(pt + 8 * ((virt >> L1_PAGETABLE_SHIFT_PAE) & L1_PAGETABLE_MASK_PAE));
+ else
+ pte = *(unsigned long long *)(pt + 4 * ((virt >> L1_PAGETABLE_SHIFT) & L1_PAGETABLE_MASK));
+
+ if ((pte & 0x00000001) == 0) {
+ fprintf(stderr, "page entry not present in PT\n");
+ goto out_unmap_pt;
+ }
+
+ if (pt_levels >= 3)
+ mfn = (pte & L0_PAGETABLE_MASK_PAE) >> PAGE_SHIFT;
+ else
+ mfn = (pte & L0_PAGETABLE_MASK) >> PAGE_SHIFT;
+ }
+
+ out_unmap_pt:
+ munmap(pt, PAGE_SIZE);
+ out_unmap_pd:
+ munmap(pd, PAGE_SIZE);
+ out_unmap_pdp:
+ munmap(pdppage, PAGE_SIZE);
+ out_unmap_pml:
+ munmap(pml, PAGE_SIZE);
+ out:
+ return mfn;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
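
For completeness, a sketch of how a tool such as xenctx consumes the new translation helper: resolve a guest-virtual address to an MFN and then map that frame read-only (illustrative wrapper, error handling abbreviated):

#include <sys/mman.h>
#include "xenctrl.h"

void *map_guest_virt(int xc_handle, uint32_t dom, int vcpu,
                     unsigned long long virt)
{
    unsigned long mfn = xc_translate_foreign_address(xc_handle, dom, vcpu, virt);
    if (mfn == 0)
        return NULL;        /* translation failed */
    return xc_map_foreign_range(xc_handle, dom, XC_PAGE_SIZE, PROT_READ, mfn);
}
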
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/x86_32/xen.lds.S
--- /dev/null Tue Jan 3 14:59:00 2006
+++ b/xen/arch/x86/x86_32/xen.lds.S Tue Jan 3 16:19:20 2006
@@ -0,0 +1,85 @@
+/* ld script to make i386 Linux kernel
+ * Written by Martin Mares <mj@xxxxxxxxxxxxxxxxxxxxxxxx>
+ * Modified for i386 Xen by Keir Fraser
+ */
+
+#include <xen/config.h>
+#include <asm/page.h>
+#undef ENTRY
+#undef ALIGN
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(start)
+PHDRS
+{
+ text PT_LOAD ;
+}
+SECTIONS
+{
+ . = 0xFF000000 + 0x100000;
+ _text = .; /* Text and read-only data */
+ .text : {
+ *(.text)
+ *(.fixup)
+ *(.gnu.warning)
+ } :text =0x9090
+ .text.lock : { *(.text.lock) } :text /* out-of-line lock text */
+
+ _etext = .; /* End of text section */
+
+ .rodata : { *(.rodata) *(.rodata.*) } :text
+
+ . = ALIGN(32); /* Exception table */
+ __start___ex_table = .;
+ __ex_table : { *(__ex_table) } :text
+ __stop___ex_table = .;
+
+ . = ALIGN(32); /* Pre-exception table */
+ __start___pre_ex_table = .;
+ __pre_ex_table : { *(__pre_ex_table) } :text
+ __stop___pre_ex_table = .;
+
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+ } :text
+
+ . = ALIGN(4096); /* Init code and data */
+ __init_begin = .;
+ .text.init : { *(.text.init) } :text
+ .data.init : { *(.data.init) } :text
+ . = ALIGN(32);
+ __setup_start = .;
+ .setup.init : { *(.setup.init) } :text
+ __setup_end = .;
+ __initcall_start = .;
+ .initcall.init : { *(.initcall.init) } :text
+ __initcall_end = .;
+ . = ALIGN(STACK_SIZE);
+ __init_end = .;
+
+ __bss_start = .; /* BSS */
+ .bss : {
+ *(.bss.stack_aligned)
+ *(.bss.page_aligned)
+ *(.bss)
+ } :text
+ _end = . ;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.text.exit)
+ *(.data.exit)
+ *(.exitcall.exit)
+ }
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+}
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/x86_64/xen.lds.S
--- /dev/null Tue Jan 3 14:59:00 2006
+++ b/xen/arch/x86/x86_64/xen.lds.S Tue Jan 3 16:19:20 2006
@@ -0,0 +1,83 @@
+/* Excerpts written by Martin Mares <mj@xxxxxxxxxxxxxxxxxxxxxxxx> */
+/* Modified for x86-64 Xen by Keir Fraser */
+
+#include <xen/config.h>
+#include <asm/page.h>
+#undef ENTRY
+#undef ALIGN
+
+OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
+OUTPUT_ARCH(i386:x86-64)
+ENTRY(start)
+PHDRS
+{
+ text PT_LOAD ;
+}
+SECTIONS
+{
+ . = 0xFFFF830000100000;
+ _text = .; /* Text and read-only data */
+ .text : {
+ *(.text)
+ *(.fixup)
+ *(.gnu.warning)
+ } :text = 0x9090
+ .text.lock : { *(.text.lock) } :text /* out-of-line lock text */
+
+ _etext = .; /* End of text section */
+
+ .rodata : { *(.rodata) *(.rodata.*) } :text
+
+ . = ALIGN(32); /* Exception table */
+ __start___ex_table = .;
+ __ex_table : { *(__ex_table) } :text
+ __stop___ex_table = .;
+
+ . = ALIGN(32); /* Pre-exception table */
+ __start___pre_ex_table = .;
+ __pre_ex_table : { *(__pre_ex_table) } :text
+ __stop___pre_ex_table = .;
+
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+ } :text
+
+ . = ALIGN(4096); /* Init code and data */
+ __init_begin = .;
+ .text.init : { *(.text.init) } :text
+ .data.init : { *(.data.init) } :text
+ . = ALIGN(32);
+ __setup_start = .;
+ .setup.init : { *(.setup.init) } :text
+ __setup_end = .;
+ __initcall_start = .;
+ .initcall.init : { *(.initcall.init) } :text
+ __initcall_end = .;
+ . = ALIGN(STACK_SIZE);
+ __init_end = .;
+
+ __bss_start = .; /* BSS */
+ .bss : {
+ *(.bss.stack_aligned)
+ *(.bss.page_aligned)
+ *(.bss)
+ } :text
+ _end = . ;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.text.exit)
+ *(.data.exit)
+ *(.exitcall.exit)
+ }
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+}
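Both linker scripts bracket sections such as .initcall.init and .setup.init with start/end symbols so that C code can treat the section contents as an array. As a rough sketch of how bracketing symbols of this kind are normally consumed (the initcall_t typedef and the function name below are illustrative and not taken from this changeset):

    /* Sketch only: invoke every pointer the linker gathered into
     * .initcall.init, i.e. everything between the two bracket symbols. */
    typedef int (*initcall_t)(void);

    extern initcall_t __initcall_start[], __initcall_end[];

    static void do_initcalls(void)
    {
        initcall_t *call;

        for ( call = __initcall_start; call < __initcall_end; call++ )
            (*call)();
    }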
diff -r 903fb46f240e -r cd914808acf1 xen/common/rangeset.c
--- /dev/null Tue Jan 3 14:59:00 2006
+++ b/xen/common/rangeset.c Tue Jan 3 16:19:20 2006
@@ -0,0 +1,399 @@
+/******************************************************************************
+ * rangeset.c
+ *
+ * Creation, maintenance and automatic destruction of per-domain sets of
+ * numeric ranges.
+ *
+ * Copyright (c) 2005, K A Fraser
+ */
+
+#include <xen/sched.h>
+#include <xen/rangeset.h>
+
+/* An inclusive range [s,e] and pointer to next range in ascending order. */
+struct range {
+ struct list_head list;
+ unsigned long s, e;
+};
+
+struct rangeset {
+ /* Owning domain and threaded list of rangesets. */
+ struct list_head rangeset_list;
+ struct domain *domain;
+
+ /* Ordered list of ranges contained in this set, and protecting lock. */
+ struct list_head range_list;
+ spinlock_t lock;
+
+ /* Pretty-printing name. */
+ char name[32];
+
+ /* RANGESETF flags. */
+ unsigned int flags;
+};
+
+/*****************************
+ * Private range functions hide the underlying linked-list implementation.
+ */
+
+/* Find highest range lower than or containing s. NULL if no such range. */
+static struct range *find_range(
+ struct rangeset *r, unsigned long s)
+{
+ struct range *x = NULL, *y;
+
+ list_for_each_entry ( y, &r->range_list, list )
+ {
+ if ( y->s > s )
+ break;
+ x = y;
+ }
+
+ return x;
+}
+
+/* Return the lowest range in the set r, or NULL if r is empty. */
+static struct range *first_range(
+ struct rangeset *r)
+{
+ if ( list_empty(&r->range_list) )
+ return NULL;
+ return list_entry(r->range_list.next, struct range, list);
+}
+
+/* Return range following x in ascending order, or NULL if x is the highest. */
+static struct range *next_range(
+ struct rangeset *r, struct range *x)
+{
+ if ( x->list.next == &r->range_list )
+ return NULL;
+ return list_entry(x->list.next, struct range, list);
+}
+
+/* Insert range y after range x in r. Insert as first range if x is NULL. */
+static void insert_range(
+ struct rangeset *r, struct range *x, struct range *y)
+{
+ list_add(&y->list, (x != NULL) ? &x->list : &r->range_list);
+}
+
+/* Remove a range from its list and free it. */
+static void destroy_range(
+ struct range *x)
+{
+ list_del(&x->list);
+ xfree(x);
+}
+
+/*****************************
+ * Core public functions
+ */
+
+int rangeset_add_range(
+ struct rangeset *r, unsigned long s, unsigned long e)
+{
+ struct range *x, *y;
+ int rc = 0;
+
+ spin_lock(&r->lock);
+
+ x = find_range(r, s);
+ y = find_range(r, e);
+
+ if ( x == y )
+ {
+ if ( (x == NULL) || ((x->e < s) && ((x->e + 1) != s)) )
+ {
+ x = xmalloc(struct range);
+ if ( x == NULL )
+ {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ x->s = s;
+ x->e = e;
+
+ insert_range(r, y, x);
+ }
+ else if ( x->e < e )
+ x->e = e;
+ }
+ else
+ {
+ if ( x == NULL )
+ {
+ x = first_range(r);
+ x->s = s;
+ }
+ else if ( (x->e < s) && ((x->e + 1) != s) )
+ {
+ x = next_range(r, x);
+ x->s = s;
+ }
+
+ x->e = (y->e > e) ? y->e : e;
+
+ for ( ; ; )
+ {
+ y = next_range(r, x);
+ if ( (y == NULL) || (y->e > x->e) )
+ break;
+ destroy_range(y);
+ }
+ }
+
+ y = next_range(r, x);
+ if ( (y != NULL) && ((x->e + 1) == y->s) )
+ {
+ x->e = y->e;
+ destroy_range(y);
+ }
+
+ out:
+ spin_unlock(&r->lock);
+ return rc;
+}
+
+int rangeset_remove_range(
+ struct rangeset *r, unsigned long s, unsigned long e)
+{
+ struct range *x, *y, *t;
+ int rc = 0;
+
+ spin_lock(&r->lock);
+
+ x = find_range(r, s);
+ y = find_range(r, e);
+
+ if ( x == y )
+ {
+ if ( (x == NULL) || (x->e < s) )
+ goto out;
+
+ if ( (x->s < s) && (x->e > e) )
+ {
+ y = xmalloc(struct range);
+ if ( y == NULL )
+ {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ y->s = e + 1;
+ y->e = x->e;
+ x->e = s - 1;
+
+ insert_range(r, x, y);
+ }
+ else if ( (x->s == s) && (x->e <= e) )
+ destroy_range(x);
+ else if ( x->s == s )
+ x->s = e + 1;
+ else if ( x->e <= e )
+ x->e = s - 1;
+ }
+ else
+ {
+ if ( x == NULL )
+ x = first_range(r);
+
+ if ( x->s < s )
+ {
+ x->e = s - 1;
+ x = next_range(r, x);
+ }
+
+ while ( x != y )
+ {
+ t = x;
+ x = next_range(r, x);
+ destroy_range(t);
+ }
+
+ x->s = e + 1;
+ if ( x->s > x->e )
+ destroy_range(x);
+ }
+
+ out:
+ spin_unlock(&r->lock);
+ return rc;
+}
+
+int rangeset_contains_range(
+ struct rangeset *r, unsigned long s, unsigned long e)
+{
+ struct range *x;
+ int contains;
+
+ spin_lock(&r->lock);
+ x = find_range(r, s);
+ contains = (x && (x->e >= e));
+ spin_unlock(&r->lock);
+
+ return contains;
+}
+
+int rangeset_add_singleton(
+ struct rangeset *r, unsigned long s)
+{
+ return rangeset_add_range(r, s, s);
+}
+
+int rangeset_remove_singleton(
+ struct rangeset *r, unsigned long s)
+{
+ return rangeset_remove_range(r, s, s);
+}
+
+int rangeset_contains_singleton(
+ struct rangeset *r, unsigned long s)
+{
+ return rangeset_contains_range(r, s, s);
+}
+
+int rangeset_is_empty(
+ struct rangeset *r)
+{
+ return list_empty(&r->range_list);
+}
+
+struct rangeset *rangeset_new(
+ struct domain *d, char *name, unsigned int flags)
+{
+ struct rangeset *r;
+
+ r = xmalloc(struct rangeset);
+ if ( r == NULL )
+ return NULL;
+
+ spin_lock_init(&r->lock);
+ INIT_LIST_HEAD(&r->range_list);
+
+ BUG_ON(flags & ~RANGESETF_prettyprint_hex);
+ r->flags = flags;
+
+ if ( name != NULL )
+ {
+ strncpy(r->name, name, sizeof(r->name));
+ r->name[sizeof(r->name)-1] = '\0';
+ }
+ else
+ {
+ sprintf(r->name, "(no name)");
+ }
+
+ if ( (r->domain = d) != NULL )
+ {
+ spin_lock(&d->rangesets_lock);
+ list_add(&r->rangeset_list, &d->rangesets);
+ spin_unlock(&d->rangesets_lock);
+ }
+
+ return r;
+}
+
+void rangeset_destroy(
+ struct rangeset *r)
+{
+ struct range *x;
+
+ if ( r == NULL )
+ return;
+
+ if ( r->domain != NULL )
+ {
+ spin_lock(&r->domain->rangesets_lock);
+ list_del(&r->rangeset_list);
+ spin_unlock(&r->domain->rangesets_lock);
+ }
+
+ while ( (x = first_range(r)) != NULL )
+ destroy_range(x);
+
+ xfree(r);
+}
+
+void rangeset_domain_initialise(
+ struct domain *d)
+{
+ INIT_LIST_HEAD(&d->rangesets);
+ spin_lock_init(&d->rangesets_lock);
+}
+
+void rangeset_domain_destroy(
+ struct domain *d)
+{
+ struct rangeset *r;
+
+ while ( !list_empty(&d->rangesets) )
+ {
+ r = list_entry(d->rangesets.next, struct rangeset, rangeset_list);
+
+ BUG_ON(r->domain != d);
+ r->domain = NULL;
+ list_del(&r->rangeset_list);
+
+ rangeset_destroy(r);
+ }
+}
+
+/*****************************
+ * Pretty-printing functions
+ */
+
+static void print_limit(struct rangeset *r, unsigned long s)
+{
+ printk((r->flags & RANGESETF_prettyprint_hex) ? "%lx" : "%lu", s);
+}
+
+void rangeset_printk(
+ struct rangeset *r)
+{
+ int nr_printed = 0;
+ struct range *x;
+
+ spin_lock(&r->lock);
+
+ printk("%-10s {", r->name);
+
+ for ( x = first_range(r); x != NULL; x = next_range(r, x) )
+ {
+ if ( nr_printed++ )
+ printk(",");
+ printk(" ");
+ print_limit(r, x->s);
+ if ( x->s != x->e )
+ {
+ printk("-");
+ print_limit(r, x->e);
+ }
+ }
+
+ printk(" }");
+
+ spin_unlock(&r->lock);
+}
+
+void rangeset_domain_printk(
+ struct domain *d)
+{
+ struct rangeset *r;
+
+ printk("Rangesets belonging to domain %u:\n", d->domain_id);
+
+ spin_lock(&d->rangesets_lock);
+
+ if ( list_empty(&d->rangesets) )
+ printk(" None\n");
+
+ list_for_each_entry ( r, &d->rangesets, rangeset_list )
+ {
+ printk(" ");
+ rangeset_printk(r);
+ printk("\n");
+ }
+
+ spin_unlock(&d->rangesets_lock);
+}
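A typical caller of this API creates a set attached to its domain, adds ranges (rangeset_add_range() coalesces adjacent and overlapping ranges), and later queries membership. The sketch below is illustrative only; the names are invented, and the error constant is assumed to be visible through the same headers rangeset.c itself includes.

    /* Sketch only: exercise the rangeset API defined above. */
    #include <xen/sched.h>      /* struct domain, as included by rangeset.c */
    #include <xen/rangeset.h>

    static int example_rangeset_usage(struct domain *d)
    {
        struct rangeset *ports;
        int rc;

        /* Attaching to d means rangeset_domain_destroy(d) will free it. */
        ports = rangeset_new(d, "example ports", RANGESETF_prettyprint_hex);
        if ( ports == NULL )
            return -ENOMEM;

        /* [0x3f8,0x3ff] plus the singleton 0x80. */
        rc = rangeset_add_range(ports, 0x3f8, 0x3ff);
        if ( rc == 0 )
            rc = rangeset_add_singleton(ports, 0x80);

        if ( (rc == 0) && rangeset_contains_singleton(ports, 0x3fa) )
            rangeset_printk(ports);

        return rc;
    }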
diff -r 903fb46f240e -r cd914808acf1 xen/include/asm-ia64/iocap.h
--- /dev/null Tue Jan 3 14:59:00 2006
+++ b/xen/include/asm-ia64/iocap.h Tue Jan 3 16:19:20 2006
@@ -0,0 +1,10 @@
+/******************************************************************************
+ * iocap.h
+ *
+ * Architecture-specific per-domain I/O capabilities.
+ */
+
+#ifndef __IA64_IOCAP_H__
+#define __IA64_IOCAP_H__
+
+#endif /* __IA64_IOCAP_H__ */
diff -r 903fb46f240e -r cd914808acf1 xen/include/asm-x86/iocap.h
--- /dev/null Tue Jan 3 14:59:00 2006
+++ b/xen/include/asm-x86/iocap.h Tue Jan 3 16:19:20 2006
@@ -0,0 +1,20 @@
+/******************************************************************************
+ * iocap.h
+ *
+ * Architecture-specific per-domain I/O capabilities.
+ */
+
+#ifndef __X86_IOCAP_H__
+#define __X86_IOCAP_H__
+
+#define ioports_permit_access(d, s, e) \
+ rangeset_add_range((d)->arch.ioport_caps, s, e)
+#define ioports_deny_access(d, s, e) \
+ rangeset_remove_range((d)->arch.ioport_caps, s, e)
+#define ioports_access_permitted(d, s, e) \
+ rangeset_contains_range((d)->arch.ioport_caps, s, e)
+
+#define cache_flush_permitted(d) \
+ (!rangeset_is_empty((d)->iomem_caps))
+
+#endif /* __X86_IOCAP_H__ */
diff -r 903fb46f240e -r cd914808acf1 xen/include/xen/iocap.h
--- /dev/null Tue Jan 3 14:59:00 2006
+++ b/xen/include/xen/iocap.h Tue Jan 3 16:19:20 2006
@@ -0,0 +1,34 @@
+/******************************************************************************
+ * iocap.h
+ *
+ * Per-domain I/O capabilities.
+ */
+
+#ifndef __XEN_IOCAP_H__
+#define __XEN_IOCAP_H__
+
+#include <xen/rangeset.h>
+#include <asm/iocap.h>
+
+#define iomem_permit_access(d, s, e) \
+ rangeset_add_range((d)->iomem_caps, s, e)
+#define iomem_deny_access(d, s, e) \
+ rangeset_remove_range((d)->iomem_caps, s, e)
+#define iomem_access_permitted(d, s, e) \
+ rangeset_contains_range((d)->iomem_caps, s, e)
+
+#define irq_permit_access(d, i) \
+ rangeset_add_singleton((d)->irq_caps, i)
+#define irq_deny_access(d, i) \
+ rangeset_remove_singleton((d)->irq_caps, i)
+#define irqs_permit_access(d, s, e) \
+ rangeset_add_range((d)->irq_caps, s, e)
+#define irqs_deny_access(d, s, e) \
+ rangeset_remove_range((d)->irq_caps, s, e)
+#define irq_access_permitted(d, i) \
+ rangeset_contains_singleton((d)->irq_caps, i)
+
+#define multipage_allocation_permitted(d) \
+ (!rangeset_is_empty((d)->iomem_caps))
+
+#endif /* __XEN_IOCAP_H__ */
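These macros reduce every I/O capability decision to a rangeset operation on the owning domain. A rough sketch of a call site follows (x86-specific, since it touches the ioport set; the function name and error handling are illustrative, not part of this changeset):

    /* Sketch only: grant a domain the legacy COM1 ports and IRQ, then query. */
    #include <xen/sched.h>   /* struct domain */
    #include <xen/iocap.h>

    static int example_grant_com1(struct domain *d)
    {
        int rc;

        rc = ioports_permit_access(d, 0x3f8, 0x3ff);
        if ( rc == 0 )
            rc = irq_permit_access(d, 4);
        if ( rc != 0 )
            return rc;

        /* A later emulation or passthrough path only needs a lookup. */
        if ( !ioports_access_permitted(d, 0x3f8, 0x3ff) ||
             !irq_access_permitted(d, 4) )
            return -EPERM;

        return 0;
    }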
diff -r 903fb46f240e -r cd914808acf1 xen/include/xen/rangeset.h
--- /dev/null Tue Jan 3 14:59:00 2006
+++ b/xen/include/xen/rangeset.h Tue Jan 3 16:19:20 2006
@@ -0,0 +1,71 @@
+/******************************************************************************
+ * rangeset.h
+ *
+ * Creation, maintenance and automatic destruction of per-domain sets of
+ * numeric ranges.
+ *
+ * Copyright (c) 2005, K A Fraser
+ */
+
+#ifndef __XEN_RANGESET_H__
+#define __XEN_RANGESET_H__
+
+struct domain;
+struct rangeset;
+
+/*
+ * Initialise/destroy per-domain rangeset information.
+ *
+ * It is invalid to create or destroy a rangeset belonging to a domain @d
+ * before rangeset_domain_initialise(d) returns or after calling
+ * rangeset_domain_destroy(d).
+ */
+void rangeset_domain_initialise(
+ struct domain *d);
+void rangeset_domain_destroy(
+ struct domain *d);
+
+/*
+ * Create/destroy a rangeset. Optionally attach to specified domain @d for
+ * auto-destruction when the domain dies. A name may be specified, for use
+ * in debug pretty-printing, and various RANGESETF flags (defined below).
+ *
+ * It is invalid to perform any operation on a rangeset @r after calling
+ * rangeset_destroy(r).
+ */
+struct rangeset *rangeset_new(
+ struct domain *d, char *name, unsigned int flags);
+void rangeset_destroy(
+ struct rangeset *r);
+
+/* Flags for passing to rangeset_new(). */
+ /* Pretty-print range limits in hexadecimal. */
+#define _RANGESETF_prettyprint_hex 0
+#define RANGESETF_prettyprint_hex (1U << _RANGESETF_prettyprint_hex)
+
+int __must_check rangeset_is_empty(
+ struct rangeset *r);
+
+/* Add/remove/query a numeric range. */
+int __must_check rangeset_add_range(
+ struct rangeset *r, unsigned long s, unsigned long e);
+int __must_check rangeset_remove_range(
+ struct rangeset *r, unsigned long s, unsigned long e);
+int __must_check rangeset_contains_range(
+ struct rangeset *r, unsigned long s, unsigned long e);
+
+/* Add/remove/query a single number. */
+int __must_check rangeset_add_singleton(
+ struct rangeset *r, unsigned long s);
+int __must_check rangeset_remove_singleton(
+ struct rangeset *r, unsigned long s);
+int __must_check rangeset_contains_singleton(
+ struct rangeset *r, unsigned long s);
+
+/* Rangeset pretty printing. */
+void rangeset_printk(
+ struct rangeset *r);
+void rangeset_domain_printk(
+ struct domain *d);
+
+#endif /* __XEN_RANGESET_H__ */
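The comments above pin down the lifecycle: rangeset_domain_initialise() must run before any set is created for a domain, sets created with a non-NULL domain are reclaimed by rangeset_domain_destroy(), and nothing may touch a set once it has been destroyed. A sketch of the corresponding hooks (their names and placement are assumptions for illustration):

    /* Sketch only: where the per-domain rangeset hooks fit in a domain's life. */
    #include <xen/sched.h>
    #include <xen/rangeset.h>

    static int example_domain_setup(struct domain *d)
    {
        /* Must precede any rangeset_new(d, ...) call for this domain. */
        rangeset_domain_initialise(d);

        /* The returned set is tracked on d, so no explicit destroy is
         * needed here; teardown below reclaims it. */
        return (rangeset_new(d, "example", 0) != NULL) ? 0 : -ENOMEM;
    }

    static void example_domain_teardown(struct domain *d)
    {
        /* Walks d->rangesets and destroys every set still attached. */
        rangeset_domain_destroy(d);
    }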
diff -r 903fb46f240e -r cd914808acf1 linux-2.6-xen-sparse/include/asm-xen/asm-i386/bug.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/bug.h Tue Jan 3 14:59:00 2006
+++ /dev/null Tue Jan 3 16:19:20 2006
@@ -1,16 +0,0 @@
-#ifndef _I386_BUG_H
-#define _I386_BUG_H
-
-#include <linux/config.h>
-
-#define BUG() do { \
- printk("kernel BUG at %s:%d (%s)!\n", \
- __FILE__, __LINE__, __FUNCTION__); \
- dump_stack(); \
- panic("BUG!"); \
-} while (0)
-#define HAVE_ARCH_BUG
-
-#include <asm-generic/bug.h>
-
-#endif
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/x86_32/xen.lds
--- a/xen/arch/x86/x86_32/xen.lds Tue Jan 3 14:59:00 2006
+++ /dev/null Tue Jan 3 16:19:20 2006
@@ -1,79 +0,0 @@
-/* ld script to make i386 Linux kernel
- * Written by Martin Mares <mj@xxxxxxxxxxxxxxxxxxxxxxxx>
- * Modified for i386 Xen by Keir Fraser
- */
-OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
-OUTPUT_ARCH(i386)
-ENTRY(start)
-PHDRS
-{
- text PT_LOAD ;
-}
-SECTIONS
-{
- . = 0xFF000000 + 0x100000;
- _text = .; /* Text and read-only data */
- .text : {
- *(.text)
- *(.fixup)
- *(.gnu.warning)
- } :text =0x9090
- .text.lock : { *(.text.lock) } :text /* out-of-line lock text */
-
- _etext = .; /* End of text section */
-
- .rodata : { *(.rodata) *(.rodata.*) } :text
-
- . = ALIGN(32); /* Exception table */
- __start___ex_table = .;
- __ex_table : { *(__ex_table) } :text
- __stop___ex_table = .;
-
- . = ALIGN(32); /* Pre-exception table */
- __start___pre_ex_table = .;
- __pre_ex_table : { *(__pre_ex_table) } :text
- __stop___pre_ex_table = .;
-
- .data : { /* Data */
- *(.data)
- CONSTRUCTORS
- } :text
-
- . = ALIGN(4096); /* Init code and data */
- __init_begin = .;
- .text.init : { *(.text.init) } :text
- .data.init : { *(.data.init) } :text
- . = ALIGN(32);
- __setup_start = .;
- .setup.init : { *(.setup.init) } :text
- __setup_end = .;
- __initcall_start = .;
- .initcall.init : { *(.initcall.init) } :text
- __initcall_end = .;
- . = ALIGN(8192);
- __init_end = .;
-
- __bss_start = .; /* BSS */
- .bss : {
- *(.bss.twopage_aligned)
- *(.bss.page_aligned)
- *(.bss)
- } :text
- _end = . ;
-
- /* Sections to be discarded */
- /DISCARD/ : {
- *(.text.exit)
- *(.data.exit)
- *(.exitcall.exit)
- }
-
- /* Stabs debugging sections. */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- .comment 0 : { *(.comment) }
-}
diff -r 903fb46f240e -r cd914808acf1 xen/arch/x86/x86_64/xen.lds
--- a/xen/arch/x86/x86_64/xen.lds Tue Jan 3 14:59:00 2006
+++ /dev/null Tue Jan 3 16:19:20 2006
@@ -1,77 +0,0 @@
-/* Excerpts written by Martin Mares <mj@xxxxxxxxxxxxxxxxxxxxxxxx> */
-/* Modified for x86-64 Xen by Keir Fraser */
-OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
-OUTPUT_ARCH(i386:x86-64)
-ENTRY(start)
-PHDRS
-{
- text PT_LOAD ;
-}
-SECTIONS
-{
- . = 0xFFFF830000100000;
- _text = .; /* Text and read-only data */
- .text : {
- *(.text)
- *(.fixup)
- *(.gnu.warning)
- } :text = 0x9090
- .text.lock : { *(.text.lock) } :text /* out-of-line lock text */
-
- _etext = .; /* End of text section */
-
- .rodata : { *(.rodata) *(.rodata.*) } :text
-
- . = ALIGN(32); /* Exception table */
- __start___ex_table = .;
- __ex_table : { *(__ex_table) } :text
- __stop___ex_table = .;
-
- . = ALIGN(32); /* Pre-exception table */
- __start___pre_ex_table = .;
- __pre_ex_table : { *(__pre_ex_table) } :text
- __stop___pre_ex_table = .;
-
- .data : { /* Data */
- *(.data)
- CONSTRUCTORS
- } :text
-
- . = ALIGN(4096); /* Init code and data */
- __init_begin = .;
- .text.init : { *(.text.init) } :text
- .data.init : { *(.data.init) } :text
- . = ALIGN(32);
- __setup_start = .;
- .setup.init : { *(.setup.init) } :text
- __setup_end = .;
- __initcall_start = .;
- .initcall.init : { *(.initcall.init) } :text
- __initcall_end = .;
- . = ALIGN(8192);
- __init_end = .;
-
- __bss_start = .; /* BSS */
- .bss : {
- *(.bss.twopage_aligned)
- *(.bss.page_aligned)
- *(.bss)
- } :text
- _end = . ;
-
- /* Sections to be discarded */
- /DISCARD/ : {
- *(.text.exit)
- *(.data.exit)
- *(.exitcall.exit)
- }
-
- /* Stabs debugging sections. */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- .comment 0 : { *(.comment) }
-}
diff -r 903fb46f240e -r cd914808acf1 xen/include/asm-x86/physdev.h
--- a/xen/include/asm-x86/physdev.h Tue Jan 3 14:59:00 2006
+++ /dev/null Tue Jan 3 16:19:20 2006
@@ -1,17 +0,0 @@
-/******************************************************************************
- * physdev.h
- */
-
-#ifndef __XEN_PHYSDEV_H__
-#define __XEN_PHYSDEV_H__
-
-#include <public/physdev.h>
-
-void physdev_modify_ioport_access_range(
- struct domain *d, int enable, int port, int num );
-void physdev_destroy_state(struct domain *d);
-int domain_iomem_in_pfn(struct domain *p, unsigned long pfn);
-long do_physdev_op(physdev_op_t *uop);
-void physdev_init_dom0(struct domain *d);
-
-#endif /* __XEN_PHYSDEV_H__ */