WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] Extend max vcpu number for HVM guest

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Extend max vcpu number for HVM guest
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 29 Oct 2009 08:05:13 -0700
Delivery-date: Thu, 29 Oct 2009 08:05:34 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1256825036 0
# Node ID 444ac8fdb591ade353fc265b82e769f7b82da310
# Parent  2f9753ddd3d7fc8884708e17aac6a9abde617bfa
Extend max vcpu number for HVM guest

Reduce size of Xen-qemu shared ioreq structure to 32 bytes. This
has two advantages:
 1. We can support up to 128 VCPUs with a single shared page
 2. If/when we want to go beyond 128 VCPUs, a whole number of ioreq_t
    structures will pack into a single shared page, so a multi-page
    array will have no ioreq_t straddling a page boundary

Also, while modifying qemu, replace a 32-entry vcpu-indexed array
with a dynamically-allocated array.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 Config.mk                         |    6 +++---
 xen/arch/ia64/vmx/mmio.c          |   25 ++++---------------------
 xen/arch/ia64/vmx/vmx_init.c      |    8 +++-----
 xen/arch/ia64/vmx/vmx_support.c   |   17 ++---------------
 xen/arch/x86/hvm/emulate.c        |    4 +---
 xen/arch/x86/hvm/hvm.c            |    4 ++--
 xen/arch/x86/hvm/io.c             |   13 ++++---------
 xen/include/asm-ia64/vmx.h        |    4 ++--
 xen/include/asm-x86/hvm/support.h |    4 ++--
 xen/include/public/hvm/ioreq.h    |   32 ++++++++++++--------------------
 10 files changed, 35 insertions(+), 82 deletions(-)

diff -r 2f9753ddd3d7 -r 444ac8fdb591 Config.mk
--- a/Config.mk Thu Oct 29 11:50:09 2009 +0000
+++ b/Config.mk Thu Oct 29 14:03:56 2009 +0000
@@ -150,9 +150,9 @@ QEMU_REMOTE=http://xenbits.xensource.com
 # CONFIG_QEMU ?= ../qemu-xen.git
 CONFIG_QEMU ?= $(QEMU_REMOTE)
 
-QEMU_TAG ?= b4bb8b3f09d1c873f522f6aebe1f125a6d1854d0
-# Wed Oct 21 16:42:15 2009 +0100
-# passthrough: fix security issue with stubdoms
+QEMU_TAG ?= 3140780e451d3919ef2c81f91ae0ebe3f286eb06
+# Thu Oct 29 13:00:31 2009 +0000
+# Extend max vcpu number for HVM guest
 
 OCAML_XENSTORED_REPO=http://xenbits.xensource.com/ext/xen-ocaml-tools.hg
 
diff -r 2f9753ddd3d7 -r 444ac8fdb591 xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c  Thu Oct 29 11:50:09 2009 +0000
+++ b/xen/arch/ia64/vmx/mmio.c  Thu Oct 29 14:03:56 2009 +0000
@@ -100,7 +100,7 @@ static int hvm_buffered_io_intercept(ior
         qw = 1;
         break;
     default:
-        gdprintk(XENLOG_WARNING, "unexpected ioreq size:%"PRId64"\n", p->size);
+        gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
         return 0;
     }
     bp.data = p->data;
@@ -139,14 +139,7 @@ static void low_mmio_access(VCPU *vcpu, 
 static void low_mmio_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
 {
     struct vcpu *v = current;
-    vcpu_iodata_t *vio;
-    ioreq_t *p;
-
-    vio = get_vio(v);
-    if (!vio)
-        panic_domain(NULL, "bad shared page");
-
-    p = &vio->vp_ioreq;
+    ioreq_t *p = get_vio(v);
 
     p->addr = pa;
     p->size = s;
@@ -159,8 +152,6 @@ static void low_mmio_access(VCPU *vcpu, 
     p->dir = dir;
     p->df = 0;
     p->type = 1;
-
-    p->io_count++;
 
     if (hvm_buffered_io_intercept(p)) {
         p->state = STATE_IORESP_READY;
@@ -310,14 +301,8 @@ static void legacy_io_access(VCPU *vcpu,
 static void legacy_io_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
 {
     struct vcpu *v = current;
-    vcpu_iodata_t *vio;
-    ioreq_t *p;
-
-    vio = get_vio(v);
-    if (!vio)
-        panic_domain(NULL, "bad shared page\n");
-
-    p = &vio->vp_ioreq;
+    ioreq_t *p = get_vio(v);
+
     p->addr = TO_LEGACY_IO(pa & 0x3ffffffUL);
     p->size = s;
     p->count = 1;
@@ -330,8 +315,6 @@ static void legacy_io_access(VCPU *vcpu,
     p->type = 0;
     p->df = 0;
 
-    p->io_count++;
-    
     if (vmx_ide_pio_intercept(p, val))
         return;
 
diff -r 2f9753ddd3d7 -r 444ac8fdb591 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Thu Oct 29 11:50:09 2009 +0000
+++ b/xen/arch/ia64/vmx/vmx_init.c      Thu Oct 29 14:03:56 2009 +0000
@@ -383,10 +383,8 @@ vmx_vcpu_initialise(struct vcpu *v)
        v->arch.arch_vmx.xen_port = rc;
 
        spin_lock(&iorp->lock);
-       if (v->domain->arch.vmx_platform.ioreq.va != 0) {
-               vcpu_iodata_t *p = get_vio(v);
-               p->vp_eport = v->arch.arch_vmx.xen_port;
-       }
+       if (v->domain->arch.vmx_platform.ioreq.va != 0)
+               get_vio(v)->vp_eport = v->arch.arch_vmx.xen_port;
        spin_unlock(&iorp->lock);
 
        gdprintk(XENLOG_INFO, "Allocated port %ld for hvm %d vcpu %d.\n",
@@ -641,7 +639,7 @@ void vmx_do_resume(struct vcpu *v)
 
        /* stolen from hvm_do_resume() in arch/x86/hvm/hvm.c */
        /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
-       p = &get_vio(v)->vp_ioreq;
+       p = get_vio(v);
        while (p->state != STATE_IOREQ_NONE) {
                switch (p->state) {
                case STATE_IORESP_READY: /* IORESP_READY -> NONE */
diff -r 2f9753ddd3d7 -r 444ac8fdb591 xen/arch/ia64/vmx/vmx_support.c
--- a/xen/arch/ia64/vmx/vmx_support.c   Thu Oct 29 11:50:09 2009 +0000
+++ b/xen/arch/ia64/vmx/vmx_support.c   Thu Oct 29 14:03:56 2009 +0000
@@ -35,19 +35,7 @@
  */
 void vmx_io_assist(struct vcpu *v)
 {
-    vcpu_iodata_t *vio;
-    ioreq_t *p;
-
-    /*
-     * This shared page contains I/O request between emulation code
-     * and device model.
-     */
-    vio = get_vio(v);
-    if (!vio)
-        panic_domain(vcpu_regs(v),"Corruption: bad shared page: %lx\n",
-                     (unsigned long)vio);
-
-    p = &vio->vp_ioreq;
+    ioreq_t *p = get_vio(v);
 
     if (p->state == STATE_IORESP_READY) {
         p->state = STATE_IOREQ_NONE;
@@ -63,9 +51,8 @@ void vmx_io_assist(struct vcpu *v)
 
 void vmx_send_assist_req(struct vcpu *v)
 {
-    ioreq_t *p;
+    ioreq_t *p = get_vio(v);
 
-    p = &get_vio(v)->vp_ioreq;
     if (unlikely(p->state != STATE_IOREQ_NONE)) {
         /* This indicates a bug in the device model.  Crash the
            domain. */
diff -r 2f9753ddd3d7 -r 444ac8fdb591 xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c        Thu Oct 29 11:50:09 2009 +0000
+++ b/xen/arch/x86/hvm/emulate.c        Thu Oct 29 14:03:56 2009 +0000
@@ -55,8 +55,7 @@ static int hvmemul_do_io(
     paddr_t value = ram_gpa;
     int value_is_ptr = (p_data == NULL);
     struct vcpu *curr = current;
-    vcpu_iodata_t *vio = get_ioreq(curr);
-    ioreq_t *p = &vio->vp_ioreq;
+    ioreq_t *p = get_ioreq(curr);
     int rc;
 
     /*
@@ -138,7 +137,6 @@ static int hvmemul_do_io(
     p->count = *reps;
     p->df = df;
     p->data = value;
-    p->io_count++;
 
     hvmtrace_io_assist(is_mmio, p);
 
diff -r 2f9753ddd3d7 -r 444ac8fdb591 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Oct 29 11:50:09 2009 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Thu Oct 29 14:03:56 2009 +0000
@@ -256,7 +256,7 @@ void hvm_do_resume(struct vcpu *v)
     pt_restore_timer(v);
 
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
-    p = &get_ioreq(v)->vp_ioreq;
+    p = get_ioreq(v);
     while ( p->state != STATE_IOREQ_NONE )
     {
         switch ( p->state )
@@ -867,7 +867,7 @@ bool_t hvm_send_assist_req(struct vcpu *
     if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
         return 0; /* implicitly bins the i/o operation */
 
-    p = &get_ioreq(v)->vp_ioreq;
+    p = get_ioreq(v);
     if ( unlikely(p->state != STATE_IOREQ_NONE) )
     {
         /* This indicates a bug in the device model. Crash the domain. */
diff -r 2f9753ddd3d7 -r 444ac8fdb591 xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c     Thu Oct 29 11:50:09 2009 +0000
+++ b/xen/arch/x86/hvm/io.c     Thu Oct 29 14:03:56 2009 +0000
@@ -87,7 +87,7 @@ int hvm_buffered_io_send(ioreq_t *p)
         qw = 1;
         break;
     default:
-        gdprintk(XENLOG_WARNING, "unexpected ioreq size:%"PRId64"\n", p->size);
+        gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
         return 0;
     }
     
@@ -148,12 +148,8 @@ void send_invalidate_req(void)
 void send_invalidate_req(void)
 {
     struct vcpu *v = current;
-    vcpu_iodata_t *vio = get_ioreq(v);
-    ioreq_t *p;
-
-    BUG_ON(vio == NULL);
-
-    p = &vio->vp_ioreq;
+    ioreq_t *p = get_ioreq(v);
+
     if ( p->state != STATE_IOREQ_NONE )
     {
         gdprintk(XENLOG_ERR, "WARNING: send invalidate req with something "
@@ -166,7 +162,6 @@ void send_invalidate_req(void)
     p->size = 4;
     p->dir = IOREQ_WRITE;
     p->data = ~0UL; /* flush all */
-    p->io_count++;
 
     (void)hvm_send_assist_req(v);
 }
@@ -221,7 +216,7 @@ void hvm_io_assist(void)
 void hvm_io_assist(void)
 {
     struct vcpu *curr = current;
-    ioreq_t *p = &get_ioreq(curr)->vp_ioreq;
+    ioreq_t *p = get_ioreq(curr);
     enum hvm_io_state io_state;
 
     rmb(); /* see IORESP_READY /then/ read contents of ioreq */
diff -r 2f9753ddd3d7 -r 444ac8fdb591 xen/include/asm-ia64/vmx.h
--- a/xen/include/asm-ia64/vmx.h        Thu Oct 29 11:50:09 2009 +0000
+++ b/xen/include/asm-ia64/vmx.h        Thu Oct 29 14:03:56 2009 +0000
@@ -49,12 +49,12 @@ extern void vmx_pend_pal_init(struct dom
 extern void vmx_pend_pal_init(struct domain *d);
 extern void vmx_lazy_load_fpu(struct vcpu *vcpu);
 
-static inline vcpu_iodata_t *get_vio(struct vcpu *v)
+static inline ioreq_t *get_vio(struct vcpu *v)
 {
     struct domain *d = v->domain;
     shared_iopage_t *p = (shared_iopage_t *)d->arch.vmx_platform.ioreq.va;
     ASSERT((v == current) || spin_is_locked(&d->arch.vmx_platform.ioreq.lock));
     ASSERT(d->arch.vmx_platform.ioreq.va != NULL);
-    return &p->vcpu_iodata[v->vcpu_id];
+    return &p->vcpu_ioreq[v->vcpu_id];
 }
 #endif /* _ASM_IA64_VT_H */
diff -r 2f9753ddd3d7 -r 444ac8fdb591 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Thu Oct 29 11:50:09 2009 +0000
+++ b/xen/include/asm-x86/hvm/support.h Thu Oct 29 14:03:56 2009 +0000
@@ -27,13 +27,13 @@
 #include <asm/regs.h>
 #include <asm/processor.h>
 
-static inline vcpu_iodata_t *get_ioreq(struct vcpu *v)
+static inline ioreq_t *get_ioreq(struct vcpu *v)
 {
     struct domain *d = v->domain;
     shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
     ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
     ASSERT(d->arch.hvm_domain.ioreq.va != NULL);
-    return &p->vcpu_iodata[v->vcpu_id];
+    return &p->vcpu_ioreq[v->vcpu_id];
 }
 
 #define HVM_DELIVER_NO_ERROR_CODE  -1
diff -r 2f9753ddd3d7 -r 444ac8fdb591 xen/include/public/hvm/ioreq.h
--- a/xen/include/public/hvm/ioreq.h    Thu Oct 29 11:50:09 2009 +0000
+++ b/xen/include/public/hvm/ioreq.h    Thu Oct 29 14:03:56 2009 +0000
@@ -43,32 +43,24 @@
  * virq
  */
 struct ioreq {
-    uint64_t addr;          /*  physical address            */
-    uint64_t size;          /*  size in bytes               */
-    uint64_t count;         /*  for rep prefixes            */
-    uint64_t data;          /*  data (or paddr of data)     */
+    uint64_t addr;          /* physical address */
+    uint64_t data;          /* data (or paddr of data) */
+    uint32_t count;         /* for rep prefixes */
+    uint32_t size;          /* size in bytes */
+    uint32_t vp_eport;      /* evtchn for notifications to/from device model */
+    uint16_t _pad0;
     uint8_t state:4;
-    uint8_t data_is_ptr:1;  /*  if 1, data above is the guest paddr 
-                             *   of the real data to use.   */
-    uint8_t dir:1;          /*  1=read, 0=write             */
+    uint8_t data_is_ptr:1;  /* if 1, data above is the guest paddr 
+                             * of the real data to use. */
+    uint8_t dir:1;          /* 1=read, 0=write */
     uint8_t df:1;
-    uint8_t pad:1;
-    uint8_t type;           /* I/O type                     */
-    uint8_t _pad0[6];
-    uint64_t io_count;      /* How many IO done on a vcpu   */
+    uint8_t _pad1:1;
+    uint8_t type;           /* I/O type */
 };
 typedef struct ioreq ioreq_t;
 
-struct vcpu_iodata {
-    struct ioreq vp_ioreq;
-    /* Event channel port, used for notifications to/from the device model. */
-    uint32_t vp_eport;
-    uint32_t _pad0;
-};
-typedef struct vcpu_iodata vcpu_iodata_t;
-
 struct shared_iopage {
-    struct vcpu_iodata   vcpu_iodata[1];
+    struct ioreq vcpu_ioreq[1];
 };
 typedef struct shared_iopage shared_iopage_t;
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] [xen-unstable] Extend max vcpu number for HVM guest, Xen patchbot-unstable <=