
[Xen-devel] [PATCH v4] x86/hvm: remove default ioreq server (again)



My recent patch [1] to qemu-xen-traditional removes the last use of the
'default' ioreq server in Xen. (This is a catch-all ioreq server that is
used if no explicitly registered I/O range is targeted.)
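
(For reference, an emulator using the explicit interface creates its own
ioreq server and registers the ranges it wants to handle, roughly as in
the sketch below. This is illustrative only and not part of the patch;
the libxendevicemodel call names are quoted from memory and may need
adjusting against the tree you build with.)

/* Sketch: explicit ioreq server setup via libxendevicemodel. */
#include <stdint.h>
#include <xendevicemodel.h>

int register_portio_range(domid_t domid, uint64_t start, uint64_t end)
{
    xendevicemodel_handle *dmod = xendevicemodel_open(NULL, 0);
    ioservid_t id;
    int rc;

    if ( !dmod )
        return -1;

    /* Create our own ioreq server; nothing relies on a 'default' one. */
    rc = xendevicemodel_create_ioreq_server(dmod, domid,
                                            HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                            &id);
    if ( rc )
        goto out;

    /* Only accesses that hit an explicitly registered range are routed
     * to this server; nothing falls through to a catch-all any more. */
    rc = xendevicemodel_map_io_range_to_ioreq_server(dmod, domid, id,
                                                     0 /* port I/O */,
                                                     start, end);
    if ( rc )
        goto out;

    rc = xendevicemodel_set_ioreq_server_state(dmod, domid, id, 1);

 out:
    xendevicemodel_close(dmod);
    return rc;
}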

Once that patch is committed, this patch can be applied to remove the
now-redundant code (more than 100 lines) in Xen.

The previous version of this patch caused a QEMU build failure. This has
been fixed by extending the #ifdef around the deprecated HVM_PARAM
definitions to cover __XEN_TOOLS__ as well as __XEN__, so that the tools
build still sees them.

NOTE: The removal of the special case for HVM_PARAM_DM_DOMAIN in
      hvm_allow_set_param() is not directly related to removal of
      default ioreq servers. It could have been cleaned up at any time
      after commit 9a422c03 "x86/hvm: stop passing explicit domid to
      hvm_create_ioreq_server()". It is now added to the new
      deprecated sets introduced by this patch.

[1] https://lists.xenproject.org/archives/html/xen-devel/2018-08/msg00270.html
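
(For completeness: with this change the deprecated parameters are simply
rejected by the hypervisor rather than creating a default ioreq server as
a side effect. A toolstack probe would now see errors roughly as in the
hedged sketch below, using what I believe are the usual libxenctrl
wrappers; again, illustrative only and not part of the patch.)

/* Sketch: toolstack-visible effect of the deprecation. Note that, after
 * this patch, the two parameter definitions are only visible to builds
 * defining __XEN__ or __XEN_TOOLS__ (xenctrl.h normally defines the
 * latter). */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <xenctrl.h>
#include <xen/hvm/params.h>

void probe_deprecated_params(xc_interface *xch, uint32_t domid)
{
    uint64_t value;

    /* Reads could previously create a default ioreq server during domain
     * construction; they now fail (expected errno: ENODATA). */
    if ( xc_hvm_param_get(xch, domid, HVM_PARAM_BUFIOREQ_EVTCHN, &value) )
        fprintf(stderr, "get BUFIOREQ_EVTCHN: %s\n", strerror(errno));

    /* Writes to the deprecated parameters are refused (expected errno:
     * EPERM). */
    if ( xc_hvm_param_set(xch, domid, HVM_PARAM_DM_DOMAIN, 0) )
        fprintf(stderr, "set DM_DOMAIN: %s\n", strerror(errno));
}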

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>

v4:
 - Re-base.
 - Pull deprecated params up to the top of the header.
 - Fix build failure and extend commit comment noting this.
 - Drop Andrew's A-b due to re-work.

v3:
 - Add HVM_PARAM_DM_DOMAIN to deprecated sets and #ifdef in params.h.

v2:
 - Disallow reads or writes of HVM_PARAM_BUFIOREQ_EVTCHN and mark it as
   deprecated in the header.
 - Updated commit comment w.r.t. HVM_PARAM_DM_DOMAIN change.
---
 xen/arch/x86/hvm/dm.c            |   2 +-
 xen/arch/x86/hvm/hvm.c           |  38 +++---------
 xen/arch/x86/hvm/ioreq.c         | 122 +++++----------------------------------
 xen/include/asm-x86/hvm/domain.h |   1 -
 xen/include/asm-x86/hvm/ioreq.h  |   4 +-
 xen/include/public/hvm/params.h  |  12 ++--
 6 files changed, 36 insertions(+), 143 deletions(-)

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 87d97d055f..ba39e4f4f9 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -411,7 +411,7 @@ static int dm_op(const struct dmop_args *op_args)
         if ( data->pad[0] || data->pad[1] || data->pad[2] )
             break;
 
-        rc = hvm_create_ioreq_server(d, false, data->handle_bufioreq,
+        rc = hvm_create_ioreq_server(d, data->handle_bufioreq,
                                      &data->id);
         break;
     }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ac067a8d38..64373ced20 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4084,12 +4084,16 @@ static int hvm_allow_set_param(struct domain *d,
     case HVM_PARAM_CONSOLE_EVTCHN:
     case HVM_PARAM_X87_FIP_WIDTH:
         break;
+    /* The following parameters are deprecated. */
+    case HVM_PARAM_DM_DOMAIN:
+    case HVM_PARAM_BUFIOREQ_EVTCHN:
+        rc = -EPERM;
+        break;
     /*
      * The following parameters must not be set by the guest
      * since the domain may need to be paused.
      */
     case HVM_PARAM_IDENT_PT:
-    case HVM_PARAM_DM_DOMAIN:
     case HVM_PARAM_ACPI_S_STATE:
     /* The remaining parameters should not be set by the guest. */
     default:
@@ -4254,9 +4258,6 @@ static int hvmop_set_param(
              d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
             rc = -EINVAL;
         break;
-    case HVM_PARAM_BUFIOREQ_EVTCHN:
-        rc = -EINVAL;
-        break;
     case HVM_PARAM_TRIPLE_FAULT_REASON:
         if ( a.value > SHUTDOWN_MAX )
             rc = -EINVAL;
@@ -4364,13 +4365,11 @@ static int hvm_allow_get_param(struct domain *d,
     case HVM_PARAM_ALTP2M:
     case HVM_PARAM_X87_FIP_WIDTH:
         break;
-    /*
-     * The following parameters must not be read by the guest
-     * since the domain may need to be paused.
-     */
-    case HVM_PARAM_IOREQ_PFN:
-    case HVM_PARAM_BUFIOREQ_PFN:
+    /* The following parameters are deprecated. */
+    case HVM_PARAM_DM_DOMAIN:
     case HVM_PARAM_BUFIOREQ_EVTCHN:
+        rc = -ENODATA;
+        break;
     /* The remaining parameters should not be read by the guest. */
     default:
         if ( d == current->domain )
@@ -4424,25 +4423,6 @@ static int hvmop_get_param(
     case HVM_PARAM_X87_FIP_WIDTH:
         a.value = d->arch.x87_fip_width;
         break;
-    case HVM_PARAM_IOREQ_PFN:
-    case HVM_PARAM_BUFIOREQ_PFN:
-    case HVM_PARAM_BUFIOREQ_EVTCHN:
-        /*
-         * It may be necessary to create a default ioreq server here,
-         * because legacy versions of QEMU are not aware of the new API for
-         * explicit ioreq server creation. However, if the domain is not
-         * under construction then it will not be QEMU querying the
-         * parameters and thus the query should not have that side-effect.
-         */
-        if ( !d->creation_finished )
-        {
-            rc = hvm_create_ioreq_server(d, true,
-                                         HVM_IOREQSRV_BUFIOREQ_LEGACY, NULL);
-            if ( rc != 0 && rc != -EEXIST )
-                goto out;
-        }
-
-    /*FALLTHRU*/
     default:
         a.value = d->arch.hvm.params[a.index];
         break;
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 138ed697cd..b3e1a3a36f 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -55,9 +55,6 @@ static struct hvm_ioreq_server *get_ioreq_server(const struct domain *d,
     return GET_IOREQ_SERVER(d, id);
 }
 
-#define IS_DEFAULT(s) \
-    ((s) && (s) == GET_IOREQ_SERVER((s)->target, DEFAULT_IOSERVID))
-
 /*
  * Iterate over all possible ioreq servers.
  *
@@ -245,8 +242,6 @@ static gfn_t hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
     struct domain *d = s->target;
     unsigned int i;
 
-    ASSERT(!IS_DEFAULT(s));
-
     for ( i = 0; i < sizeof(d->arch.hvm.ioreq_gfn.mask) * 8; i++ )
     {
         if ( test_and_clear_bit(i, &d->arch.hvm.ioreq_gfn.mask) )
@@ -261,7 +256,6 @@ static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s, gfn_t gfn)
     struct domain *d = s->target;
     unsigned int i = gfn_x(gfn) - d->arch.hvm.ioreq_gfn.base;
 
-    ASSERT(!IS_DEFAULT(s));
     ASSERT(!gfn_eq(gfn, INVALID_GFN));
 
     set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
@@ -277,9 +271,7 @@ static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
     destroy_ring_for_helper(&iorp->va, iorp->page);
     iorp->page = NULL;
 
-    if ( !IS_DEFAULT(s) )
-        hvm_free_ioreq_gfn(s, iorp->gfn);
-
+    hvm_free_ioreq_gfn(s, iorp->gfn);
     iorp->gfn = INVALID_GFN;
 }
 
@@ -305,12 +297,7 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
     if ( d->is_dying )
         return -EINVAL;
 
-    if ( IS_DEFAULT(s) )
-        iorp->gfn = _gfn(buf ?
-                         d->arch.hvm.params[HVM_PARAM_BUFIOREQ_PFN] :
-                         d->arch.hvm.params[HVM_PARAM_IOREQ_PFN]);
-    else
-        iorp->gfn = hvm_alloc_ioreq_gfn(s);
+    iorp->gfn = hvm_alloc_ioreq_gfn(s);
 
     if ( gfn_eq(iorp->gfn, INVALID_GFN) )
         return -ENOMEM;
@@ -416,7 +403,7 @@ static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
     struct domain *d = s->target;
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
 
-    if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
+    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
         return;
 
     if ( guest_physmap_remove_page(d, iorp->gfn,
@@ -431,7 +418,7 @@ static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
     int rc;
 
-    if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
+    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
         return 0;
 
     clear_page(iorp->va);
@@ -483,17 +470,12 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
 
     if ( v->vcpu_id == 0 && HANDLE_BUFIOREQ(s) )
     {
-        struct domain *d = s->target;
-
         rc = alloc_unbound_xen_event_channel(v->domain, 0,
                                              s->emulator->domain_id, NULL);
         if ( rc < 0 )
             goto fail3;
 
         s->bufioreq_evtchn = rc;
-        if ( IS_DEFAULT(s) )
-            d->arch.hvm.params[HVM_PARAM_BUFIOREQ_EVTCHN] =
-                s->bufioreq_evtchn;
     }
 
     sv->vcpu = v;
@@ -617,9 +599,6 @@ static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
 {
     unsigned int i;
 
-    if ( IS_DEFAULT(s) )
-        return;
-
     for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
         rangeset_destroy(s->range[i]);
 }
@@ -630,11 +609,6 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
     unsigned int i;
     int rc;
 
-    if ( id == DEFAULT_IOSERVID )
-        goto done;
-
-    ASSERT(!IS_DEFAULT(s));
-
     for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
     {
         char *name;
@@ -659,7 +633,6 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
         rangeset_limit(s->range[i], MAX_NR_IO_RANGES);
     }
 
- done:
     return 0;
 
  fail:
@@ -733,13 +706,6 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
 
     s->bufioreq_handling = bufioreq_handling;
 
-    if ( id == DEFAULT_IOSERVID )
-    {
-        rc = hvm_ioreq_server_map_pages(s);
-        if ( rc )
-            goto fail_map;
-    }
-
     for_each_vcpu ( d, v )
     {
         rc = hvm_ioreq_server_add_vcpu(s, v);
@@ -753,7 +719,6 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
     hvm_ioreq_server_remove_all_vcpus(s);
     hvm_ioreq_server_unmap_pages(s);
 
- fail_map:
     hvm_ioreq_server_free_rangesets(s);
 
     put_domain(s->emulator);
@@ -782,8 +747,8 @@ static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
     put_domain(s->emulator);
 }
 
-int hvm_create_ioreq_server(struct domain *d, bool is_default,
-                            int bufioreq_handling, ioservid_t *id)
+int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
+                            ioservid_t *id)
 {
     struct hvm_ioreq_server *s;
     unsigned int i;
@@ -799,32 +764,19 @@ int hvm_create_ioreq_server(struct domain *d, bool is_default,
     domain_pause(d);
     spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
-    if ( is_default )
+    for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
     {
-        i = DEFAULT_IOSERVID;
-
-        rc = -EEXIST;
-        if ( GET_IOREQ_SERVER(d, i) )
-            goto fail;
+        if ( !GET_IOREQ_SERVER(d, i) )
+            break;
     }
-    else
-    {
-        for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
-        {
-            if ( i != DEFAULT_IOSERVID && !GET_IOREQ_SERVER(d, i) )
-                break;
-        }
 
-        rc = -ENOSPC;
-        if ( i >= MAX_NR_IOREQ_SERVERS )
-            goto fail;
-    }
+    rc = -ENOSPC;
+    if ( i >= MAX_NR_IOREQ_SERVERS )
+        goto fail;
 
     /*
      * It is safe to call set_ioreq_server() prior to
      * hvm_ioreq_server_init() since the target domain is paused.
-     * It is necessary for the calls to be ordered thus otherwise
-     * the IS_DEFAULT() macro would not evaluate correctly.
      */
     set_ioreq_server(d, i, s);
 
@@ -835,9 +787,6 @@ int hvm_create_ioreq_server(struct domain *d, bool is_default,
         goto fail;
     }
 
-    if ( i == DEFAULT_IOSERVID )
-        hvm_ioreq_server_enable(s);
-
     if ( id )
         *id = i;
 
@@ -859,9 +808,6 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
     struct hvm_ioreq_server *s;
     int rc;
 
-    if ( id == DEFAULT_IOSERVID )
-        return -EPERM;
-
     spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
@@ -870,8 +816,6 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
     if ( !s )
         goto out;
 
-    ASSERT(!IS_DEFAULT(s));
-
     rc = -EPERM;
     if ( s->emulator != current->domain )
         goto out;
@@ -884,9 +828,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
 
     /*
      * It is safe to call hvm_ioreq_server_deinit() prior to
-     * set_ioreq_server() since the target domain is paused. It is
-     * necessary for the calls to be ordered thus otherwise the
-     * IS_DEFAULT() macro would not evaluate correctly.
+     * set_ioreq_server() since the target domain is paused.
      */
     hvm_ioreq_server_deinit(s);
     set_ioreq_server(d, id, NULL);
@@ -911,9 +853,6 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     struct hvm_ioreq_server *s;
     int rc;
 
-    if ( id == DEFAULT_IOSERVID )
-        return -EOPNOTSUPP;
-
     spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
@@ -922,8 +861,6 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     if ( !s )
         goto out;
 
-    ASSERT(!IS_DEFAULT(s));
-
     rc = -EPERM;
     if ( s->emulator != current->domain )
         goto out;
@@ -961,9 +898,6 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
     struct hvm_ioreq_server *s;
     int rc;
 
-    if ( id == DEFAULT_IOSERVID )
-        return -EOPNOTSUPP;
-
     if ( !is_hvm_domain(d) )
         return -EINVAL;
 
@@ -975,8 +909,6 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
     if ( !s )
         goto out;
 
-    ASSERT(!IS_DEFAULT(s));
-
     rc = -EPERM;
     if ( s->emulator != current->domain )
         goto out;
@@ -1023,9 +955,6 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
     if ( start > end )
         return -EINVAL;
 
-    if ( id == DEFAULT_IOSERVID )
-        return -EOPNOTSUPP;
-
     spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
@@ -1034,8 +963,6 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
     if ( !s )
         goto out;
 
-    ASSERT(!IS_DEFAULT(s));
-
     rc = -EPERM;
     if ( s->emulator != current->domain )
         goto out;
@@ -1080,9 +1007,6 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
     if ( start > end )
         return -EINVAL;
 
-    if ( id == DEFAULT_IOSERVID )
-        return -EOPNOTSUPP;
-
     spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
@@ -1091,8 +1015,6 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
     if ( !s )
         goto out;
 
-    ASSERT(!IS_DEFAULT(s));
-
     rc = -EPERM;
     if ( s->emulator != current->domain )
         goto out;
@@ -1140,9 +1062,6 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
     struct hvm_ioreq_server *s;
     int rc;
 
-    if ( id == DEFAULT_IOSERVID )
-        return -EOPNOTSUPP;
-
     if ( type != HVMMEM_ioreq_server )
         return -EINVAL;
 
@@ -1157,8 +1076,6 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
     if ( !s )
         goto out;
 
-    ASSERT(!IS_DEFAULT(s));
-
     rc = -EPERM;
     if ( s->emulator != current->domain )
         goto out;
@@ -1185,9 +1102,6 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
     struct hvm_ioreq_server *s;
     int rc;
 
-    if ( id == DEFAULT_IOSERVID )
-        return -EOPNOTSUPP;
-
     spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
@@ -1196,8 +1110,6 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
     if ( !s )
         goto out;
 
-    ASSERT(!IS_DEFAULT(s));
-
     rc = -EPERM;
     if ( s->emulator != current->domain )
         goto out;
@@ -1282,8 +1194,6 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
         /*
          * It is safe to call hvm_ioreq_server_deinit() prior to
          * set_ioreq_server() since the target domain is being destroyed.
-         * It is necessary for the calls to be ordered thus otherwise the
-         * IS_DEFAULT() macro would not evaluate correctly.
          */
         hvm_ioreq_server_deinit(s);
         set_ioreq_server(d, id, NULL);
@@ -1304,7 +1214,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
     unsigned int id;
 
     if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
-        return GET_IOREQ_SERVER(d, DEFAULT_IOSERVID);
+        return NULL;
 
     cf8 = d->arch.hvm.pci_cf8;
 
@@ -1346,7 +1256,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
     {
         struct rangeset *r;
 
-        if ( IS_DEFAULT(s) || !s->enabled )
+        if ( !s->enabled )
             continue;
 
         r = s->range[type];
@@ -1384,7 +1294,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
         }
     }
 
-    return GET_IOREQ_SERVER(d, DEFAULT_IOSERVID);
+    return NULL;
 }
 
 static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 7388cd895e..fa7ebb9a4e 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -90,7 +90,6 @@ struct hvm_pi_ops {
 };
 
 #define MAX_NR_IOREQ_SERVERS 8
-#define DEFAULT_IOSERVID 0
 
 struct hvm_domain {
     /* Guest page range used for non-default ioreq servers */
diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
index bab473cf44..e2588e912f 100644
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -23,8 +23,8 @@ bool hvm_io_pending(struct vcpu *v);
 bool handle_hvm_io_completion(struct vcpu *v);
 bool is_ioreq_server_page(struct domain *d, const struct page_info *page);
 
-int hvm_create_ioreq_server(struct domain *d, bool is_default,
-                            int bufioreq_handling, ioservid_t *id);
+int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
+                            ioservid_t *id);
 int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id);
 int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
                               unsigned long *ioreq_gfn,
diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h
index 396977c2bb..72f633ef2d 100644
--- a/xen/include/public/hvm/params.h
+++ b/xen/include/public/hvm/params.h
@@ -25,6 +25,14 @@
 
 #include "hvm_op.h"
 
+/* These parameters are deprecated and their meaning is undefined. */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+#define HVM_PARAM_DM_DOMAIN 13
+#define HVM_PARAM_BUFIOREQ_EVTCHN 26
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
 /*
  * Parameter space for HVMOP_{set,get}_param.
  */
@@ -83,7 +91,6 @@
 #define HVM_PARAM_IOREQ_PFN    5
 
 #define HVM_PARAM_BUFIOREQ_PFN 6
-#define HVM_PARAM_BUFIOREQ_EVTCHN 26
 
 #if defined(__i386__) || defined(__x86_64__)
 
@@ -181,9 +188,6 @@
 /* Identity-map page directory used by Intel EPT when CR0.PG=0. */
 #define HVM_PARAM_IDENT_PT     12
 
-/* Device Model domain, defaults to 0. */
-#define HVM_PARAM_DM_DOMAIN    13
-
 /* ACPI S state: currently support S0 and S3 on x86. */
 #define HVM_PARAM_ACPI_S_STATE 14
 
-- 
2.11.0

