
[Xen-devel] [PATCH v2] x86/hvm/ioreq: allow ioreq servers to use HVM_PARAM_[BUF]IOREQ_PFN



Since commit 2c257bd6 "x86/hvm: remove default ioreq server (again)" the
GFNs allocated by the toolstack and set in HVM_PARAM_IOREQ_PFN and
HVM_PARAM_BUFIOREQ_PFN have been unused. This patch allows them to be used
by (non-default) ioreq servers.

While in the area, also make sure HVM_PARAM_[BUF]IOREQ_PFN can only be set
once. These parameters should always have been in the 'set once' category,
but until now that has not been enforced.

NOTE: This fixes a compatibility issue. A guest created on a version of
      Xen that pre-dates the initial ioreq server implementation and then
      migrated in will currently fail to resume, because its migration
      stream lacks values for HVM_PARAM_IOREQ_SERVER_PFN and
      HVM_PARAM_NR_IOREQ_SERVER_PAGES. The exception is a system whose
      emulator domain uses direct resource mapping (which depends on the
      version of privcmd it happens to have): such a domain does not
      require GFNs for the ioreq server shared pages at all.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
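
For illustration, the toolstack side of the scheme described above might
look like the sketch below. This is not part of the patch: the GFN values
are placeholders, error handling is abbreviated, and only the existing
libxenctrl wrapper xc_hvm_param_set() is assumed. With the 'set once'
enforcement in place, a later attempt to change either parameter to a
different value is expected to be refused.

  #include <stdint.h>
  #include <stdio.h>

  #include <xenctrl.h>
  #include <xen/hvm/params.h>

  /* Placeholder GFNs chosen by the toolstack for the ioreq pages. */
  #define IOREQ_GFN    0xfeff0ULL
  #define BUFIOREQ_GFN 0xfeff1ULL

  static int set_ioreq_gfns(xc_interface *xch, uint32_t domid)
  {
      int rc;

      /* Advertise the ioreq and buffered ioreq page GFNs to Xen. */
      rc = xc_hvm_param_set(xch, domid, HVM_PARAM_IOREQ_PFN, IOREQ_GFN);
      if ( rc )
          return rc;

      rc = xc_hvm_param_set(xch, domid, HVM_PARAM_BUFIOREQ_PFN, BUFIOREQ_GFN);
      if ( rc )
          return rc;

      /* A second, different, set of the same param should now fail. */
      rc = xc_hvm_param_set(xch, domid, HVM_PARAM_IOREQ_PFN, IOREQ_GFN + 1);
      if ( rc )
          fprintf(stderr, "re-set refused, as expected\n");

      return 0;
  }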

A similar compatibility issue with migrated-in VMs exists in Xen 4.11,
because upstream QEMU's fall-back to the legacy ioreq server interface was
broken when direct resource mapping was introduced. Prior to the resource
mapping patches it was the *creation* of the (non-default) ioreq server
that failed when GFNs were not available; as of 4.11 it is the retrieval
of the ioreq server info that fails, and that failure does not trigger
the fall-back.
The fix for 4.11 will necessarily be different (because this patch is
predicated on legacy ioreq server support having been removed). I will
post a patch for 4.11 once this patch has been committed.
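
To make the ordering problem concrete, the device model's setup path is
roughly as sketched below. This is illustrative only, not QEMU's actual
code; it only assumes the libxendevicemodel calls that the fall-back logic
is built around, and ioreq_server_usable()/fall_back_to_legacy are names
invented for the sketch.

  #include <stdbool.h>

  #include <xendevicemodel.h>

  static bool ioreq_server_usable(xendevicemodel_handle *dmod, domid_t domid,
                                  bool *fall_back_to_legacy)
  {
      ioservid_t id;
      xen_pfn_t ioreq_pfn, bufioreq_pfn;
      evtchn_port_t bufioreq_port;

      *fall_back_to_legacy = false;

      if ( xendevicemodel_create_ioreq_server(dmod, domid,
                                              HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                              &id) < 0 )
      {
          /* The only failure that triggers the legacy fall-back. */
          *fall_back_to_legacy = true;
          return false;
      }

      /*
       * As of 4.11 this is the call that fails when no GFNs are
       * available, but by this point the chance to fall back has
       * already gone.
       */
      return xendevicemodel_get_ioreq_server_info(dmod, domid, id,
                                                  &ioreq_pfn, &bufioreq_pfn,
                                                  &bufioreq_port) == 0;
  }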

v2:
 - Squash the separate 'set once' patch into this one.
 - Use a clear-to-allocate mask to make sure that we don't accidentally
   allocate GFN 0 if a bug in the toolstack leaves the params unset (see
   the standalone sketch below).
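
To spell out the mask discipline, here is a standalone sketch in plain C
(using ordinary bit operations rather than Xen's bitops, with made-up
param indices and GFN values): setting a param marks its legacy GFN as
allocatable, allocation clears the bit, and a param that was never set
can therefore never hand out GFN 0.

  #include <assert.h>
  #include <stdbool.h>
  #include <stdint.h>

  #define PARAM_IOREQ_PFN    5   /* stand-ins for the HVM param indices */
  #define PARAM_BUFIOREQ_PFN 6

  static uint64_t params[8];        /* HVM param values; 0 == never set  */
  static unsigned long legacy_mask; /* bit set == legacy GFN allocatable */

  static void set_param(unsigned int idx, uint64_t gfn)
  {
      params[idx] = gfn;
      legacy_mask |= 1UL << idx;    /* mark the legacy GFN as available */
  }

  /* Hands out a GFN only if the corresponding param was actually set. */
  static bool alloc_legacy_gfn(uint64_t *gfn)
  {
      unsigned int i;

      for ( i = PARAM_IOREQ_PFN; i <= PARAM_BUFIOREQ_PFN; i++ )
      {
          if ( legacy_mask & (1UL << i) )
          {
              legacy_mask &= ~(1UL << i); /* clear-to-allocate */
              *gfn = params[i];
              return true;
          }
      }

      return false;
  }

  int main(void)
  {
      uint64_t gfn;

      /* Params left unset: nothing is allocatable, GFN 0 is never used. */
      assert(!alloc_legacy_gfn(&gfn));

      set_param(PARAM_IOREQ_PFN, 0xfeff0);
      assert(alloc_legacy_gfn(&gfn) && gfn == 0xfeff0);
      assert(!alloc_legacy_gfn(&gfn)); /* already handed out */

      return 0;
  }
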
---
 xen/arch/x86/hvm/hvm.c           | 11 ++++++++++
 xen/arch/x86/hvm/ioreq.c         | 46 ++++++++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/hvm/domain.h |  3 ++-
 3 files changed, 57 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6c1301df42..f4765621f5 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4071,6 +4071,8 @@ static int hvm_allow_set_param(struct domain *d,
     {
     /* The following parameters should only be changed once. */
     case HVM_PARAM_VIRIDIAN:
+    case HVM_PARAM_IOREQ_PFN:
+    case HVM_PARAM_BUFIOREQ_PFN:
     case HVM_PARAM_IOREQ_SERVER_PFN:
     case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
     case HVM_PARAM_ALTP2M:
@@ -4242,6 +4244,15 @@ static int hvmop_set_param(
 
         break;
     }
+    case HVM_PARAM_IOREQ_PFN:
+    case HVM_PARAM_BUFIOREQ_PFN:
+        BUILD_BUG_ON(HVM_PARAM_IOREQ_PFN >
+                     sizeof(d->arch.hvm.ioreq_gfn.legacy_mask) * 8);
+        BUILD_BUG_ON(HVM_PARAM_BUFIOREQ_PFN >
+                     sizeof(d->arch.hvm.ioreq_gfn.legacy_mask) * 8);
+        set_bit(a.index, &d->arch.hvm.ioreq_gfn.legacy_mask);
+        break;
+
     case HVM_PARAM_X87_FIP_WIDTH:
         if ( a.value != 0 && a.value != 4 && a.value != 8 )
         {
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 3569beaad5..e2e755a8a1 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -237,6 +237,22 @@ bool handle_hvm_io_completion(struct vcpu *v)
     return true;
 }
 
+static gfn_t hvm_alloc_legacy_ioreq_gfn(struct hvm_ioreq_server *s)
+{
+    struct domain *d = s->target;
+    unsigned int i;
+
+    BUILD_BUG_ON(HVM_PARAM_BUFIOREQ_PFN != HVM_PARAM_IOREQ_PFN + 1);
+
+    for ( i = HVM_PARAM_IOREQ_PFN; i <= HVM_PARAM_BUFIOREQ_PFN; i++ )
+    {
+        if ( test_and_clear_bit(i, &d->arch.hvm.ioreq_gfn.legacy_mask) )
+            return _gfn(d->arch.hvm.params[i]);
+    }
+
+    return INVALID_GFN;
+}
+
 static gfn_t hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
 {
     struct domain *d = s->target;
@@ -248,7 +264,29 @@ static gfn_t hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
             return _gfn(d->arch.hvm.ioreq_gfn.base + i);
     }
 
-    return INVALID_GFN;
+    /*
+     * If we are out of 'normal' GFNs then we may still have a 'legacy'
+     * GFN available.
+     */
+    return hvm_alloc_legacy_ioreq_gfn(s);
+}
+
+static bool hvm_free_legacy_ioreq_gfn(struct hvm_ioreq_server *s,
+                                      gfn_t gfn)
+{
+    struct domain *d = s->target;
+    unsigned int i;
+
+    for ( i = HVM_PARAM_IOREQ_PFN; i <= HVM_PARAM_BUFIOREQ_PFN; i++ )
+    {
+        if ( gfn_eq(gfn, _gfn(d->arch.hvm.params[i])) )
+             break;
+    }
+    if ( i > HVM_PARAM_BUFIOREQ_PFN )
+        return false;
+
+    set_bit(i, &d->arch.hvm.ioreq_gfn.legacy_mask);
+    return true;
 }
 
 static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s, gfn_t gfn)
@@ -258,7 +296,11 @@ static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s, gfn_t gfn)
 
     ASSERT(!gfn_eq(gfn, INVALID_GFN));
 
-    set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
+    if ( !hvm_free_legacy_ioreq_gfn(s, gfn) )
+    {
+        ASSERT(i < sizeof(d->arch.hvm.ioreq_gfn.mask) * 8);
+        set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
+    }
 }
 
 static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 80b2ab041e..3e7331817f 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -95,7 +95,8 @@ struct hvm_domain {
     /* Guest page range used for non-default ioreq servers */
     struct {
         unsigned long base;
-        unsigned long mask;
+        unsigned long mask; /* indexed by GFN minus base */
+        unsigned long legacy_mask; /* indexed by HVM param number */
     } ioreq_gfn;
 
     /* Lock protects all other values in the sub-struct and the default */
-- 
2.11.0

