
[Xen-devel] [PATCH v12 09/11] x86/domctl: Don't pause the whole domain if only getting vcpu state


  • To: xen-devel@xxxxxxxxxxxxx
  • From: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
  • Date: Mon, 16 Jul 2018 17:55:35 +0300
  • Cc: wei.liu2@xxxxxxxxxx, andrew.cooper3@xxxxxxxxxx, ian.jackson@xxxxxxxxxxxxx, paul.durrant@xxxxxxxxxx, jbeulich@xxxxxxxx, Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
  • Delivery-date: Mon, 16 Jul 2018 14:56:09 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

This patch moves the for loop from the save handlers to their callers, so
the state of a single vcpu instance can now be saved through the save_one
handlers; this in turn lets hvm_save_one() pause only the vcpu being
queried rather than the whole domain.
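
For reference, a minimal standalone sketch of the calling pattern this moves
towards is included below. The types, the vcpu_pause()/vcpu_unpause() stubs
and the handler name are simplified stand-ins, not the real Xen interfaces;
the point is only that once the loop lives in the caller, the same save_one
handler can serve both a whole-domain save and a single-vcpu query that
pauses just the targeted vcpu.

    /* Standalone sketch of the save_one dispatch pattern: simplified
     * stand-in types, not Xen's real structures or handler table. */
    #include <stdio.h>

    #define MAX_VCPUS 4

    struct vcpu   { unsigned int id; int paused; };
    struct domain { struct vcpu vcpu[MAX_VCPUS]; unsigned int max_vcpus; };
    struct ctx    { unsigned int cur; };

    typedef int (*save_one_handler)(struct vcpu *v, struct ctx *c);

    /* Per-vcpu handler: records state for exactly one vcpu. */
    static int cpu_ctxt_save_one(struct vcpu *v, struct ctx *c)
    {
        printf("saving vcpu %u\n", v->id);
        c->cur++;
        return 0;
    }

    static void vcpu_pause(struct vcpu *v)   { v->paused = 1; }
    static void vcpu_unpause(struct vcpu *v) { v->paused = 0; }

    /* The caller owns the loop: either walk every vcpu ... */
    static int save_all(struct domain *d, save_one_handler save_one,
                        struct ctx *c)
    {
        unsigned int i;
        int rc;

        for ( i = 0; i < d->max_vcpus; i++ )
            if ( (rc = save_one(&d->vcpu[i], c)) != 0 )
                return rc;
        return 0;
    }

    /* ... or pause and save just the requested instance. */
    static int save_single(struct domain *d, unsigned int instance,
                           save_one_handler save_one, struct ctx *c)
    {
        int rc;

        if ( instance >= d->max_vcpus )
            return -1;

        vcpu_pause(&d->vcpu[instance]);      /* only this vcpu is paused */
        rc = save_one(&d->vcpu[instance], c);
        vcpu_unpause(&d->vcpu[instance]);

        return rc;
    }

    int main(void)
    {
        struct domain d = { .vcpu = { { 0 }, { 1 }, { 2 }, { 3 } },
                            .max_vcpus = MAX_VCPUS };
        struct ctx c = { 0 };

        save_all(&d, cpu_ctxt_save_one, &c);       /* whole-domain save */
        save_single(&d, 2, cpu_ctxt_save_one, &c); /* single-vcpu query */
        return 0;
    }

The real single-vcpu path in hvm_save_one() below additionally copies the
produced record into the caller's buffer.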

Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>

---
Changes since V11:
        - Changed the CONTINUE return to return 0.
---
 xen/arch/x86/hvm/hvm.c  |  18 +++----
 xen/arch/x86/hvm/save.c | 137 +++++++++++++++++++++++++++++++++++++-----------
 2 files changed, 115 insertions(+), 40 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c458dab..ac0b496 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -793,6 +793,13 @@ static int hvm_save_cpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
     struct segment_register seg;
     struct hvm_hw_cpu ctxt;
 
+    /*
+     * We don't need to save state for a vcpu that is down; the restore
+     * code will leave it down if there is nothing saved.
+     */
+    if ( v->pause_flags & VPF_down )
+        return 0;
+
     memset(&ctxt, 0, sizeof(ctxt));
 
     /* Architecture-specific vmcs/vmcb bits */
@@ -899,13 +906,6 @@ static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 
     for_each_vcpu ( d, v )
     {
-        /*
-         * We don't need to save state for a vcpu that is down; the restore
-         * code will leave it down if there is nothing saved.
-         */
-        if ( v->pause_flags & VPF_down )
-            continue;
-
         err = hvm_save_cpu_ctxt_one(v, h);
         if ( err )
             break;
@@ -1198,7 +1198,7 @@ static int hvm_save_cpu_xsave_states_one(struct vcpu *v, hvm_domain_context_t *h
     unsigned int size = HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum);
     int err = 0;
 
-    if ( !cpu_has_xsave )
+    if ( !cpu_has_xsave || !xsave_enabled(v) )
         return 0;   /* do nothing */
 
     err = _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, size);
@@ -1223,8 +1223,6 @@ static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
 
     for_each_vcpu ( d, v )
     {
-        if ( !xsave_enabled(v) )
-            continue;
         err = hvm_save_cpu_xsave_states_one(v, h);
         if ( err )
             break;
diff --git a/xen/arch/x86/hvm/save.c b/xen/arch/x86/hvm/save.c
index b674937..d57648d 100644
--- a/xen/arch/x86/hvm/save.c
+++ b/xen/arch/x86/hvm/save.c
@@ -138,9 +138,12 @@ size_t hvm_save_size(struct domain *d)
 int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance,
                  XEN_GUEST_HANDLE_64(uint8) handle, uint64_t *bufsz)
 {
-    int rv;
+    int rv = 0;
     hvm_domain_context_t ctxt = { };
     const struct hvm_save_descriptor *desc;
+    bool is_single_instance = false;
+    uint32_t off = 0;
+    struct vcpu *v;
 
     if ( d->is_dying ||
          typecode > HVM_SAVE_CODE_MAX ||
@@ -148,43 +151,94 @@ int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance,
          !hvm_sr_handlers[typecode].save )
         return -EINVAL;
 
+    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU &&
+         instance < d->max_vcpus )
+        is_single_instance = true;
+
     ctxt.size = hvm_sr_handlers[typecode].size;
-    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU )
+    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU &&
+         instance == d->max_vcpus )
         ctxt.size *= d->max_vcpus;
     ctxt.data = xmalloc_bytes(ctxt.size);
     if ( !ctxt.data )
         return -ENOMEM;
 
-    if ( (rv = hvm_sr_handlers[typecode].save(d, &ctxt)) != 0 )
-        printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
-               d->domain_id, typecode, rv);
-    else if ( rv = -ENOENT, ctxt.cur >= sizeof(*desc) )
+    if ( is_single_instance )
+        vcpu_pause(d->vcpu[instance]);
+    else
+        domain_pause(d);
+
+    if ( is_single_instance )
     {
-        uint32_t off;
+        if ( hvm_sr_handlers[typecode].save_one != NULL )
+            rv = hvm_sr_handlers[typecode].save_one(d->vcpu[instance],
+                                                    &ctxt);
+        else
+            rv = hvm_sr_handlers[typecode].save(d, &ctxt);
 
-        for ( off = 0; off <= (ctxt.cur - sizeof(*desc)); off += desc->length )
+        if ( rv != 0 )
         {
-            desc = (void *)(ctxt.data + off);
-            /* Move past header */
-            off += sizeof(*desc);
-            if ( ctxt.cur < desc->length ||
-                 off > ctxt.cur - desc->length )
-                break;
-            if ( instance == desc->instance )
-            {
-                rv = 0;
-                if ( guest_handle_is_null(handle) )
-                    *bufsz = desc->length;
-                else if ( *bufsz < desc->length )
-                    rv = -ENOBUFS;
-                else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
-                    rv = -EFAULT;
-                else
-                    *bufsz = desc->length;
-                break;
-            }
+            printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
+                   d->domain_id, typecode, rv);
+            vcpu_unpause(d->vcpu[instance]);
+        }
+        else if ( ctxt.cur >= sizeof(*desc) )
+        {
+            desc = (void *)(ctxt.data);
+            /* Move past header */
+            off = sizeof(*desc);
+            if ( ctxt.cur < desc->length ||
+                 off > ctxt.cur - desc->length )
+                rv = -EFAULT;
+            else if ( guest_handle_is_null(handle) )
+                *bufsz = desc->length;
+            else if ( *bufsz < desc->length )
+                rv = -ENOBUFS;
+            else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
+                rv = -EFAULT;
+            else
+                *bufsz = desc->length;
+            vcpu_unpause(d->vcpu[instance]);
+        }
     }
+    else
+    {
+        for_each_vcpu ( d, v )
+        {
+            if ( (rv = hvm_sr_handlers[typecode].save(d, &ctxt)) != 0 )
+            {
+                printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
+                       d->domain_id, typecode, rv);
+            }
+            else if ( ctxt.cur >= sizeof(*desc) )
+            {
+                rv = -ENOENT;
+                desc = (void *)(ctxt.data + off);
+                /* Move past header */
+                off += sizeof(*desc);
+                if ( ctxt.cur < desc->length ||
+                     off > ctxt.cur - desc->length )
+                    break;
+                if ( instance == desc->instance )
+                {
+                    rv = 0;
+                    if ( guest_handle_is_null(handle) )
+                        *bufsz = desc->length;
+                    else if ( *bufsz < desc->length )
+                        rv = -ENOBUFS;
+                    else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
+                        rv = -EFAULT;
+                    else
+                        *bufsz = desc->length;
+                    break;
+                }
+                off += desc->length;
+            }
+        }
+        domain_unpause(d);
+    }
 
     xfree(ctxt.data);
     return rv;
@@ -196,7 +250,9 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
     struct hvm_save_header hdr;
     struct hvm_save_end end;
     hvm_save_handler handler;
-    unsigned int i;
+    hvm_save_one_handler save_one_handler;
+    unsigned int i, rc;
+    struct vcpu *v = NULL;
 
     if ( d->is_dying )
         return -EINVAL;
@@ -224,11 +280,32 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
     for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ )
     {
         handler = hvm_sr_handlers[i].save;
-        if ( handler != NULL )
+        save_one_handler = hvm_sr_handlers[i].save_one;
+        if ( save_one_handler != NULL )
         {
             printk(XENLOG_G_INFO "HVM%d save: %s\n",
                    d->domain_id, hvm_sr_handlers[i].name);
-            if ( handler(d, h) != 0 )
+            for_each_vcpu ( d, v )
+            {
+                rc = save_one_handler(v, h);
+
+                if ( rc != 0 )
+                {
+                    printk(XENLOG_G_ERR
+                           "HVM%d save: failed to save type %"PRIu16"\n",
+                           d->domain_id, i);
+                    return -EFAULT;
+                }
+            }
+        }
+        else if ( handler != NULL )
+        {
+            printk(XENLOG_G_INFO "HVM%d save: %s\n",
+                   d->domain_id, hvm_sr_handlers[i].name);
+
+            rc = handler(d, h);
+
+            if ( rc != 0 )
             {
                 printk(XENLOG_G_ERR
                        "HVM%d save: failed to save type %"PRIu16"\n",
-- 
2.7.4



 

