[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [RFC PATCH 2/3] xen/hvm-save: Extend hvm_save_handler to take an instance parameter



The per-domain save handlers take this new parameter and (other than the PIC,
which is a special case) use it in their hvm_save_entry() calls.

The per-vcpu save handlers take this new parameter and currently ignore it.

The return value for hvm_save_handler has been redefined to be more useful.
All save handlers have been modified to return negative on failure.  The
per-domain ones return the number of bytes written, but the per-vcpu ones
still return 0 on success.  This involved tweaking hvm_save_entry() and
_hvm_{init,write}_entry(), and most uses of them.

The callsites for the save handlers currently have the instance parameter
hardwired to 0, and have their error detection code suitably altered.

There should be no observable difference as a result of this patch.

There are a few scattered style fixes in the save functions.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Suggested-by: Jan Beulich <JBeulich@xxxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Don Slutz <dslutz@xxxxxxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/arch/x86/cpu/mcheck/vmce.c |    5 +++--
 xen/arch/x86/hvm/hpet.c        |    6 +++---
 xen/arch/x86/hvm/hvm.c         |   19 +++++++++++--------
 xen/arch/x86/hvm/i8254.c       |    6 +++---
 xen/arch/x86/hvm/irq.c         |   15 +++++++++------
 xen/arch/x86/hvm/mtrr.c        |    7 ++++---
 xen/arch/x86/hvm/pmtimer.c     |    5 +++--
 xen/arch/x86/hvm/rtc.c         |    6 ++++--
 xen/arch/x86/hvm/vioapic.c     |    5 +++--
 xen/arch/x86/hvm/viridian.c    |   12 +++++++-----
 xen/arch/x86/hvm/vlapic.c      |   10 ++++++----
 xen/arch/x86/hvm/vpic.c        |    7 ++++---
 xen/common/hvm/save.c          |   15 ++++++++-------
 xen/include/xen/hvm/save.h     |   30 ++++++++++++++++++++----------
 14 files changed, 88 insertions(+), 60 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index a88368a..8ef40c3 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -296,7 +296,8 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
     return ret;
 }
 
-static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int vmce_save_vcpu_ctxt(struct domain *d, uint16_t inst,
+                               hvm_domain_context_t *h)
 {
     struct vcpu *v;
     int err = 0;
@@ -309,7 +310,7 @@ static int vmce_save_vcpu_ctxt(struct domain *d, 
hvm_domain_context_t *h)
         };
 
         err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
-        if ( err )
+        if ( err < 0 )
             break;
     }
 
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index fb2c098..fbde99d 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -468,7 +468,7 @@ const struct hvm_mmio_handler hpet_mmio_handler = {
 };
 
 
-static int hpet_save(struct domain *d, hvm_domain_context_t *h)
+static int hpet_save(struct domain *d, uint16_t inst, hvm_domain_context_t *h)
 {
     HPETState *hp = domain_vhpet(d);
     int rc;
@@ -479,8 +479,8 @@ static int hpet_save(struct domain *d, hvm_domain_context_t 
*h)
     hp->hpet.mc64 = hp->mc_offset + guest_time_hpet(hp);
 
     /* Save the HPET registers */
-    rc = _hvm_init_entry(h, HVM_SAVE_CODE(HPET), 0, HVM_SAVE_LENGTH(HPET));
-    if ( rc == 0 )
+    rc = _hvm_init_entry(h, HVM_SAVE_CODE(HPET), inst, HVM_SAVE_LENGTH(HPET));
+    if ( rc >= 0 )
     {
         struct hvm_hw_hpet *rec = (struct hvm_hw_hpet *)&h->data[h->cur];
         h->cur += HVM_SAVE_LENGTH(HPET);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index eb21fc4..925e792 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -662,7 +662,8 @@ void hvm_domain_destroy(struct domain *d)
     vioapic_deinit(d);
 }
 
-static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_tsc_adjust(struct domain *d, uint16_t inst,
+                               hvm_domain_context_t *h)
 {
     struct vcpu *v;
     struct hvm_tsc_adjust ctxt;
@@ -672,7 +673,7 @@ static int hvm_save_tsc_adjust(struct domain *d, 
hvm_domain_context_t *h)
     {
         ctxt.tsc_adjust = v->arch.hvm_vcpu.msr_tsc_adjust;
         err = hvm_save_entry(TSC_ADJUST, v->vcpu_id, h, &ctxt);
-        if ( err )
+        if ( err < 0 )
             break;
     }
 
@@ -702,7 +703,8 @@ static int hvm_load_tsc_adjust(struct domain *d, 
hvm_domain_context_t *h)
 HVM_REGISTER_SAVE_RESTORE_PER_VCPU(TSC_ADJUST, hvm_save_tsc_adjust,
                                    hvm_load_tsc_adjust);
 
-static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_cpu_ctxt(struct domain *d, uint16_t inst,
+                             hvm_domain_context_t *h)
 {
     struct vcpu *v;
     struct hvm_hw_cpu ctxt;
@@ -806,8 +808,8 @@ static int hvm_save_cpu_ctxt(struct domain *d, 
hvm_domain_context_t *h)
         ctxt.dr6 = v->arch.debugreg[6];
         ctxt.dr7 = v->arch.debugreg[7];
 
-        if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
-            return 1; 
+        if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) < 0 )
+            return -ENOSPC;
     }
     return 0;
 }
@@ -1005,7 +1007,8 @@ HVM_REGISTER_SAVE_RESTORE_PER_VCPU(CPU, 
hvm_save_cpu_ctxt, hvm_load_cpu_ctxt);
                                            save_area) + \
                                   xstate_ctxt_size(xcr0))
 
-static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_cpu_xsave_states(struct domain *d, uint16_t inst,
+                                     hvm_domain_context_t *h)
 {
     struct vcpu *v;
     struct hvm_hw_cpu_xsave *ctxt;
@@ -1019,8 +1022,8 @@ static int hvm_save_cpu_xsave_states(struct domain *d, 
hvm_domain_context_t *h)
 
         if ( !xsave_enabled(v) )
             continue;
-        if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, size) )
-            return 1;
+        if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, size) < 0 )
+            return -ENOSPC;
         ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
         h->cur += size;
 
diff --git a/xen/arch/x86/hvm/i8254.c b/xen/arch/x86/hvm/i8254.c
index 139812a..33e6990 100644
--- a/xen/arch/x86/hvm/i8254.c
+++ b/xen/arch/x86/hvm/i8254.c
@@ -382,14 +382,14 @@ void pit_stop_channel0_irq(PITState *pit)
     spin_unlock(&pit->lock);
 }
 
-static int pit_save(struct domain *d, hvm_domain_context_t *h)
+static int pit_save(struct domain *d, uint16_t inst, hvm_domain_context_t *h)
 {
     PITState *pit = domain_vpit(d);
     int rc;
 
     spin_lock(&pit->lock);
-    
-    rc = hvm_save_entry(PIT, 0, h, &pit->hw);
+
+    rc = hvm_save_entry(PIT, inst, h, &pit->hw);
 
     spin_unlock(&pit->lock);
 
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 04ce739..a23bd0a 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -524,7 +524,8 @@ static int __init dump_irq_info_key_init(void)
 }
 __initcall(dump_irq_info_key_init);
 
-static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_pci(struct domain *d, uint16_t inst,
+                        hvm_domain_context_t *h)
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
     unsigned int asserted, pdev, pintx;
@@ -546,7 +547,7 @@ static int irq_save_pci(struct domain *d, 
hvm_domain_context_t *h)
         __hvm_pci_intx_deassert(d, pdev, pintx);
 
     /* Save PCI IRQ lines */
-    rc = hvm_save_entry(PCI_IRQ, 0, h, &hvm_irq->pci_intx);
+    rc = hvm_save_entry(PCI_IRQ, inst, h, &hvm_irq->pci_intx);
 
     if ( asserted )
         __hvm_pci_intx_assert(d, pdev, pintx);    
@@ -556,20 +557,22 @@ static int irq_save_pci(struct domain *d, 
hvm_domain_context_t *h)
     return rc;
 }
 
-static int irq_save_isa(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_isa(struct domain *d, uint16_t inst,
+                        hvm_domain_context_t *h)
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
 
     /* Save ISA IRQ lines */
-    return ( hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq) );
+    return hvm_save_entry(ISA_IRQ, inst, h, &hvm_irq->isa_irq);
 }
 
-static int irq_save_link(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_link(struct domain *d, uint16_t inst,
+                         hvm_domain_context_t *h)
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
 
     /* Save PCI-ISA link state */
-    return ( hvm_save_entry(PCI_LINK, 0, h, &hvm_irq->pci_link) );
+    return hvm_save_entry(PCI_LINK, inst, h, &hvm_irq->pci_link);
 }
 
 static int irq_load_pci(struct domain *d, hvm_domain_context_t *h)
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index 61c785c..40f58ed 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -600,7 +600,8 @@ int32_t hvm_set_mem_pinned_cacheattr(
     return 0;
 }
 
-static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_mtrr_msr(struct domain *d, uint16_t inst,
+                             hvm_domain_context_t *h)
 {
     int i;
     struct vcpu *v;
@@ -631,8 +632,8 @@ static int hvm_save_mtrr_msr(struct domain *d, 
hvm_domain_context_t *h)
             hw_mtrr.msr_mtrr_fixed[i] =
                 ((uint64_t*)mtrr_state->fixed_ranges)[i];
 
-        if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) != 0 )
-            return 1;
+        if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) < 0 )
+            return -ENOSPC;
     }
     return 0;
 }
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index 282e8ee..db27950 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -241,7 +241,8 @@ static int handle_pmt_io(
     return X86EMUL_UNHANDLEABLE;
 }
 
-static int pmtimer_save(struct domain *d, hvm_domain_context_t *h)
+static int pmtimer_save(struct domain *d, uint16_t inst,
+                        hvm_domain_context_t *h)
 {
     PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
     uint32_t x, msb = s->pm.tmr_val & TMR_VAL_MSB;
@@ -260,7 +261,7 @@ static int pmtimer_save(struct domain *d, 
hvm_domain_context_t *h)
     /* No point in setting the SCI here because we'll already have saved the 
      * IRQ and *PIC state; we'll fix it up when we restore the domain */
 
-    rc = hvm_save_entry(PMTIMER, 0, h, &s->pm);
+    rc = hvm_save_entry(PMTIMER, inst, h, &s->pm);
 
     spin_unlock(&s->lock);
 
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index 8d9d634..e3fe472 100644
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -702,13 +702,15 @@ void rtc_migrate_timers(struct vcpu *v)
 }
 
 /* Save RTC hardware state */
-static int rtc_save(struct domain *d, hvm_domain_context_t *h)
+static int rtc_save(struct domain *d, uint16_t inst, hvm_domain_context_t *h)
 {
     RTCState *s = domain_vrtc(d);
     int rc;
+
     spin_lock(&s->lock);
-    rc = hvm_save_entry(RTC, 0, h, &s->hw);
+    rc = hvm_save_entry(RTC, inst, h, &s->hw);
     spin_unlock(&s->lock);
+
     return rc;
 }
 
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 7c75192..3efd2d1 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -416,10 +416,11 @@ void vioapic_update_EOI(struct domain *d, int vector)
     spin_unlock(&d->arch.hvm_domain.irq_lock);
 }
 
-static int ioapic_save(struct domain *d, hvm_domain_context_t *h)
+static int ioapic_save(struct domain *d, uint16_t inst, hvm_domain_context_t 
*h)
 {
     struct hvm_hw_vioapic *s = domain_vioapic(d);
-    return hvm_save_entry(IOAPIC, 0, h, s);
+
+    return hvm_save_entry(IOAPIC, inst, h, s);
 }
 
 static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index 0ba85b3..4f9186d 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -423,7 +423,8 @@ out:
     return HVM_HCALL_completed;
 }
 
-static int viridian_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int viridian_save_domain_ctxt(struct domain *d, uint16_t inst,
+                                     hvm_domain_context_t *h)
 {
     struct hvm_viridian_domain_context ctxt;
 
@@ -433,7 +434,7 @@ static int viridian_save_domain_ctxt(struct domain *d, 
hvm_domain_context_t *h)
     ctxt.hypercall_gpa = d->arch.hvm_domain.viridian.hypercall_gpa.raw;
     ctxt.guest_os_id   = d->arch.hvm_domain.viridian.guest_os_id.raw;
 
-    return (hvm_save_entry(VIRIDIAN_DOMAIN, 0, h, &ctxt) != 0);
+    return hvm_save_entry(VIRIDIAN_DOMAIN, inst, h, &ctxt);
 }
 
 static int viridian_load_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
@@ -452,7 +453,8 @@ static int viridian_load_domain_ctxt(struct domain *d, 
hvm_domain_context_t *h)
 HVM_REGISTER_SAVE_RESTORE_PER_DOM(VIRIDIAN_DOMAIN, viridian_save_domain_ctxt,
                                   viridian_load_domain_ctxt, 1);
 
-static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int viridian_save_vcpu_ctxt(struct domain *d, uint16_t inst,
+                                   hvm_domain_context_t *h)
 {
     struct vcpu *v;
 
@@ -464,8 +466,8 @@ static int viridian_save_vcpu_ctxt(struct domain *d, 
hvm_domain_context_t *h)
 
         ctxt.apic_assist = v->arch.hvm_vcpu.viridian.apic_assist.raw;
 
-        if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) != 0 )
-            return 1;
+        if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) < 0 )
+            return -ENOSPC;
     }
 
     return 0;
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index b64b9ee..81dfd3f 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1137,7 +1137,8 @@ static void lapic_rearm(struct vlapic *s)
     s->timer_last_update = s->pt.last_plt_gtime;
 }
 
-static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h)
+static int lapic_save_hidden(struct domain *d, uint16_t inst,
+                             hvm_domain_context_t *h)
 {
     struct vcpu *v;
     struct vlapic *s;
@@ -1146,14 +1147,15 @@ static int lapic_save_hidden(struct domain *d, 
hvm_domain_context_t *h)
     for_each_vcpu ( d, v )
     {
         s = vcpu_vlapic(v);
-        if ( (rc = hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw)) != 0 )
+        if ( (rc = hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw)) < 0 )
             break;
     }
 
     return rc;
 }
 
-static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h)
+static int lapic_save_regs(struct domain *d, uint16_t inst,
+                           hvm_domain_context_t *h)
 {
     struct vcpu *v;
     struct vlapic *s;
@@ -1162,7 +1164,7 @@ static int lapic_save_regs(struct domain *d, 
hvm_domain_context_t *h)
     for_each_vcpu ( d, v )
     {
         s = vcpu_vlapic(v);
-        if ( (rc = hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs)) != 0 )
+        if ( (rc = hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs)) < 0 )
             break;
     }
 
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index e882fe1..7e4b64b 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -364,7 +364,8 @@ static int vpic_intercept_elcr_io(
     return X86EMUL_OKAY;
 }
 
-static int vpic_save(struct domain *d, hvm_domain_context_t *h)
+static int vpic_save(struct domain *d, uint16_t inst,
+                     hvm_domain_context_t *h)
 {
     struct hvm_hw_vpic *s;
     int i;
@@ -373,8 +374,8 @@ static int vpic_save(struct domain *d, hvm_domain_context_t 
*h)
     for ( i = 0; i < 2 ; i++ )
     {
         s = &d->arch.hvm_domain.vpic[i];
-        if ( hvm_save_entry(PIC, i, h, s) )
-            return 1;
+        if ( hvm_save_entry(PIC, i, h, s) < 0 )
+            return -ENOSPC;
     }
 
     return 0;
diff --git a/xen/common/hvm/save.c b/xen/common/hvm/save.c
index 2800c5b..e9723e3 100644
--- a/xen/common/hvm/save.c
+++ b/xen/common/hvm/save.c
@@ -109,7 +109,7 @@ int hvm_save_one(struct domain *d, uint16_t typecode, 
uint16_t instance,
     if ( !ctxt.data )
         return -ENOMEM;
 
-    if ( hvm_sr_handlers[typecode].save(d, &ctxt) != 0 )
+    if ( hvm_sr_handlers[typecode].save(d, 0, &ctxt) < 0 )
     {
         printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16"\n",
                d->domain_id, typecode);
@@ -150,7 +150,7 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
 
     arch_hvm_save(d, &hdr);
 
-    if ( hvm_save_entry(HEADER, 0, h, &hdr) != 0 )
+    if ( hvm_save_entry(HEADER, 0, h, &hdr) < 0 )
     {
         printk(XENLOG_G_ERR "HVM%d save: failed to write header\n",
                d->domain_id);
@@ -165,7 +165,7 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
         {
             printk(XENLOG_G_INFO "HVM%d save: %s\n",
                    d->domain_id, hvm_sr_handlers[i].name);
-            if ( handler(d, h) != 0 ) 
+            if ( handler(d, 0, h) < 0 )
             {
                 printk(XENLOG_G_ERR
                        "HVM%d save: failed to save type %"PRIu16"\n",
@@ -176,7 +176,7 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
     }
 
     /* Save an end-of-file marker */
-    if ( hvm_save_entry(END, 0, h, &end) != 0 )
+    if ( hvm_save_entry(END, 0, h, &end) < 0 )
     {
         /* Run out of data */
         printk(XENLOG_G_ERR "HVM%d save: no room for end marker\n",
@@ -260,20 +260,21 @@ int _hvm_init_entry(struct hvm_domain_context *h,
         printk(XENLOG_G_WARNING "HVM save: no room for"
                " %"PRIu32" + %zu bytes for typecode %"PRIu16"\n",
                len, sizeof(*d), tc);
-        return -1;
+        return -ENOSPC;
     }
     d->typecode = tc;
     d->instance = inst;
     d->length = len;
     h->cur += sizeof(*d);
-    return 0;
+    return sizeof(*d);
 }
 
-void _hvm_write_entry(struct hvm_domain_context *h,
-                      void *src, uint32_t src_len)
+int _hvm_write_entry(struct hvm_domain_context *h,
+                     void *src, uint32_t src_len)
 {
     memcpy(&h->data[h->cur], src, src_len);
     h->cur += src_len;
+    return src_len;
 }
 
 int _hvm_check_entry(struct hvm_domain_context *h, 
diff --git a/xen/include/xen/hvm/save.h b/xen/include/xen/hvm/save.h
index 0e3ef13..b2dca48 100644
--- a/xen/include/xen/hvm/save.h
+++ b/xen/include/xen/hvm/save.h
@@ -30,21 +30,24 @@ typedef struct hvm_domain_context {
     uint8_t *data;
 } hvm_domain_context_t;
 
-/* Marshalling an entry: check space and fill in the header */
+/* Marshalling an entry: check space and fill in the header.  Returns the
+ * number of bytes written, or negative for an error. */
 int _hvm_init_entry(struct hvm_domain_context *h,
                     uint16_t tc, uint16_t inst, uint32_t len);
 
-/* Marshalling: copy the contents in a type-safe way */
-void _hvm_write_entry(struct hvm_domain_context *h,
-                      void *src, uint32_t src_len);
+/* Marshalling: copy the contents in a type-safe way.  Returns the number of
+ * bytes written. */
+int _hvm_write_entry(struct hvm_domain_context *h,
+                     void *src, uint32_t src_len);
 
-/* Marshalling: init and copy; evaluates to zero on success */
+/* Marshalling: init and copy.  Evaluates to the number of bytes written,
+ * or negative for an error. */
 #define hvm_save_entry(_x, _inst, _h, _src) ({                  \
     int r;                                                      \
     r = _hvm_init_entry((_h), HVM_SAVE_CODE(_x),                \
                         (_inst), HVM_SAVE_LENGTH(_x));          \
-    if ( r == 0 )                                               \
-        _hvm_write_entry((_h), (_src), HVM_SAVE_LENGTH(_x));    \
+    if ( r >= 0 )                                               \
+        r += _hvm_write_entry((_h), (_src), HVM_SAVE_LENGTH(_x)); \
     r; })
 
 /* Unmarshalling: test an entry's size and typecode and record the instance */
@@ -85,11 +87,19 @@ static inline uint16_t hvm_load_instance(struct 
hvm_domain_context *h)
     return d->instance;
 }
 
-/* Handler types for different types of save-file entry. 
- * The save handler may save multiple instances of a type into the buffer;
- * the load handler will be called once for each instance found when
- * restoring.  Both return non-zero on error. */
+/* Handler types for different types of save-file entry.
+ *
+ * The save handler will be called once for each instance (either the number
+ * of per-domain instances, or for each vcpu).  The caller ensures that the
+ * instance parameter is always valid in context (i.e. for a per-vcpu type,
+ * instance refers to a valid vcpu).  It returns negative for an error, or the
+ * number of bytes written.
+ *
+ * The load handler will be called once for each instance found when
+ * restoring.  It returns non-zero for an error.
+ */
 typedef int (*hvm_save_handler) (struct domain *d, 
+                                 uint16_t instance,
                                  hvm_domain_context_t *h);
 typedef int (*hvm_load_handler) (struct domain *d,
                                  hvm_domain_context_t *h);
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.