[Xen-devel] [PATCH v9 05/11] x86/hvm: Introduce hvm_save_cpu_msrs_one func
This is used to save data from a single instance.

Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
---
Changes since V7:
	- Moved the init of ctxt->count to hvm_save_cpu_msrs_one()
---
 xen/arch/x86/hvm/hvm.c | 103 +++++++++++++++++++++++++++----------------------
 1 file changed, 57 insertions(+), 46 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e8e0be5..7ca3763 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1371,66 +1371,77 @@ static const uint32_t msrs_to_send[] = {
 };
 static unsigned int __read_mostly msr_count_max = ARRAY_SIZE(msrs_to_send);
 
-static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_cpu_msrs_one(struct vcpu *v, hvm_domain_context_t *h)
 {
-    struct vcpu *v;
+    unsigned int i;
+    struct hvm_msr *ctxt;
+    struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
 
-    for_each_vcpu ( d, v )
+    if ( _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
+                         HVM_CPU_MSR_SIZE(msr_count_max)) )
+        return 1;
+    ctxt = (struct hvm_msr *)&h->data[h->cur];
+
+    ctxt->count = 0;
+    for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
     {
-        struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
-        struct hvm_msr *ctxt;
-        unsigned int i;
+        uint64_t val;
+        int rc = guest_rdmsr(v, msrs_to_send[i], &val);
 
-        if ( _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
-                             HVM_CPU_MSR_SIZE(msr_count_max)) )
-            return 1;
-        ctxt = (struct hvm_msr *)&h->data[h->cur];
-        ctxt->count = 0;
+        /*
+         * It is the programmers responsibility to ensure that
+         * msrs_to_send[] contain generally-read/write MSRs.
+         * X86EMUL_EXCEPTION here implies a missing feature, and that the
+         * guest doesn't have access to the MSR.
+         */
+        if ( rc == X86EMUL_EXCEPTION )
+            continue;
 
-        for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
+        if ( rc != X86EMUL_OKAY )
         {
-            uint64_t val;
-            int rc = guest_rdmsr(v, msrs_to_send[i], &val);
+            ASSERT_UNREACHABLE();
+            return -ENXIO;
+        }
 
-            /*
-             * It is the programmers responsibility to ensure that
-             * msrs_to_send[] contain generally-read/write MSRs.
-             * X86EMUL_EXCEPTION here implies a missing feature, and that the
-             * guest doesn't have access to the MSR.
-             */
-            if ( rc == X86EMUL_EXCEPTION )
-                continue;
+        if ( !val )
+            continue; /* Skip empty MSRs. */
 
-            if ( rc != X86EMUL_OKAY )
-            {
-                ASSERT_UNREACHABLE();
-                return -ENXIO;
-            }
+        ctxt->msr[ctxt->count].index = msrs_to_send[i];
+        ctxt->msr[ctxt->count++].val = val;
+    }
 
-            if ( !val )
-                continue; /* Skip empty MSRs. */
+    if ( hvm_funcs.save_msr )
+        hvm_funcs.save_msr(v, ctxt);
 
-            ctxt->msr[ctxt->count].index = msrs_to_send[i];
-            ctxt->msr[ctxt->count++].val = val;
-        }
+    ASSERT(ctxt->count <= msr_count_max);
 
-        if ( hvm_funcs.save_msr )
-            hvm_funcs.save_msr(v, ctxt);
+    for ( i = 0; i < ctxt->count; ++i )
+        ctxt->msr[i]._rsvd = 0;
 
-        ASSERT(ctxt->count <= msr_count_max);
+    if ( ctxt->count )
+    {
+        /* Rewrite length to indicate how much space we actually used. */
+        desc->length = HVM_CPU_MSR_SIZE(ctxt->count);
+        h->cur += HVM_CPU_MSR_SIZE(ctxt->count);
+    }
+    else
+        /* or rewind and remove the descriptor from the stream. */
+        h->cur -= sizeof(struct hvm_save_descriptor);
 
-        for ( i = 0; i < ctxt->count; ++i )
-            ctxt->msr[i]._rsvd = 0;
+    return 0;
+}
 
-        if ( ctxt->count )
-        {
-            /* Rewrite length to indicate how much space we actually used. */
-            desc->length = HVM_CPU_MSR_SIZE(ctxt->count);
-            h->cur += HVM_CPU_MSR_SIZE(ctxt->count);
-        }
-        else
-            /* or rewind and remove the descriptor from the stream. */
-            h->cur -= sizeof(struct hvm_save_descriptor);
+
+static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
+{
+    struct vcpu *v;
+
+    for_each_vcpu ( d, v )
+    {
+        int rc = hvm_save_cpu_msrs_one(v, h);
+
+        if ( rc != 0 )
+            return rc;
     }
 
     return 0;
-- 
2.7.4
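[Editor's illustration] The refactoring pattern in this patch (split a per-domain save loop into a per-vcpu helper plus a thin wrapper that stops on the first error) is shown below as a minimal, standalone sketch. This is not Xen code: all names and types here (toy_vcpu, toy_context, toy_save_one, toy_save_all) are hypothetical stand-ins for struct vcpu, hvm_domain_context_t, hvm_save_cpu_msrs_one() and hvm_save_cpu_msrs(), and the "skip empty state" check loosely mirrors the patch's skipping of empty MSRs.

/*
 * Standalone sketch (NOT Xen code) of the save-one split applied by this
 * patch.  All names below are hypothetical.
 */
#include <stdio.h>

#define NR_VCPUS 4

struct toy_vcpu {
    int id;
    unsigned long msr_val;      /* pretend per-vcpu state worth saving */
};

struct toy_context {
    unsigned long saved[NR_VCPUS];
    unsigned int count;
};

/* Save the state of a single vcpu, mirroring hvm_save_cpu_msrs_one(). */
static int toy_save_one(const struct toy_vcpu *v, struct toy_context *h)
{
    if ( v->msr_val == 0 )
        return 0;               /* nothing to record; skip, like empty MSRs */

    h->saved[h->count++] = v->msr_val;
    return 0;
}

/* Per-domain wrapper, mirroring the new hvm_save_cpu_msrs(). */
static int toy_save_all(const struct toy_vcpu vcpus[], unsigned int n,
                        struct toy_context *h)
{
    unsigned int i;

    for ( i = 0; i < n; i++ )
    {
        int rc = toy_save_one(&vcpus[i], h);

        if ( rc != 0 )
            return rc;          /* propagate the first failure */
    }

    return 0;
}

int main(void)
{
    struct toy_vcpu vcpus[NR_VCPUS] = {
        { 0, 0x10 }, { 1, 0 }, { 2, 0x30 }, { 3, 0x40 }
    };
    struct toy_context ctx = { .count = 0 };

    if ( toy_save_all(vcpus, NR_VCPUS, &ctx) == 0 )
        printf("saved %u records\n", ctx.count);

    return 0;
}

The wrapper keeps the domain-wide iteration trivial, so later patches in the series can reuse the per-vcpu helper on its own (e.g. to save the state of a single vcpu) without duplicating the record-building logic.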