
Re: [Xen-devel] [Patch 5/6] Xen/MCE: vMCE save and restore



>>> On 23.07.12 at 11:44, "Liu, Jinsong" <jinsong.liu@xxxxxxxxx> wrote:
> Xen/MCE: vMCE save and restore
> 
> This patch provides vMCE save/restore for migration.
> 1. MCG_CAP is well-defined. However, considering future capability
> extensions, we keep the save/restore logic that Jan implemented in c/s 24887;
> 2. MCi_CTL2 is initialized by the guest OS at boot, so it needs
> save/restore, otherwise the guest would be surprised;
> 3. Other MSRs do not need save/restore, since they are either error-related
> (and pointless to save/restore) or unified across all vMCE platforms;
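Regarding point 2, to spell out what is at stake: a guest kernel typically
programs IA32_MCi_CTL2 (MSR 0x280 + i; bit 30 is CMCI_EN, bits 14:0 the
corrected-error threshold) once during boot and does not expect those enables
to vanish after a live migration. Purely as an illustration of that
guest-side initialization, with hypothetical rdmsr()/wrmsr() helpers standing
in for the guest kernel's MSR accessors (not actual guest code):

#include <stdint.h>

/* Illustration only: per-bank CMCI enable, as a guest OS might do at boot.
 * MSR numbers and bit layout are per the Intel SDM. */
#define MSR_IA32_MC0_CTL2    0x280
#define CMCI_EN              (1ULL << 30)
#define CMCI_THRESHOLD_MASK  0x7fffULL

extern uint64_t rdmsr(uint32_t msr);              /* hypothetical helper */
extern void wrmsr(uint32_t msr, uint64_t val);    /* hypothetical helper */

static void guest_enable_cmci(unsigned int bank)
{
    uint64_t val = rdmsr(MSR_IA32_MC0_CTL2 + bank);

    val &= ~CMCI_THRESHOLD_MASK;
    val |= CMCI_EN | 1;                 /* threshold: one corrected error */
    wrmsr(MSR_IA32_MC0_CTL2 + bank, val);
}

If these per-bank values are not carried across migration, CMCI is silently
left disabled on the destination, which is the surprise the description
alludes to.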
> 
> Signed-off-by: Liu, Jinsong <jinsong.liu@xxxxxxxxx>

I fail to see how the changes below are compatible with the
current (and hence SLE11 SP2) implementation.
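Concretely, struct hvm_vmce_vcpu grows from one to three uint64_t fields, so
a VMCE_VCPU save record written by an existing hypervisor no longer has the
length the restore side expects, and the tools-visible
xen_domctl_ext_vcpucontext layout changes size as well, with its own
compatibility implications. If records from current hypervisors are meant to
stay restorable, the load path would at least have to tolerate the shorter
format and zero the new fields, roughly along these lines. This is a sketch
only, assuming a zero-extending variant of hvm_load_entry() (called
hvm_load_entry_zeroextend() below) were available or introduced:

static int vmce_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    unsigned int vcpuid = hvm_load_instance(h);
    struct vcpu *v;
    /* Zero-initialise so a short (old-format) record leaves the new
     * MCi_CTL2 fields at 0. */
    struct hvm_vmce_vcpu ctxt = { 0 };
    int err;

    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
    {
        dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no vcpu%u\n",
                d->domain_id, vcpuid);
        err = -EINVAL;
    }
    else
        /* Hypothetical helper: accept a record shorter than the current
         * structure and zero-extend it instead of failing the length
         * check. */
        err = hvm_load_entry_zeroextend(VMCE_VCPU, h, &ctxt);

    return err ?: vmce_restore_vcpu(v, &ctxt);
}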

Jan

> diff -r 9851ff6c446a tools/misc/xen-hvmctx.c
> --- a/tools/misc/xen-hvmctx.c Mon Jul 16 19:29:50 2012 +0800
> +++ b/tools/misc/xen-hvmctx.c Mon Jul 16 21:14:19 2012 +0800
> @@ -388,6 +388,8 @@
>      HVM_SAVE_TYPE(VMCE_VCPU) p;
>      READ(p);
>      printf("    VMCE_VCPU: caps %" PRIx64 "\n", p.caps);
> +    printf("    VMCE_VCPU: bank0 mci_ctl2 %" PRIx64 "\n", p.mci_ctl2_bank0);
> +    printf("    VMCE_VCPU: bank1 mci_ctl2 %" PRIx64 "\n", p.mci_ctl2_bank1);
>  }
>  
>  int main(int argc, char **argv)
> diff -r 9851ff6c446a xen/arch/x86/cpu/mcheck/vmce.c
> --- a/xen/arch/x86/cpu/mcheck/vmce.c  Mon Jul 16 19:29:50 2012 +0800
> +++ b/xen/arch/x86/cpu/mcheck/vmce.c  Mon Jul 16 21:14:19 2012 +0800
> @@ -56,8 +56,10 @@
>      spin_lock_init(&v->arch.vmce.lock);
>  }
>  
> -int vmce_restore_vcpu(struct vcpu *v, uint64_t caps)
> +int vmce_restore_vcpu(struct vcpu *v, struct hvm_vmce_vcpu *ctxt)
>  {
> +    uint64_t caps = ctxt->caps;
> +
>      if ( caps & ~GUEST_MCG_CAP & ~MCG_CAP_COUNT & ~MCG_CTL_P )
>      {
>          dprintk(XENLOG_G_ERR, "%s restore: unsupported MCA capabilities"
> @@ -68,6 +70,9 @@
>      }
>  
>      v->arch.vmce.mcg_cap = caps;
> +    v->arch.vmce.bank[0].mci_ctl2 = ctxt->mci_ctl2_bank0;
> +    v->arch.vmce.bank[1].mci_ctl2 = ctxt->mci_ctl2_bank1;
> +
>      return 0;
>  }
>  
> @@ -308,7 +313,9 @@
>  
>      for_each_vcpu( d, v ) {
>          struct hvm_vmce_vcpu ctxt = {
> -            .caps = v->arch.vmce.mcg_cap
> +            .caps = v->arch.vmce.mcg_cap,
> +            .mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2,
> +            .mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2
>          };
>  
>          err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
> @@ -335,7 +342,7 @@
>      else
>          err = hvm_load_entry(VMCE_VCPU, h, &ctxt);
>  
> -    return err ?: vmce_restore_vcpu(v, ctxt.caps);
> +    return err ?: vmce_restore_vcpu(v, &ctxt);
>  }
>  
>  HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt,
> diff -r 9851ff6c446a xen/arch/x86/domctl.c
> --- a/xen/arch/x86/domctl.c   Mon Jul 16 19:29:50 2012 +0800
> +++ b/xen/arch/x86/domctl.c   Mon Jul 16 21:14:19 2012 +0800
> @@ -1023,12 +1023,14 @@
>                  evc->syscall32_callback_eip    = 0;
>                  evc->syscall32_disables_events = 0;
>              }
> -            evc->mcg_cap = v->arch.vmce.mcg_cap;
> +            evc->vmce.caps = v->arch.vmce.mcg_cap;
> +            evc->vmce.mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
> +            evc->vmce.mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
>          }
>          else
>          {
>              ret = -EINVAL;
> -            if ( evc->size < offsetof(typeof(*evc), mcg_cap) )
> +            if ( evc->size < offsetof(typeof(*evc), vmce) )
>                  goto ext_vcpucontext_out;
>  #ifdef __x86_64__
>              if ( !is_hvm_domain(d) )
> @@ -1060,9 +1062,9 @@
>                   evc->syscall32_callback_eip )
>                  goto ext_vcpucontext_out;
>  
> -            if ( evc->size >= offsetof(typeof(*evc), mcg_cap) +
> -                              sizeof(evc->mcg_cap) )
> -                ret = vmce_restore_vcpu(v, evc->mcg_cap);
> +            if ( evc->size >= offsetof(typeof(*evc), vmce) +
> +                              sizeof(evc->vmce) )
> +                ret = vmce_restore_vcpu(v, &evc->vmce);
>          }
>  
>          ret = 0;
> diff -r 9851ff6c446a xen/include/asm-x86/mce.h
> --- a/xen/include/asm-x86/mce.h       Mon Jul 16 19:29:50 2012 +0800
> +++ b/xen/include/asm-x86/mce.h       Mon Jul 16 21:14:19 2012 +0800
> @@ -44,7 +44,7 @@
>  
>  /* Guest vMCE MSRs virtualization */
>  extern void vmce_init_vcpu(struct vcpu *);
> -extern int vmce_restore_vcpu(struct vcpu *, uint64_t caps);
> +extern int vmce_restore_vcpu(struct vcpu *, struct hvm_vmce_vcpu *ctxt);
>  extern int vmce_wrmsr(uint32_t msr, uint64_t val);
>  extern int vmce_rdmsr(uint32_t msr, uint64_t *val);
>  
> diff -r 9851ff6c446a xen/include/public/arch-x86/hvm/save.h
> --- a/xen/include/public/arch-x86/hvm/save.h  Mon Jul 16 19:29:50 2012 +0800
> +++ b/xen/include/public/arch-x86/hvm/save.h  Mon Jul 16 21:14:19 2012 +0800
> @@ -577,6 +577,8 @@
>  
>  struct hvm_vmce_vcpu {
>      uint64_t caps;
> +    uint64_t mci_ctl2_bank0;
> +    uint64_t mci_ctl2_bank1;
>  };
>  
>  DECLARE_HVM_SAVE_TYPE(VMCE_VCPU, 18, struct hvm_vmce_vcpu);
> diff -r 9851ff6c446a xen/include/public/domctl.h
> --- a/xen/include/public/domctl.h     Mon Jul 16 19:29:50 2012 +0800
> +++ b/xen/include/public/domctl.h     Mon Jul 16 21:14:19 2012 +0800
> @@ -32,6 +32,7 @@
>  #error "domctl operations are intended for use by node control tools only"
>  #endif
>  
> +#include <xen/hvm/save.h>
>  #include "xen.h"
>  #include "grant_table.h"
>  
> @@ -571,7 +572,7 @@
>      uint16_t         sysenter_callback_cs;
>      uint8_t          syscall32_disables_events;
>      uint8_t          sysenter_disables_events;
> -    uint64_aligned_t mcg_cap;
> +    struct hvm_vmce_vcpu vmce;
>  #endif
>  };
>  typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

