[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 1/2] VMX: also use proper instruction mnemonic for VMREAD



On Mon, Sep 16, 2013 at 4:06 AM, Jan Beulich <JBeulich@xxxxxxxx> wrote:
> ... when assembler supports it, following commit cfd54835 ("VMX: use
> proper instruction mnemonics if assembler supports them"). This merely
> got split off from the earlier change because of the significant number
> of call sites needing to be changed.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
>

> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -163,11 +163,11 @@ long_mode_do_msr_read(unsigned int msr,
>      switch ( msr )
>      {
>      case MSR_FS_BASE:
> -        *msr_content = __vmread(GUEST_FS_BASE);
> +        __vmread(GUEST_FS_BASE, msr_content);
>          break;
>
>      case MSR_GS_BASE:
> -        *msr_content = __vmread(GUEST_GS_BASE);
> +        __vmread(GUEST_GS_BASE, msr_content);
>          break;
>
>      case MSR_SHADOW_GS_BASE:
> @@ -348,13 +348,13 @@ void vmx_update_exception_bitmap(struct
>
>  static int vmx_guest_x86_mode(struct vcpu *v)
>  {
> -    unsigned int cs_ar_bytes;
> +    unsigned long cs_ar_bytes;
>
>      if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
>          return 0;
>      if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
>          return 1;
> -    cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
> +    __vmread(GUEST_CS_AR_BYTES, &cs_ar_bytes);
>      if ( hvm_long_mode_enabled(v) &&
>           likely(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
>          return 8;
> @@ -377,7 +377,7 @@ static void vmx_save_dr(struct vcpu *v)
>      v->arch.debugreg[3] = read_debugreg(3);
>      v->arch.debugreg[6] = read_debugreg(6);
>      /* DR7 must be saved as it is used by vmx_restore_dr(). */
> -    v->arch.debugreg[7] = __vmread(GUEST_DR7);
> +    __vmread(GUEST_DR7, &v->arch.debugreg[7]);
>  }
>
>  static void __restore_debug_registers(struct vcpu *v)
> @@ -410,7 +410,7 @@ static void vmx_restore_dr(struct vcpu *
>
>  static void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
>  {
> -    uint32_t ev;
> +    unsigned long ev;
>
>      vmx_vmcs_enter(v);
>
> @@ -421,17 +421,19 @@ static void vmx_vmcs_save(struct vcpu *v
>
>      c->msr_efer = v->arch.hvm_vcpu.guest_efer;
>
> -    c->sysenter_cs = __vmread(GUEST_SYSENTER_CS);
> -    c->sysenter_esp = __vmread(GUEST_SYSENTER_ESP);
> -    c->sysenter_eip = __vmread(GUEST_SYSENTER_EIP);
> +    __vmread(GUEST_SYSENTER_CS, &c->sysenter_cs);
> +    __vmread(GUEST_SYSENTER_ESP, &c->sysenter_esp);
> +    __vmread(GUEST_SYSENTER_EIP, &c->sysenter_eip);
>
>      c->pending_event = 0;
>      c->error_code = 0;
> -    if ( ((ev = __vmread(VM_ENTRY_INTR_INFO)) & INTR_INFO_VALID_MASK) &&
> +    __vmread(VM_ENTRY_INTR_INFO, &ev);
> +    if ( (ev & INTR_INFO_VALID_MASK) &&
>           hvm_event_needs_reinjection((ev >> 8) & 7, ev & 0xff) )
>      {
>          c->pending_event = ev;
> -        c->error_code = __vmread(VM_ENTRY_EXCEPTION_ERROR_CODE);
> +        __vmread(VM_ENTRY_EXCEPTION_ERROR_CODE, &ev);
> +        c->error_code = ev;
>      }
>
>      vmx_vmcs_exit(v);
> @@ -667,74 +669,78 @@ static void vmx_ctxt_switch_to(struct vc
>  void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
>                                struct segment_register *reg)
>  {
> -    uint32_t attr = 0;
> +    unsigned long attr = 0, sel = 0, limit;

This is a minor thing, but why do you initialize 'attr' and 'sel', but
not 'limit'?

>
>      vmx_vmcs_enter(v);
>
>      switch ( seg )
>      {
>      case x86_seg_cs:
> -        reg->sel   = __vmread(GUEST_CS_SELECTOR);
> -        reg->limit = __vmread(GUEST_CS_LIMIT);
> -        reg->base  = __vmread(GUEST_CS_BASE);
> -        attr       = __vmread(GUEST_CS_AR_BYTES);
> +        __vmread(GUEST_CS_SELECTOR, &sel);
> +        __vmread(GUEST_CS_LIMIT,    &limit);
> +        __vmread(GUEST_CS_BASE,     &reg->base);
> +        __vmread(GUEST_CS_AR_BYTES, &attr);
>          break;
>      case x86_seg_ds:
> -        reg->sel   = __vmread(GUEST_DS_SELECTOR);
> -        reg->limit = __vmread(GUEST_DS_LIMIT);
> -        reg->base  = __vmread(GUEST_DS_BASE);
> -        attr       = __vmread(GUEST_DS_AR_BYTES);
> +        __vmread(GUEST_DS_SELECTOR, &sel);
> +        __vmread(GUEST_DS_LIMIT,    &limit);
> +        __vmread(GUEST_DS_BASE,     &reg->base);
> +        __vmread(GUEST_DS_AR_BYTES, &attr);
>          break;
>      case x86_seg_es:
> -        reg->sel   = __vmread(GUEST_ES_SELECTOR);
> -        reg->limit = __vmread(GUEST_ES_LIMIT);
> -        reg->base  = __vmread(GUEST_ES_BASE);
> -        attr       = __vmread(GUEST_ES_AR_BYTES);
> +        __vmread(GUEST_ES_SELECTOR, &sel);
> +        __vmread(GUEST_ES_LIMIT,    &limit);
> +        __vmread(GUEST_ES_BASE,     &reg->base);
> +        __vmread(GUEST_ES_AR_BYTES, &attr);
>          break;
>      case x86_seg_fs:
> -        reg->sel   = __vmread(GUEST_FS_SELECTOR);
> -        reg->limit = __vmread(GUEST_FS_LIMIT);
> -        reg->base  = __vmread(GUEST_FS_BASE);
> -        attr       = __vmread(GUEST_FS_AR_BYTES);
> +        __vmread(GUEST_FS_SELECTOR, &sel);
> +        __vmread(GUEST_FS_LIMIT,    &limit);
> +        __vmread(GUEST_FS_BASE,     &reg->base);
> +        __vmread(GUEST_FS_AR_BYTES, &attr);
>          break;
>      case x86_seg_gs:
> -        reg->sel   = __vmread(GUEST_GS_SELECTOR);
> -        reg->limit = __vmread(GUEST_GS_LIMIT);
> -        reg->base  = __vmread(GUEST_GS_BASE);
> -        attr       = __vmread(GUEST_GS_AR_BYTES);
> +        __vmread(GUEST_GS_SELECTOR, &sel);
> +        __vmread(GUEST_GS_LIMIT,    &limit);
> +        __vmread(GUEST_GS_BASE,     &reg->base);
> +        __vmread(GUEST_GS_AR_BYTES, &attr);
>          break;
>      case x86_seg_ss:
> -        reg->sel   = __vmread(GUEST_SS_SELECTOR);
> -        reg->limit = __vmread(GUEST_SS_LIMIT);
> -        reg->base  = __vmread(GUEST_SS_BASE);
> -        attr       = __vmread(GUEST_SS_AR_BYTES);
> +        __vmread(GUEST_SS_SELECTOR, &sel);
> +        __vmread(GUEST_SS_LIMIT,    &limit);
> +        __vmread(GUEST_SS_BASE,     &reg->base);
> +        __vmread(GUEST_SS_AR_BYTES, &attr);
>          break;
>      case x86_seg_tr:
> -        reg->sel   = __vmread(GUEST_TR_SELECTOR);
> -        reg->limit = __vmread(GUEST_TR_LIMIT);
> -        reg->base  = __vmread(GUEST_TR_BASE);
> -        attr       = __vmread(GUEST_TR_AR_BYTES);
> +        __vmread(GUEST_TR_SELECTOR, &sel);
> +        __vmread(GUEST_TR_LIMIT,    &limit);
> +        __vmread(GUEST_TR_BASE,     &reg->base);
> +        __vmread(GUEST_TR_AR_BYTES, &attr);
>          break;
>      case x86_seg_gdtr:
> -        reg->limit = __vmread(GUEST_GDTR_LIMIT);
> -        reg->base  = __vmread(GUEST_GDTR_BASE);
> +        __vmread(GUEST_GDTR_LIMIT, &limit);
> +        __vmread(GUEST_GDTR_BASE,  &reg->base);
>          break;
>      case x86_seg_idtr:
> -        reg->limit = __vmread(GUEST_IDTR_LIMIT);
> -        reg->base  = __vmread(GUEST_IDTR_BASE);
> +        __vmread(GUEST_IDTR_LIMIT, &limit);
> +        __vmread(GUEST_IDTR_BASE,  &reg->base);
>          break;
>      case x86_seg_ldtr:
> -        reg->sel   = __vmread(GUEST_LDTR_SELECTOR);
> -        reg->limit = __vmread(GUEST_LDTR_LIMIT);
> -        reg->base  = __vmread(GUEST_LDTR_BASE);
> -        attr       = __vmread(GUEST_LDTR_AR_BYTES);
> +        __vmread(GUEST_LDTR_SELECTOR, &sel);
> +        __vmread(GUEST_LDTR_LIMIT,    &limit);
> +        __vmread(GUEST_LDTR_BASE,     &reg->base);
> +        __vmread(GUEST_LDTR_AR_BYTES, &attr);
>          break;
>      default:
>          BUG();
> +        return;
>      }
>
>      vmx_vmcs_exit(v);
>
> +    reg->sel = sel;
> +    reg->limit = limit;
> +
>      reg->attr.bytes = (attr & 0xff) | ((attr >> 4) & 0xf00);
>      /* Unusable flag is folded into Present flag. */
>      if ( attr & (1u<<16) )

-- 
Jun
Intel Open Source Technology Center

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.