[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v5 1/2] x86/vmx: replace __vmread() with vmread()



From: Denis Mukhin <dmukhin@xxxxxxxx>

Use vmread() instead of __vmread() everywhere in the VT-x code.

Suggested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Denis Mukhin <dmukhin@xxxxxxxx>
---
 xen/arch/x86/cpu/vpmu_intel.c   |   2 +-
 xen/arch/x86/hvm/vmx/intr.c     |  12 +--
 xen/arch/x86/hvm/vmx/realmode.c |   2 +-
 xen/arch/x86/hvm/vmx/vmcs.c     |   8 +-
 xen/arch/x86/hvm/vmx/vmx.c      | 170 ++++++++++++++++----------------
 xen/arch/x86/hvm/vmx/vvmx.c     |  36 +++----
 6 files changed, 115 insertions(+), 115 deletions(-)

diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index 7ce98ee42e..e358342ac9 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -796,7 +796,7 @@ static int cf_check core2_vpmu_do_interrupt(void)
     else
     {
         /* No PMC overflow but perhaps a Trace Message interrupt. */
-        __vmread(GUEST_IA32_DEBUGCTL, &msr_content);
+        msr_content = vmread(GUEST_IA32_DEBUGCTL);
         if ( !(msr_content & IA32_DEBUGCTLMSR_TR) )
             return 0;
     }
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index 91b407e6bc..b622ae1e60 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -65,7 +65,7 @@ static void vmx_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
     {
         unsigned long intr;
 
-        __vmread(VM_ENTRY_INTR_INFO, &intr);
+        intr = vmread(VM_ENTRY_INTR_INFO);
         TRACE(TRC_HVM_INTR_WINDOW, intack.vector, intack.source,
               (intr & INTR_INFO_VALID_MASK) ? intr & 0xff : -1);
     }
@@ -83,7 +83,7 @@ static void vmx_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
          */
         unsigned long intr_shadow;
 
-        __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
+        intr_shadow = vmread(GUEST_INTERRUPTIBILITY_INFO);
         if ( intr_shadow & VMX_INTR_SHADOW_STI )
         {
             /* Having both STI-blocking and MOV-SS-blocking fails vmentry. */
@@ -148,7 +148,7 @@ enum hvm_intblk cf_check nvmx_intr_blocked(struct vcpu *v)
         {
             unsigned long intr_info;
 
-            __vmread(VM_ENTRY_INTR_INFO, &intr_info);
+            intr_info = vmread(VM_ENTRY_INTR_INFO);
             if ( intr_info & INTR_INFO_VALID_MASK )
                 r = hvm_intblk_rflags_ie;
         }
@@ -275,7 +275,7 @@ void asmlinkage vmx_intr_assist(void)
                 goto out;
             }
 
-            __vmread(VM_ENTRY_INTR_INFO, &intr_info);
+            intr_info = vmread(VM_ENTRY_INTR_INFO);
             if ( intr_info & INTR_INFO_VALID_MASK )
             {
                 if ( (intack.source == hvm_intsrc_pic) ||
@@ -299,7 +299,7 @@ void asmlinkage vmx_intr_assist(void)
         }
         else
         {
-            __vmread(VM_ENTRY_INTR_INFO, &intr_info);
+            intr_info = vmread(VM_ENTRY_INTR_INFO);
             if ( intr_info & INTR_INFO_VALID_MASK )
             {
                 vmx_enable_intr_window(v, intack);
@@ -377,7 +377,7 @@ void asmlinkage vmx_intr_assist(void)
         }
 
         /* we need update the RVI field */
-        __vmread(GUEST_INTR_STATUS, &status);
+        status = vmread(GUEST_INTR_STATUS);
         status &= ~VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
         status |= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK &
                     intack.vector;
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index ff44ddcfa6..40cbc273d0 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -159,7 +159,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
     unsigned int emulations = 0;
 
     /* Get-and-clear VM_ENTRY_INTR_INFO. */
-    __vmread(VM_ENTRY_INTR_INFO, &intr_info);
+    intr_info = vmread(VM_ENTRY_INTR_INFO);
     if ( intr_info & INTR_INFO_VALID_MASK )
         __vmwrite(VM_ENTRY_INTR_INFO, 0);
 
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index a44475ae15..f6267f65dd 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1031,7 +1031,7 @@ u64 virtual_vmcs_vmread(const struct vcpu *v, u32 vmcs_encoding)
     u64 res;
 
     virtual_vmcs_enter(v);
-    __vmread(vmcs_encoding, &res);
+    res = vmread(vmcs_encoding);
     virtual_vmcs_exit(v);
 
     return res;
@@ -1691,7 +1691,7 @@ void vmx_vcpu_flush_pml_buffer(struct vcpu *v)
 
     vmx_vmcs_enter(v);
 
-    __vmread(GUEST_PML_INDEX, &pml_idx);
+    pml_idx = vmread(GUEST_PML_INDEX);
 
     /* Do nothing if PML buffer is empty. */
     if ( pml_idx == (NR_PML_ENTRIES - 1) )
@@ -1876,7 +1876,7 @@ void vmx_vmentry_failure(void)
     struct vcpu *curr = current;
     unsigned long error;
 
-    __vmread(VM_INSTRUCTION_ERROR, &error);
+    error = vmread(VM_INSTRUCTION_ERROR);
     gprintk(XENLOG_ERR, "VM%s error: %#lx\n",
             curr->arch.hvm.vmx.launched ? "RESUME" : "LAUNCH", error);
 
@@ -1957,7 +1957,7 @@ void cf_check vmx_do_resume(void)
     hvm_do_resume(v);
 
     /* Sync host CR4 in case its value has changed. */
-    __vmread(HOST_CR4, &host_cr4);
+    host_cr4 = vmread(HOST_CR4);
     if ( host_cr4 != read_cr4() )
         __vmwrite(HOST_CR4, read_cr4());
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 827db6bdd8..203ca83c16 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -902,7 +902,7 @@ int cf_check vmx_guest_x86_mode(struct vcpu *v)
         return X86_MODE_REAL;
     if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
         return X86_MODE_VM86;
-    __vmread(GUEST_CS_AR_BYTES, &cs_ar_bytes);
+    cs_ar_bytes = vmread(GUEST_CS_AR_BYTES);
     if ( hvm_long_mode_active(v) &&
          likely(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
         return X86_MODE_64BIT;
@@ -952,7 +952,7 @@ static void __restore_debug_registers(struct vcpu *v)
  */
 static void vmx_restore_dr(struct vcpu *v)
 {
-    /* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
+    /* NB. vmread() is not usable here, so we cannot read from the VMCS. */
     if ( unlikely(v->arch.dr7 & DR7_ACTIVE_MASK) )
         __restore_debug_registers(v);
 }
@@ -963,17 +963,17 @@ static void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
 
     vmx_vmcs_enter(v);
 
-    __vmread(GUEST_SYSENTER_CS, &c->sysenter_cs);
-    __vmread(GUEST_SYSENTER_ESP, &c->sysenter_esp);
-    __vmread(GUEST_SYSENTER_EIP, &c->sysenter_eip);
+    c->sysenter_cs = vmread(GUEST_SYSENTER_CS);
+    c->sysenter_esp = vmread(GUEST_SYSENTER_ESP);
+    c->sysenter_eip = vmread(GUEST_SYSENTER_EIP);
 
-    __vmread(VM_ENTRY_INTR_INFO, &ev);
+    ev = vmread(VM_ENTRY_INTR_INFO);
     if ( (ev & INTR_INFO_VALID_MASK) &&
          hvm_event_needs_reinjection(MASK_EXTR(ev, INTR_INFO_INTR_TYPE_MASK),
                                      ev & INTR_INFO_VECTOR_MASK) )
     {
         c->pending_event = ev;
-        __vmread(VM_ENTRY_EXCEPTION_ERROR_CODE, &ev);
+        ev = vmread(VM_ENTRY_EXCEPTION_ERROR_CODE);
         c->error_code = ev;
     }
 
@@ -1199,7 +1199,7 @@ unsigned int vmx_get_cpl(void)
 {
     unsigned long attr;
 
-    __vmread(GUEST_SS_AR_BYTES, &attr);
+    attr = vmread(GUEST_SS_AR_BYTES);
 
     return MASK_EXTR(attr, X86_SEG_AR_DPL);
 }
@@ -1271,14 +1271,14 @@ static void cf_check vmx_get_segment_register(
         fallthrough;
 
     case x86_seg_es ... x86_seg_gs:
-        __vmread(GUEST_SEG_SELECTOR(tmp_seg), &sel);
-        __vmread(GUEST_SEG_AR_BYTES(tmp_seg), &attr);
+        sel = vmread(GUEST_SEG_SELECTOR(tmp_seg));
+        attr = vmread(GUEST_SEG_AR_BYTES(tmp_seg));
         fallthrough;
 
     case x86_seg_gdtr:
     case x86_seg_idtr:
-        __vmread(GUEST_SEG_LIMIT(tmp_seg),    &limit);
-        __vmread(GUEST_SEG_BASE(tmp_seg),     &reg->base);
+        limit = vmread(GUEST_SEG_LIMIT(tmp_seg));
+        reg->base = vmread(GUEST_SEG_BASE(tmp_seg));
         break;
 
     default:
@@ -1436,7 +1436,7 @@ static int cf_check vmx_get_guest_pat(struct vcpu *v, u64 *gpat)
         return 0;
 
     vmx_vmcs_enter(v);
-    __vmread(GUEST_PAT, gpat);
+    *gpat = vmread(GUEST_PAT);
     vmx_vmcs_exit(v);
     return 1;
 }
@@ -1557,7 +1557,7 @@ static unsigned int cf_check vmx_get_interrupt_shadow(struct vcpu *v)
 {
     unsigned long intr_shadow;
 
-    __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
+    intr_shadow = vmread(GUEST_INTERRUPTIBILITY_INFO);
 
     return intr_shadow;
 }
@@ -1573,12 +1573,12 @@ static void cf_check vmx_get_nonreg_state(struct vcpu *v,
 {
     vmx_vmcs_enter(v);
 
-    __vmread(GUEST_ACTIVITY_STATE, &nrs->vmx.activity_state);
-    __vmread(GUEST_INTERRUPTIBILITY_INFO, &nrs->vmx.interruptibility_info);
-    __vmread(GUEST_PENDING_DBG_EXCEPTIONS, &nrs->vmx.pending_dbg);
+    nrs->vmx.activity_state = vmread(GUEST_ACTIVITY_STATE);
+    nrs->vmx.interruptibility_info = vmread(GUEST_INTERRUPTIBILITY_INFO);
+    nrs->vmx.pending_dbg = vmread(GUEST_PENDING_DBG_EXCEPTIONS);
 
     if ( cpu_has_vmx_virtual_intr_delivery )
-        __vmread(GUEST_INTR_STATUS, &nrs->vmx.interrupt_status);
+        nrs->vmx.interrupt_status = vmread(GUEST_INTR_STATUS);
 
     vmx_vmcs_exit(v);
 }
@@ -1896,7 +1896,7 @@ static void cf_check vmx_update_guest_efer(struct vcpu *v)
      * The intended guest running mode is derived from VM_ENTRY_IA32E_MODE,
      * which (architecturally) is the guest's LMA setting.
      */
-    __vmread(VM_ENTRY_CONTROLS, &entry_ctls);
+    entry_ctls = vmread(VM_ENTRY_CONTROLS);
 
     entry_ctls &= ~VM_ENTRY_IA32E_MODE;
     if ( guest_efer & EFER_LMA )
@@ -2063,9 +2063,9 @@ static void cf_check vmx_inject_event(const struct x86_event *event)
         {
             unsigned long val;
 
-            __vmread(GUEST_DR7, &val);
+            val = vmread(GUEST_DR7);
             __vmwrite(GUEST_DR7, val & ~DR_GENERAL_DETECT);
-            __vmread(GUEST_IA32_DEBUGCTL, &val);
+            val = vmread(GUEST_IA32_DEBUGCTL);
             __vmwrite(GUEST_IA32_DEBUGCTL, val & ~IA32_DEBUGCTLMSR_LBR);
         }
         if ( cpu_has_monitor_trap_flag )
@@ -2089,7 +2089,7 @@ static void cf_check vmx_inject_event(const struct x86_event *event)
     if ( nestedhvm_vcpu_in_guestmode(curr) )
         intr_info = vcpu_2_nvmx(curr).intr.intr_info;
     else
-        __vmread(VM_ENTRY_INTR_INFO, &intr_info);
+        intr_info = vmread(VM_ENTRY_INTR_INFO);
 
     if ( unlikely(intr_info & INTR_INFO_VALID_MASK) &&
          (MASK_EXTR(intr_info, INTR_INFO_INTR_TYPE_MASK) == X86_ET_HW_EXC) )
@@ -2128,7 +2128,7 @@ static bool cf_check vmx_event_pending(const struct vcpu *v)
     unsigned long intr_info;
 
     ASSERT(v == current);
-    __vmread(VM_ENTRY_INTR_INFO, &intr_info);
+    intr_info = vmread(VM_ENTRY_INTR_INFO);
 
     return intr_info & INTR_INFO_VALID_MASK;
 }
@@ -2149,7 +2149,7 @@ static void cf_check vmx_set_info_guest(struct vcpu *v)
      * to set the GUEST_PENDING_DBG_EXCEPTIONS.BS here incurs
      * immediately vmexit and hence make no progress.
      */
-    __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
+    intr_shadow = vmread(GUEST_INTERRUPTIBILITY_INFO);
     if ( v->domain->debugger_attached &&
          (v->arch.user_regs.eflags & X86_EFLAGS_TF) &&
          (intr_shadow & VMX_INTR_SHADOW_STI) )
@@ -2178,7 +2178,7 @@ static u8 set_svi(int isr)
     if ( isr < 0 )
         isr = 0;
 
-    __vmread(GUEST_INTR_STATUS, &status);
+    status = vmread(GUEST_INTR_STATUS);
     old = status >> VMX_GUEST_INTR_STATUS_SVI_OFFSET;
     if ( isr != old )
     {
@@ -2518,9 +2518,9 @@ static bool cf_check vmx_vcpu_emulate_ve(struct vcpu *v)
     veinfo->eptp_index = vcpu_altp2m(v).p2midx;
 
     vmx_vmcs_enter(v);
-    __vmread(EXIT_QUALIFICATION, &veinfo->exit_qualification);
-    __vmread(GUEST_LINEAR_ADDRESS, &veinfo->gla);
-    __vmread(GUEST_PHYSICAL_ADDRESS, &veinfo->gpa);
+    veinfo->exit_qualification = vmread(EXIT_QUALIFICATION);
+    veinfo->gla = vmread(GUEST_LINEAR_ADDRESS);
+    veinfo->gpa = vmread(GUEST_PHYSICAL_ADDRESS);
     vmx_vmcs_exit(v);
 
     hvm_inject_hw_exception(X86_EXC_VE,
@@ -2541,8 +2541,8 @@ static bool cf_check vmx_get_pending_event(
     unsigned long intr_info, error_code;
 
     vmx_vmcs_enter(v);
-    __vmread(VM_ENTRY_INTR_INFO, &intr_info);
-    __vmread(VM_ENTRY_EXCEPTION_ERROR_CODE, &error_code);
+    intr_info = vmread(VM_ENTRY_INTR_INFO);
+    error_code = vmread(VM_ENTRY_EXCEPTION_ERROR_CODE);
     vmx_vmcs_exit(v);
 
     if ( !(intr_info & INTR_INFO_VALID_MASK) )
@@ -2739,11 +2739,11 @@ static uint64_t cf_check vmx_get_reg(struct vcpu *v, unsigned int reg)
     {
     case MSR_SPEC_CTRL:
         ASSERT(cpu_has_vmx_virt_spec_ctrl);
-        __vmread(SPEC_CTRL_SHADOW, &val);
+        val = vmread(SPEC_CTRL_SHADOW);
         break;
 
     case MSR_IA32_BNDCFGS:
-        __vmread(GUEST_BNDCFGS, &val);
+        val = vmread(GUEST_BNDCFGS);
         break;
 
     default:
@@ -3163,7 +3163,8 @@ static int get_instruction_length(void)
 {
     unsigned long len;
 
-    __vmread(VM_EXIT_INSTRUCTION_LEN, &len); /* Safe: callers audited */
+    /* Safe: callers audited */
+    len = vmread(VM_EXIT_INSTRUCTION_LEN);
     BUG_ON((len < 1) || (len > MAX_INST_LEN));
     return len;
 }
@@ -3176,7 +3177,7 @@ void update_guest_eip(void)
     regs->rip += get_instruction_length(); /* Safe: callers audited */
     regs->eflags &= ~X86_EFLAGS_RF;
 
-    __vmread(GUEST_INTERRUPTIBILITY_INFO, &x);
+    x = vmread(GUEST_INTERRUPTIBILITY_INFO);
     if ( x & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) )
     {
         x &= ~(VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS);
@@ -3424,21 +3425,21 @@ static int cf_check vmx_msr_read_intercept(
     switch ( msr )
     {
     case MSR_IA32_SYSENTER_CS:
-        __vmread(GUEST_SYSENTER_CS, msr_content);
+        *msr_content = vmread(GUEST_SYSENTER_CS);
         break;
     case MSR_IA32_SYSENTER_ESP:
-        __vmread(GUEST_SYSENTER_ESP, msr_content);
+        *msr_content = vmread(GUEST_SYSENTER_ESP);
         break;
     case MSR_IA32_SYSENTER_EIP:
-        __vmread(GUEST_SYSENTER_EIP, msr_content);
+        *msr_content = vmread(GUEST_SYSENTER_EIP);
         break;
 
     case MSR_FS_BASE:
-        __vmread(GUEST_FS_BASE, msr_content);
+        *msr_content = vmread(GUEST_FS_BASE);
         break;
 
     case MSR_GS_BASE:
-        __vmread(GUEST_GS_BASE, msr_content);
+        *msr_content = vmread(GUEST_GS_BASE);
         break;
 
     case MSR_SHADOW_GS_BASE:
@@ -3462,7 +3463,7 @@ static int cf_check vmx_msr_read_intercept(
         break;
 
     case MSR_IA32_DEBUGCTLMSR:
-        __vmread(GUEST_IA32_DEBUGCTL, msr_content);
+        *msr_content = vmread(GUEST_IA32_DEBUGCTL);
         break;
 
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_VMFUNC:
@@ -3828,7 +3829,7 @@ static void vmx_do_extint(struct cpu_user_regs *regs)
 {
     unsigned long vector;
 
-    __vmread(VM_EXIT_INTR_INFO, &vector);
+    vector = vmread(VM_EXIT_INTR_INFO);
     BUG_ON(!(vector & INTR_INFO_VALID_MASK));
 
     vector &= INTR_INFO_VECTOR_MASK;
@@ -3893,7 +3894,7 @@ static void ept_handle_violation(ept_qual_t q, paddr_t gpa)
 
     if ( q.gla_valid )
     {
-        __vmread(GUEST_LINEAR_ADDRESS, &gla);
+        gla = vmread(GUEST_LINEAR_ADDRESS);
         npfec.gla_valid = 1;
         if( q.gla_fault )
             npfec.kind = npfec_kind_with_gla;
@@ -3944,7 +3945,7 @@ static void vmx_failed_vmentry(unsigned int exit_reason,
     struct vcpu *curr = current;
 
     printk("%pv vmentry failure (reason %#x): ", curr, exit_reason);
-    __vmread(EXIT_QUALIFICATION, &exit_qualification);
+    exit_qualification = vmread(EXIT_QUALIFICATION);
     switch ( failed_vmentry_reason )
     {
     case EXIT_REASON_INVALID_GUEST_STATE:
@@ -4007,7 +4008,7 @@ static int vmx_handle_eoi_write(void)
      * 1. Must be a linear access data write.
      * 2. Data write must be to the EOI register.
      */
-    __vmread(EXIT_QUALIFICATION, &exit_qualification);
+    exit_qualification = vmread(EXIT_QUALIFICATION);
     if ( (((exit_qualification >> 12) & 0xf) == 1) &&
          ((exit_qualification & 0xfff) == APIC_EOI) )
     {
@@ -4037,7 +4038,7 @@ static void vmx_propagate_intr(unsigned long intr)
 
     if ( intr & INTR_INFO_DELIVER_CODE_MASK )
     {
-        __vmread(VM_EXIT_INTR_ERROR_CODE, &tmp);
+        tmp = vmread(VM_EXIT_INTR_ERROR_CODE);
         event.error_code = tmp;
     }
     else
@@ -4045,7 +4046,7 @@ static void vmx_propagate_intr(unsigned long intr)
 
     if ( event.type >= X86_ET_SW_INT )
     {
-        __vmread(VM_EXIT_INSTRUCTION_LEN, &tmp);
+        tmp = vmread(VM_EXIT_INSTRUCTION_LEN);
         event.insn_len = tmp;
     }
     else
@@ -4071,7 +4072,7 @@ static void vmx_idtv_reinject(unsigned long idtv_info)
             {
                 unsigned long ec;
 
-                __vmread(IDT_VECTORING_ERROR_CODE, &ec);
+                ec = vmread(IDT_VECTORING_ERROR_CODE);
                 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, ec);
             }
         }
@@ -4086,7 +4087,7 @@ static void vmx_idtv_reinject(unsigned long idtv_info)
         {
             unsigned long intr_info;
 
-            __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_info);
+            intr_info = vmread(GUEST_INTERRUPTIBILITY_INFO);
             __vmwrite(GUEST_INTERRUPTIBILITY_INFO,
                       intr_info & ~VMX_INTR_SHADOW_NMI);
         }
@@ -4111,8 +4112,8 @@ static void vmx_handle_descriptor_access(uint32_t exit_reason)
     uint64_t exit_qualification;
     unsigned int desc;
 
-    __vmread(EXIT_QUALIFICATION, &exit_qualification);
-    __vmread(VMX_INSTRUCTION_INFO, &instr_info);
+    exit_qualification = vmread(EXIT_QUALIFICATION);
+    instr_info = vmread(VMX_INSTRUCTION_INFO);
 
     if ( exit_reason == EXIT_REASON_ACCESS_GDTR_OR_IDTR )
     {
@@ -4137,7 +4138,7 @@ static int vmx_handle_apic_write(void)
     unsigned long exit_qualification;
 
     ASSERT(cpu_has_vmx_apic_reg_virt);
-    __vmread(EXIT_QUALIFICATION, &exit_qualification);
+    exit_qualification = vmread(EXIT_QUALIFICATION);
 
     return vlapic_apicv_write(current, exit_qualification & 0xfff);
 }
@@ -4146,7 +4147,7 @@ static void undo_nmis_unblocked_by_iret(void)
 {
     unsigned long guest_info;
 
-    __vmread(GUEST_INTERRUPTIBILITY_INFO, &guest_info);
+    guest_info = vmread(GUEST_INTERRUPTIBILITY_INFO);
     __vmwrite(GUEST_INTERRUPTIBILITY_INFO,
               guest_info | VMX_INTR_SHADOW_NMI);
 }
@@ -4159,12 +4160,12 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
     struct vcpu *v = current;
     struct domain *currd = v->domain;
 
-    __vmread(GUEST_RIP,    &regs->rip);
-    __vmread(GUEST_RSP,    &regs->rsp);
-    __vmread(GUEST_RFLAGS, &regs->rflags);
+    regs->rip = vmread(GUEST_RIP);
+    regs->rsp = vmread(GUEST_RSP);
+    regs->rflags = vmread(GUEST_RFLAGS);
 
     if ( hvm_long_mode_active(v) )
-        __vmread(GUEST_CS_AR_BYTES, &cs_ar_bytes);
+        cs_ar_bytes = vmread(GUEST_CS_AR_BYTES);
 
     hvm_sanitize_regs_fields(regs, !(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE));
 
@@ -4174,17 +4175,17 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
          * Xen allows the guest to modify some CR4 bits directly, update cached
          * values to match.
          */
-        __vmread(GUEST_CR4, &v->arch.hvm.hw_cr[4]);
+        v->arch.hvm.hw_cr[4] = vmread(GUEST_CR4);
         v->arch.hvm.guest_cr[4] &= v->arch.hvm.vmx.cr4_host_mask;
         v->arch.hvm.guest_cr[4] |= (v->arch.hvm.hw_cr[4] &
                                     ~v->arch.hvm.vmx.cr4_host_mask);
 
-        __vmread(GUEST_CR3, &v->arch.hvm.hw_cr[3]);
+        v->arch.hvm.hw_cr[3] = vmread(GUEST_CR3);
         if ( vmx_unrestricted_guest(v) || hvm_paging_enabled(v) )
             v->arch.hvm.guest_cr[3] = v->arch.hvm.hw_cr[3];
     }
 
-    __vmread(VM_EXIT_REASON, &exit_reason);
+    exit_reason = vmread(VM_EXIT_REASON);
 
     if ( hvm_long_mode_active(v) )
         TRACE_TIME(TRC_HVM_VMX_EXIT64, exit_reason, regs->rip, regs->rip >> 32);
@@ -4200,7 +4201,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
         vmx_do_extint(regs);
         break;
     case EXIT_REASON_EXCEPTION_NMI:
-        __vmread(VM_EXIT_INTR_INFO, &intr_info);
+        intr_info = vmread(VM_EXIT_INTR_INFO);
         BUG_ON(!(intr_info & INTR_INFO_VALID_MASK));
         vector = intr_info & INTR_INFO_VECTOR_MASK;
         if ( vector == X86_EXC_MC )
@@ -4237,12 +4238,12 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
 
         if ( v->arch.hvm.vmx.secondary_exec_control &
             SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS )
-            __vmread(EPTP_INDEX, &idx);
+            idx = vmread(EPTP_INDEX);
         else
         {
             unsigned long eptp;
 
-            __vmread(EPT_POINTER, &eptp);
+            eptp = vmread(EPT_POINTER);
 
             if ( (idx = p2m_find_altp2m_by_eptp(v->domain, eptp)) ==
                  INVALID_ALTP2M )
@@ -4261,7 +4262,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
     {
         int rc;
 
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        exit_qualification = vmread(EXIT_QUALIFICATION);
         rc = hvm_monitor_vmexit(exit_reason, exit_qualification);
         if ( rc < 0 )
             goto exit_and_crash;
@@ -4327,7 +4328,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
 
     hvm_maybe_deassert_evtchn_irq();
 
-    __vmread(IDT_VECTORING_INFO, &idtv_info);
+    idtv_info = vmread(IDT_VECTORING_INFO);
     if ( exit_reason != EXIT_REASON_TASK_SWITCH )
         vmx_idtv_reinject(idtv_info);
 
@@ -4362,7 +4363,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
              * Updates DR6 where debugger can peek (See 3B 23.2.1,
              * Table 23-1, "Exit Qualification for Debug Exceptions").
              */
-            __vmread(EXIT_QUALIFICATION, &exit_qualification);
+            exit_qualification = vmread(EXIT_QUALIFICATION);
             TRACE(TRC_HVM_TRAP_DEBUG, exit_qualification);
             __restore_debug_registers(v);
             write_debugreg(6, exit_qualification | DR_STATUS_RESERVED_ONE);
@@ -4390,15 +4391,14 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
             {
                 unsigned long int_info;
 
-                __vmread(GUEST_INTERRUPTIBILITY_INFO, &int_info);
+                int_info = vmread(GUEST_INTERRUPTIBILITY_INFO);
 
                if ( int_info & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) )
                 {
                     unsigned long pending_dbg;
 
-                    __vmread(GUEST_PENDING_DBG_EXCEPTIONS, &pending_dbg);
-                    __vmwrite(GUEST_PENDING_DBG_EXCEPTIONS,
-                              pending_dbg | DR_STEP);
+                    pending_dbg = vmread(GUEST_PENDING_DBG_EXCEPTIONS);
+                    __vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, pending_dbg | DR_STEP);
                 }
             }
 
@@ -4410,7 +4410,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
                                                     INTR_INFO_INTR_TYPE_MASK);
 
                 if ( trap_type >= X86_ET_SW_INT )
-                    __vmread(VM_EXIT_INSTRUCTION_LEN, &insn_len);
+                    insn_len = vmread(VM_EXIT_INSTRUCTION_LEN);
 
                 rc = hvm_monitor_debug(regs->rip,
                                        HVM_MONITOR_DEBUG_EXCEPTION,
@@ -4431,7 +4431,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
                 unsigned long insn_len;
                 int rc;
 
-                __vmread(VM_EXIT_INSTRUCTION_LEN, &insn_len);
+                insn_len = vmread(VM_EXIT_INSTRUCTION_LEN);
                 rc = hvm_monitor_debug(regs->rip,
                                        HVM_MONITOR_SOFTWARE_BREAKPOINT,
                                        X86_ET_SW_EXC,
@@ -4454,8 +4454,8 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
             vmx_fpu_dirty_intercept();
             break;
         case X86_EXC_PF:
-            __vmread(EXIT_QUALIFICATION, &exit_qualification);
-            __vmread(VM_EXIT_INTR_ERROR_CODE, &ecode);
+            exit_qualification = vmread(EXIT_QUALIFICATION);
+            ecode = vmread(VM_EXIT_INTR_ERROR_CODE);
             regs->error_code = ecode;
 
             HVM_DBG_LOG(DBG_LEVEL_VMMU,
@@ -4522,7 +4522,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
         };
         unsigned int inst_len, source;
 
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        exit_qualification = vmread(EXIT_QUALIFICATION);
         source = (exit_qualification >> 30) & 3;
         /* Vectored event should fill in interrupt information. */
         WARN_ON((source == 3) && !(idtv_info & INTR_INFO_VALID_MASK));
@@ -4536,7 +4536,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
                      > 3)) /* IntrType > 3? */
             ? get_instruction_length() /* Safe: SDM 3B 23.2.4 */ : 0;
         if ( (source == 3) && (idtv_info & INTR_INFO_DELIVER_CODE_MASK) )
-            __vmread(IDT_VECTORING_ERROR_CODE, &ecode);
+            ecode = vmread(IDT_VECTORING_ERROR_CODE);
         else
              ecode = -1;
 
@@ -4565,7 +4565,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
         break;
     case EXIT_REASON_INVLPG:
         update_guest_eip(); /* Safe: INVLPG */
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        exit_qualification = vmread(EXIT_QUALIFICATION);
         vmx_invlpg_intercept(exit_qualification);
         break;
     case EXIT_REASON_RDTSCP:
@@ -4591,13 +4591,13 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
 
     case EXIT_REASON_CR_ACCESS:
     {
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        exit_qualification = vmread(EXIT_QUALIFICATION);
         if ( vmx_cr_access(exit_qualification) == X86EMUL_OKAY )
             update_guest_eip(); /* Safe: MOV Cn, LMSW, CLTS */
         break;
     }
     case EXIT_REASON_DR_ACCESS:
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        exit_qualification = vmread(EXIT_QUALIFICATION);
         vmx_dr_access(exit_qualification, regs);
         break;
     case EXIT_REASON_MSR_READ:
@@ -4671,7 +4671,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
         break;
 
     case EXIT_REASON_EOI_INDUCED:
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        exit_qualification = vmread(EXIT_QUALIFICATION);
 
         ASSERT(cpu_has_vmx_virtual_intr_delivery);
 
@@ -4695,7 +4695,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
         unsigned int bytes;
         int rc;
 
-        __vmread(EXIT_QUALIFICATION, &io_qual.raw);
+        io_qual.raw = vmread(EXIT_QUALIFICATION);
         bytes = io_qual.size + 1;
 
         rc = hvm_monitor_io(io_qual.port, bytes, io_qual.in, io_qual.str);
@@ -4730,8 +4730,8 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
     {
         paddr_t gpa;
 
-        __vmread(GUEST_PHYSICAL_ADDRESS, &gpa);
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        gpa = vmread(GUEST_PHYSICAL_ADDRESS);
+        exit_qualification = vmread(EXIT_QUALIFICATION);
 
         if ( unlikely(exit_qualification & INTR_INFO_NMI_UNBLOCKED_BY_IRET) &&
              !(idtv_info & INTR_INFO_VALID_MASK) )
@@ -4745,7 +4745,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
     {
         paddr_t gpa;
 
-        __vmread(GUEST_PHYSICAL_ADDRESS, &gpa);
+        gpa = vmread(GUEST_PHYSICAL_ADDRESS);
         if ( !ept_handle_misconfig(gpa) )
             goto exit_and_crash;
         break;
@@ -4781,7 +4781,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
         break;
 
     case EXIT_REASON_PML_FULL:
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        exit_qualification = vmread(EXIT_QUALIFICATION);
 
         if ( unlikely(exit_qualification & INTR_INFO_NMI_UNBLOCKED_BY_IRET) &&
              !(idtv_info & INTR_INFO_VALID_MASK) )
@@ -4804,7 +4804,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
         break;
 
     case EXIT_REASON_NOTIFY:
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        exit_qualification = vmread(EXIT_QUALIFICATION);
 
         if ( unlikely(exit_qualification & NOTIFY_VM_CONTEXT_INVALID) )
         {
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index ceb5e5a322..720c86aeaf 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -400,7 +400,7 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
     unsigned long base, index, seg_base, disp, offset;
     int scale, size;
 
-    __vmread(VMX_INSTRUCTION_INFO, &offset);
+    offset = vmread(VMX_INSTRUCTION_INFO);
     info.word = offset;
 
     if ( info.fields.memreg ) {
@@ -428,7 +428,7 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
 
         scale = 1 << info.fields.scaling;
 
-        __vmread(EXIT_QUALIFICATION, &disp);
+        disp = vmread(EXIT_QUALIFICATION);
 
         size = 1 << (info.fields.addr_size + 1);
 
@@ -997,7 +997,7 @@ static void vvmcs_to_shadow_bulk(struct vcpu *v, unsigned int n,
 
     virtual_vmcs_enter(v);
     for ( i = 0; i < n; i++ )
-        __vmread(field[i], &value[i]);
+        value[i] = vmread(field[i]);
     virtual_vmcs_exit(v);
 
     for ( i = 0; i < n; i++ )
@@ -1036,7 +1036,7 @@ static void shadow_to_vvmcs_bulk(struct vcpu *v, unsigned int n,
     }
 
     for ( i = 0; i < n; i++ )
-        __vmread(field[i], &value[i]);
+        value[i] = vmread(field[i]);
 
     virtual_vmcs_enter(v);
     for ( i = 0; i < n; i++ )
@@ -1405,7 +1405,7 @@ static void nvmx_update_apicv(struct vcpu *v)
     }
     else
        /* Keep previous SVI if there's any. */
-       __vmread(GUEST_INTR_STATUS, &status);
+       status = vmread(GUEST_INTR_STATUS);
 
     rvi = vlapic_has_pending_irq(v);
     if ( rvi != -1 )
@@ -1696,7 +1696,7 @@ static int nvmx_handle_vmresume(struct cpu_user_regs *regs)
         return X86EMUL_OKAY;        
     }
 
-    __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
+    intr_shadow = vmread(GUEST_INTERRUPTIBILITY_INFO);
     if ( intr_shadow & VMX_INTR_SHADOW_MOV_SS )
     {
         vmfail_valid(regs, VMX_INSN_VMENTRY_BLOCKED_BY_MOV_SS);
@@ -1732,7 +1732,7 @@ static int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
         return X86EMUL_OKAY;
     }
 
-    __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
+    intr_shadow = vmread(GUEST_INTERRUPTIBILITY_INFO);
     if ( intr_shadow & VMX_INTR_SHADOW_MOV_SS )
     {
         vmfail_valid(regs, VMX_INSN_VMENTRY_BLOCKED_BY_MOV_SS);
@@ -2355,7 +2355,7 @@ int cf_check nvmx_hap_walk_L1_p2m(
 
     vmx_vmcs_enter(v);
 
-    __vmread(EXIT_QUALIFICATION, &exit_qual);
+    exit_qual = vmread(EXIT_QUALIFICATION);
     rc = nept_translate_l2ga(v, L2_gpa, page_order, rwx_rights, &gfn, p2m_acc,
                              &exit_qual, &exit_reason);
     switch ( rc )
@@ -2391,7 +2391,7 @@ void nvmx_idtv_handling(void)
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     unsigned long idtv_info, reason;
 
-    __vmread(IDT_VECTORING_INFO, &idtv_info);
+    idtv_info = vmread(IDT_VECTORING_INFO);
     if ( likely(!(idtv_info & INTR_INFO_VALID_MASK)) )
         return;
 
@@ -2399,7 +2399,7 @@ void nvmx_idtv_handling(void)
      * If L0 can solve the fault that causes idt vectoring, it should
      * be reinjected, otherwise, pass to L1.
      */
-    __vmread(VM_EXIT_REASON, &reason);
+    reason = vmread(VM_EXIT_REASON);
     if ( (uint16_t)reason != EXIT_REASON_EPT_VIOLATION ?
          !(nvmx->intr.intr_info & INTR_INFO_VALID_MASK) :
          !nvcpu->nv_vmexit_pending )
@@ -2407,7 +2407,7 @@ void nvmx_idtv_handling(void)
         __vmwrite(VM_ENTRY_INTR_INFO, idtv_info & ~INTR_INFO_RESVD_BITS_MASK);
         if ( idtv_info & INTR_INFO_DELIVER_CODE_MASK )
         {
-            __vmread(IDT_VECTORING_ERROR_CODE, &reason);
+            reason = vmread(IDT_VECTORING_ERROR_CODE);
             __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, reason);
         }
         /*
@@ -2418,7 +2418,7 @@ void nvmx_idtv_handling(void)
          * This means EXIT_INSTRUCTION_LEN is always valid here, for
          * software interrupts both injected by L1, and generated in L2.
          */
-        __vmread(VM_EXIT_INSTRUCTION_LEN, &reason);
+        reason = vmread(VM_EXIT_INSTRUCTION_LEN);
         __vmwrite(VM_ENTRY_INSTRUCTION_LEN, reason);
    }
 }
@@ -2452,7 +2452,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
         u64 exec_bitmap;
         int vector;
 
-        __vmread(VM_EXIT_INTR_INFO, &intr_info);
+        intr_info = vmread(VM_EXIT_INTR_INFO);
         vector = intr_info & INTR_INFO_VECTOR_MASK;
         /*
          * decided by L0 and L1 exception bitmap, if the vetor is set by
@@ -2531,7 +2531,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
             unsigned long qual;
             u16 port, size;
 
-            __vmread(EXIT_QUALIFICATION, &qual);
+            qual = vmread(EXIT_QUALIFICATION);
             port = qual >> 16;
             size = (qual & 7) + 1;
             do {
@@ -2638,7 +2638,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
         cr_access_qual_t qual;
         u32 mask = 0;
 
-        __vmread(EXIT_QUALIFICATION, &qual.raw);
+        qual.raw = vmread(EXIT_QUALIFICATION);
         /* also according to guest exec_control */
         ctrl = __n2_exec_control(v);
 
@@ -2680,7 +2680,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
                 {
                     u64 cr0_gh_mask = get_vvmcs(v, CR0_GUEST_HOST_MASK);
 
-                    __vmread(CR0_READ_SHADOW, &old_val);
+                    old_val = vmread(CR0_READ_SHADOW);
                     changed_bits = old_val ^ val;
                     if ( changed_bits & cr0_gh_mask )
                         nvcpu->nv_vmexit_pending = 1;
@@ -2696,7 +2696,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
                 {
                     u64 cr4_gh_mask = get_vvmcs(v, CR4_GUEST_HOST_MASK);
 
-                    __vmread(CR4_READ_SHADOW, &old_val);
+                    old_val = vmread(CR4_READ_SHADOW);
                     changed_bits = old_val ^ val;
                     if ( changed_bits & cr4_gh_mask )
                         nvcpu->nv_vmexit_pending = 1;
@@ -2732,7 +2732,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
             {
                 u64 cr0_gh_mask = get_vvmcs(v, CR0_GUEST_HOST_MASK);
 
-                __vmread(CR0_READ_SHADOW, &old_val);
+                old_val = vmread(CR0_READ_SHADOW);
                 old_val &= X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS;
                 val = qual.lmsw_data &
                       (X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS);
-- 
2.34.1





 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.