[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v2 4/4] nestedhvm: replace VMCX_EADDR by INVALID_PADDR



On 12/14/16 18:11 +0800, Haozhong Zhang wrote:
... because INVALID_PADDR is a more general one.

Suggested-by: Jan Beulich <JBeulich@xxxxxxxx>
Signed-off-by: Haozhong Zhang <haozhong.zhang@xxxxxxxxx>
---
xen/arch/x86/hvm/nestedhvm.c     |  2 +-
xen/arch/x86/hvm/svm/nestedsvm.c | 18 +++++++++---------
xen/arch/x86/hvm/svm/vmcb.c      |  2 +-
xen/arch/x86/hvm/vmx/vvmx.c      | 16 ++++++++--------
xen/include/asm-x86/hvm/vcpu.h   |  2 --
5 files changed, 19 insertions(+), 21 deletions(-)


Forgot to cc AMD maintainers.

diff --git a/xen/arch/x86/hvm/nestedhvm.c b/xen/arch/x86/hvm/nestedhvm.c
index c4671d8..c09c5b2 100644
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -54,7 +54,7 @@ nestedhvm_vcpu_reset(struct vcpu *v)

    hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
    nv->nv_vvmcx = NULL;
-    nv->nv_vvmcxaddr = VMCX_EADDR;
+    nv->nv_vvmcxaddr = INVALID_PADDR;
    nv->nv_flushp2m = 0;
    nv->nv_p2m = NULL;

diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 8c9b073..4d9de86 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -68,10 +68,10 @@ int nestedsvm_vmcb_map(struct vcpu *v, uint64_t vmcbaddr)
    struct nestedvcpu *nv = &vcpu_nestedhvm(v);

    if (nv->nv_vvmcx != NULL && nv->nv_vvmcxaddr != vmcbaddr) {
-        ASSERT(nv->nv_vvmcxaddr != VMCX_EADDR);
+        ASSERT(nv->nv_vvmcxaddr != INVALID_PADDR);
        hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
        nv->nv_vvmcx = NULL;
-        nv->nv_vvmcxaddr = VMCX_EADDR;
+        nv->nv_vvmcxaddr = INVALID_PADDR;
    }

    if ( !nv->nv_vvmcx )
@@ -154,7 +154,7 @@ void nsvm_vcpu_destroy(struct vcpu *v)
    if (nv->nv_n2vmcx) {
        free_vmcb(nv->nv_n2vmcx);
        nv->nv_n2vmcx = NULL;
-        nv->nv_n2vmcx_pa = VMCX_EADDR;
+        nv->nv_n2vmcx_pa = INVALID_PADDR;
    }
    if (svm->ns_iomap)
        svm->ns_iomap = NULL;
@@ -164,8 +164,8 @@ int nsvm_vcpu_reset(struct vcpu *v)
{
    struct nestedsvm *svm = &vcpu_nestedsvm(v);

-    svm->ns_msr_hsavepa = VMCX_EADDR;
-    svm->ns_ovvmcb_pa = VMCX_EADDR;
+    svm->ns_msr_hsavepa = INVALID_PADDR;
+    svm->ns_ovvmcb_pa = INVALID_PADDR;

    svm->ns_tscratio = DEFAULT_TSC_RATIO;

@@ -425,7 +425,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)

    /* Check if virtual VMCB cleanbits are valid */
    vcleanbits_valid = 1;
-    if (svm->ns_ovvmcb_pa == VMCX_EADDR)
+    if ( svm->ns_ovvmcb_pa == INVALID_PADDR )
        vcleanbits_valid = 0;
    if (svm->ns_ovvmcb_pa != nv->nv_vvmcxaddr)
        vcleanbits_valid = 0;
@@ -674,7 +674,7 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs,
    ns_vmcb = nv->nv_vvmcx;
    ASSERT(ns_vmcb != NULL);
    ASSERT(nv->nv_n2vmcx != NULL);
-    ASSERT(nv->nv_n2vmcx_pa != VMCX_EADDR);
+    ASSERT(nv->nv_n2vmcx_pa != INVALID_PADDR);

    /* Save values for later use. Needed for Nested-on-Nested and
     * Shadow-on-Shadow paging.
@@ -1490,8 +1490,8 @@ void nsvm_vcpu_switch(struct cpu_user_regs *regs)
    ASSERT(v->arch.hvm_svm.vmcb != NULL);
    ASSERT(nv->nv_n1vmcx != NULL);
    ASSERT(nv->nv_n2vmcx != NULL);
-    ASSERT(nv->nv_n1vmcx_pa != VMCX_EADDR);
-    ASSERT(nv->nv_n2vmcx_pa != VMCX_EADDR);
+    ASSERT(nv->nv_n1vmcx_pa != INVALID_PADDR);
+    ASSERT(nv->nv_n2vmcx_pa != INVALID_PADDR);

    if (nv->nv_vmexit_pending) {
 vmexit:
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 9ea014f..70d75e7 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -273,7 +273,7 @@ void svm_destroy_vmcb(struct vcpu *v)
    }

    nv->nv_n1vmcx = NULL;
-    nv->nv_n1vmcx_pa = VMCX_EADDR;
+    nv->nv_n1vmcx_pa = INVALID_PADDR;
    arch_svm->vmcb = NULL;
}

diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 5523146..c4f19a0 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -114,7 +114,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
    nvmx->guest_vpid = 0;
    nvmx->vmxon_region_pa = INVALID_PADDR;
    nvcpu->nv_vvmcx = NULL;
-    nvcpu->nv_vvmcxaddr = VMCX_EADDR;
+    nvcpu->nv_vvmcxaddr = INVALID_PADDR;
    nvmx->intr.intr_info = 0;
    nvmx->intr.error_code = 0;
    nvmx->iobitmap[0] = NULL;
@@ -766,10 +766,10 @@ static void nvmx_purge_vvmcs(struct vcpu *v)
    int i;

    __clear_current_vvmcs(v);
-    if ( nvcpu->nv_vvmcxaddr != VMCX_EADDR )
+    if ( nvcpu->nv_vvmcxaddr != INVALID_PADDR )
        hvm_unmap_guest_frame(nvcpu->nv_vvmcx, 1);
    nvcpu->nv_vvmcx = NULL;
-    nvcpu->nv_vvmcxaddr = VMCX_EADDR;
+    nvcpu->nv_vvmcxaddr = INVALID_PADDR;
    v->arch.hvm_vmx.vmcs_shadow_maddr = 0;
    for (i=0; i<2; i++) {
        if ( nvmx->iobitmap[i] ) {
@@ -1393,7 +1393,7 @@ int nvmx_handle_vmxon(struct cpu_user_regs *regs)
    if ( nvmx_vcpu_in_vmx(v) )
    {
        vmreturn(regs,
-                 nvcpu->nv_vvmcxaddr != VMCX_EADDR ?
+                 nvcpu->nv_vvmcxaddr != INVALID_PADDR ?
                 VMFAIL_VALID : VMFAIL_INVALID);
        return X86EMUL_OKAY;
    }
@@ -1509,7 +1509,7 @@ static int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);

    /* check VMCS is valid and IO BITMAP is set */
-    if ( (nvcpu->nv_vvmcxaddr != VMCX_EADDR) &&
+    if ( (nvcpu->nv_vvmcxaddr != INVALID_PADDR) &&
            ((nvmx->iobitmap[0] && nvmx->iobitmap[1]) ||
            !(__n2_exec_control(v) & CPU_BASED_ACTIVATE_IO_BITMAP) ) )
        nvcpu->nv_vmentry_pending = 1;
@@ -1529,7 +1529,7 @@ int nvmx_handle_vmresume(struct cpu_user_regs *regs)
    if ( rc != X86EMUL_OKAY )
        return rc;

-    if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
+    if ( vcpu_nestedhvm(v).nv_vvmcxaddr == INVALID_PADDR )
    {
        vmreturn (regs, VMFAIL_INVALID);
        return X86EMUL_OKAY;
@@ -1554,7 +1554,7 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
    if ( rc != X86EMUL_OKAY )
        return rc;

-    if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
+    if ( vcpu_nestedhvm(v).nv_vvmcxaddr == INVALID_PADDR )
    {
        vmreturn (regs, VMFAIL_INVALID);
        return X86EMUL_OKAY;
@@ -1599,7 +1599,7 @@ int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
    if ( nvcpu->nv_vvmcxaddr != gpa )
        nvmx_purge_vvmcs(v);

-    if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
+    if ( nvcpu->nv_vvmcxaddr == INVALID_PADDR )
    {
        bool_t writable;
        void *vvmcx = hvm_map_guest_frame_rw(paddr_to_pfn(gpa), 1, &writable);
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index d485536..7b411a8 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -97,8 +97,6 @@ static inline bool_t hvm_vcpu_io_need_completion(const struct hvm_vcpu_io *vio)
           !vio->io_req.data_is_ptr;
}

-#define VMCX_EADDR    (~0ULL)
-
struct nestedvcpu {
    bool_t nv_guestmode; /* vcpu in guestmode? */
    void *nv_vvmcx; /* l1 guest virtual VMCB/VMCS */
--
2.10.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.