[Xen-devel] [PATCH 17 of 20] VM exit handler of n2-guest

To: Tim.Deegan@xxxxxxxxxx
Subject: [Xen-devel] [PATCH 17 of 20] VM exit handler of n2-guest
From: Eddie Dong <eddie.dong@xxxxxxxxx>
Date: Thu, 09 Jun 2011 16:25:22 +0800
Cc: xen-devel@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1307607905@xxxxxxxxxxxxxxxxxxxx>
References: <patchbomb.1307607905@xxxxxxxxxxxxxxxxxxxx>
# HG changeset patch
# User Eddie Dong <eddie.dong@xxxxxxxxx>
# Date 1307607849 -28800
# Node ID 5c3ab1e07ab1c1a903660f1c48a54aa67f738a7e
# Parent  4496678bbb000792aafa7e34a14ab893f5a32b8e
VM exit handler of n2-guest

Signed-off-by: Qing He <qing.he@xxxxxxxxx>
Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>

diff -r 4496678bbb00 -r 5c3ab1e07ab1 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 09 16:24:09 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 09 16:24:09 2011 +0800
@@ -943,6 +943,10 @@ static void vmx_set_segment_register(str
 static void vmx_set_tsc_offset(struct vcpu *v, u64 offset)
 {
     vmx_vmcs_enter(v);
+
+    if ( nestedhvm_vcpu_in_guestmode(v) )
+        offset += nvmx_get_tsc_offset(v);
+
     __vmwrite(TSC_OFFSET, offset);
 #if defined (__i386__)
     __vmwrite(TSC_OFFSET_HIGH, offset >> 32);
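While the vCPU is in guest mode, the TSC_OFFSET field programmed into the hardware VMCS is the sum of L0's offset and the offset L1 set up for L2, so L2 observes a TSC shifted by both layers. A minimal sketch of the intended arithmetic, with hypothetical names standing in for the cached L0 offset and the value fetched from the virtual VMCS:

    /* Hypothetical illustration, assuming both L0 and L1 use
     * CPU_BASED_USE_TSC_OFFSETING: hardware adds the programmed offset
     * to the host TSC on RDTSC, so stacking the two offsets yields the
     * TSC value that L2 observes. */
    static u64 tsc_seen_by_l2(u64 host_tsc, u64 l0_offset, u64 l1_offset)
    {
        return host_tsc + l0_offset + l1_offset;   /* wraps modulo 2^64 */
    }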
@@ -2258,6 +2262,11 @@ asmlinkage void vmx_vmexit_handler(struc
      * any pending vmresume has really happened
      */
     vcpu_nestedhvm(v).nv_vmswitch_in_progress = 0;
+    if ( nestedhvm_vcpu_in_guestmode(v) )
+    {
+        if ( nvmx_n2_vmexit_handler(regs, exit_reason) )
+            goto out;
+    }
 
     if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
         return vmx_failed_vmentry(exit_reason, regs);
@@ -2655,6 +2664,7 @@ asmlinkage void vmx_vmexit_handler(struc
         break;
     }
 
+out:
     if ( nestedhvm_vcpu_in_guestmode(v) )
         nvmx_idtv_handling();
 }
diff -r 4496678bbb00 -r 5c3ab1e07ab1 xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c       Thu Jun 09 16:24:09 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vvmx.c       Thu Jun 09 16:24:09 2011 +0800
@@ -286,13 +286,19 @@ static int vmx_inst_check_privilege(stru
     if ( (regs->eflags & X86_EFLAGS_VM) ||
          (hvm_long_mode_enabled(v) && cs.attr.fields.l == 0) )
         goto invalid_op;
-    /* TODO: check vmx operation mode */
+    else if ( nestedhvm_vcpu_in_guestmode(v) )
+        goto vmexit;
 
     if ( (cs.sel & 3) > 0 )
         goto gp_fault;
 
     return X86EMUL_OKAY;
 
+vmexit:
+    gdprintk(XENLOG_ERR, "vmx_inst_check_privilege: vmexit\n");
+    vcpu_nestedhvm(v).nv_vmexit_pending = 1;
+    return X86EMUL_EXCEPTION;
+
 invalid_op:
     gdprintk(XENLOG_ERR, "vmx_inst_check_privilege: invalid_op\n");
     hvm_inject_exception(TRAP_invalid_op, 0, 0);
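With this hunk, vmx_inst_check_privilege() distinguishes three outcomes instead of two; a summary of the contract as it now stands (comments only, not part of the patch):

    /*
     * X86EMUL_OKAY      - legal VMX instruction issued by L1;
     *                     L0 goes on to emulate it.
     * X86EMUL_EXCEPTION - with nv_vmexit_pending set: VMX instruction
     *                     issued by L2; a VMexit is reflected to L1
     *                     rather than emulating in L0.
     *                   - with #UD or #GP injected: illegal use (v8086
     *                     mode, a compatibility-mode segment in long
     *                     mode, or CPL > 0).
     */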
@@ -589,6 +595,18 @@ static void nvmx_purge_vvmcs(struct vcpu
     }
 }
 
+u64 nvmx_get_tsc_offset(struct vcpu *v)
+{
+    u64 offset = 0;
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+
+    if ( __get_vvmcs(nvcpu->nv_vvmcx, CPU_BASED_VM_EXEC_CONTROL) &
+         CPU_BASED_USE_TSC_OFFSETING )
+        offset = __get_vvmcs(nvcpu->nv_vvmcx, TSC_OFFSET);
+
+    return offset;
+}
+
 /*
  * Context synchronized between shadow and virtual VMCS.
  */
@@ -738,6 +756,8 @@ static void load_shadow_guest_state(stru
     hvm_set_cr4(__get_vvmcs(vvmcs, GUEST_CR4));
     hvm_set_cr3(__get_vvmcs(vvmcs, GUEST_CR3));
 
+    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
+
     vvmcs_to_shadow(vvmcs, VM_ENTRY_INTR_INFO);
     vvmcs_to_shadow(vvmcs, VM_ENTRY_EXCEPTION_ERROR_CODE);
     vvmcs_to_shadow(vvmcs, VM_ENTRY_INSTRUCTION_LEN);
@@ -865,6 +885,8 @@ static void load_vvmcs_host_state(struct
     hvm_set_cr4(__get_vvmcs(vvmcs, HOST_CR4));
     hvm_set_cr3(__get_vvmcs(vvmcs, HOST_CR3));
 
+    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
+
     __set_vvmcs(vvmcs, VM_ENTRY_INTR_INFO, 0);
 }
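Both the virtual VMentry path (load_shadow_guest_state) and the virtual VMexit path above re-issue set_tsc_offset() with the same cached L0 value; because vmx_set_tsc_offset() adds the L1 offset only while the vCPU is in guest mode, the one call programs the correct hardware value on either side of the transition. A sketch of the sequence, assuming guest mode is already set (resp. already cleared) when these paths run, with l0/l1 as hypothetical shorthands:

    /* l0 = v->arch.hvm_vcpu.cache_tsc_offset, l1 = nvmx_get_tsc_offset(v) */
    hvm_funcs.set_tsc_offset(v, l0);  /* virtual VMentry: hardware gets l0 + l1 */
    /* ... L2 runs, then takes an exit that leads to a virtual VMexit ... */
    hvm_funcs.set_tsc_offset(v, l0);  /* virtual VMexit: hardware gets l0 alone */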
 
@@ -1261,3 +1283,195 @@ void nvmx_idtv_handling(void)
    }
 }
 
+/*
+ * L2 VMExit handling
+ *    return 1: done; skip the normal layer-0 hypervisor handling,
+ *              either because the exit requires layer-1 hypervisor
+ *              handling or because it has already been handled here.
+ *           0: the normal layer-0 handling is required.
+ */
+int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
+                           unsigned int exit_reason)
+{
+    struct vcpu *v = current;
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    u32 ctrl;
+    u16 port;
+    u8 *bitmap;
+
+    nvcpu->nv_vmexit_pending = 0;
+    nvmx->intr.intr_info = 0;
+    nvmx->intr.error_code = 0;
+
+    switch ( exit_reason )
+    {
+    case EXIT_REASON_EXCEPTION_NMI:
+    {
+        u32 intr_info = __vmread(VM_EXIT_INTR_INFO);
+        u32 valid_mask = (X86_EVENTTYPE_HW_EXCEPTION << 8) |
+                         INTR_INFO_VALID_MASK;
+        u64 exec_bitmap;
+        int vector = intr_info & INTR_INFO_VECTOR_MASK;
+
+        /*
+         * Decided by the L0 and L1 exception bitmaps: if the vector is
+         * set in both, L0 has priority on #PF, L1 has priority on the
+         * others.
+         */
+        if ( vector == TRAP_page_fault )
+        {
+            if ( paging_mode_hap(v->domain) )
+                nvcpu->nv_vmexit_pending = 1;
+        }
+        else if ( (intr_info & valid_mask) == valid_mask )
+        {
+            exec_bitmap = __get_vvmcs(nvcpu->nv_vvmcx, EXCEPTION_BITMAP);
+
+            if ( exec_bitmap & (1 << vector) )
+                nvcpu->nv_vmexit_pending = 1;
+        }
+        break;
+    }
+    case EXIT_REASON_WBINVD:
+    case EXIT_REASON_EPT_VIOLATION:
+    case EXIT_REASON_EPT_MISCONFIG:
+    case EXIT_REASON_EXTERNAL_INTERRUPT:
+        /* pass to L0 handler */
+        break;
+    case VMX_EXIT_REASONS_FAILED_VMENTRY:
+    case EXIT_REASON_TRIPLE_FAULT:
+    case EXIT_REASON_TASK_SWITCH:
+    case EXIT_REASON_CPUID:
+    case EXIT_REASON_MSR_READ:
+    case EXIT_REASON_MSR_WRITE:
+    case EXIT_REASON_VMCALL:
+    case EXIT_REASON_VMCLEAR:
+    case EXIT_REASON_VMLAUNCH:
+    case EXIT_REASON_VMPTRLD:
+    case EXIT_REASON_VMPTRST:
+    case EXIT_REASON_VMREAD:
+    case EXIT_REASON_VMRESUME:
+    case EXIT_REASON_VMWRITE:
+    case EXIT_REASON_VMXOFF:
+    case EXIT_REASON_VMXON:
+    case EXIT_REASON_INVEPT:
+        /* inject to L1 */
+        nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_IO_INSTRUCTION:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_ACTIVATE_IO_BITMAP )
+        {
+            port = __vmread(EXIT_QUALIFICATION) >> 16;
+            bitmap = nvmx->iobitmap[port >> 15];
+            if ( bitmap[(port & 0x7fff) >> 3] & (1 << (port & 0x7)) )
+                nvcpu->nv_vmexit_pending = 1;
+            if ( !nvcpu->nv_vmexit_pending )
+                gdprintk(XENLOG_WARNING, "L0 PIO %x.\n", port);
+        }
+        else if ( ctrl & CPU_BASED_UNCOND_IO_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+
+    case EXIT_REASON_PENDING_VIRT_INTR:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_VIRTUAL_INTR_PENDING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_PENDING_VIRT_NMI:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_VIRTUAL_NMI_PENDING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    /* L1 has priority in handling several other types of exits */
+    case EXIT_REASON_HLT:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_HLT_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_RDTSC:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_RDTSC_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        else
+        {
+            uint64_t tsc;
+
+            /*
+             * Special handling is needed if L1 doesn't intercept rdtsc,
+             * to avoid changing guest_tsc and messing up timekeeping in L1.
+             */
+            tsc = hvm_get_guest_tsc(v);
+            tsc += __get_vvmcs(nvcpu->nv_vvmcx, TSC_OFFSET);
+            regs->eax = (uint32_t)tsc;
+            regs->edx = (uint32_t)(tsc >> 32);
+
+            return 1;
+        }
+        break;
+    case EXIT_REASON_RDPMC:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_RDPMC_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_MWAIT_INSTRUCTION:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_MWAIT_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_PAUSE_INSTRUCTION:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_PAUSE_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_MONITOR_INSTRUCTION:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_MONITOR_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_DR_ACCESS:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_MOV_DR_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_INVLPG:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_INVLPG_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_CR_ACCESS:
+    {
+        u64 exit_qualification = __vmread(EXIT_QUALIFICATION);
+        int cr = exit_qualification & 15;
+        int write = (exit_qualification >> 4) & 3;
+        u32 mask = 0;
+
+        /* CR3/CR8 intercepts also depend on the guest's exec_control */
+        ctrl = __n2_exec_control(v);
+
+        if ( cr == 3 )
+        {
+            mask = write ? CPU_BASED_CR3_STORE_EXITING :
+                           CPU_BASED_CR3_LOAD_EXITING;
+            if ( ctrl & mask )
+                nvcpu->nv_vmexit_pending = 1;
+        }
+        else if ( cr == 8 )
+        {
+            mask = write ? CPU_BASED_CR8_STORE_EXITING :
+                           CPU_BASED_CR8_LOAD_EXITING;
+            if ( ctrl & mask )
+                nvcpu->nv_vmexit_pending = 1;
+        }
+        else  /* CR0, CR4, CLTS, LMSW */
+            nvcpu->nv_vmexit_pending = 1;
+
+        break;
+    }
+    default:
+        gdprintk(XENLOG_WARNING, "Unknown nested vmexit reason %x.\n",
+                 exit_reason);
+    }
+
+    return ( nvcpu->nv_vmexit_pending == 1 );
+}
+
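For reference, the decodes in the IO_INSTRUCTION and CR_ACCESS cases follow the architectural exit-qualification layout: the port number sits in bits 31:16, the CR number in bits 3:0 with the access type in bits 5:4, and the two 4KiB I/O bitmaps each cover one half of the 16-bit port space, one bit per port. A self-contained sketch of those decodes (hypothetical helper names, not part of the patch):

    #include <stdint.h>

    /* One bit per port: bitmap[0] covers ports 0x0000-0x7fff,
     * bitmap[1] covers ports 0x8000-0xffff. */
    static int io_port_intercepted(uint8_t *bitmap[2], uint16_t port)
    {
        uint8_t *page = bitmap[port >> 15];            /* bitmap A or B */
        return (page[(port & 0x7fff) >> 3] >> (port & 7)) & 1;
    }

    /* I/O exit qualification: the port number is in bits 31:16. */
    static uint16_t io_exit_port(uint64_t exit_qualification)
    {
        return (uint16_t)(exit_qualification >> 16);
    }

    /* CR-access exit qualification: bits 3:0 = CR number, bits 5:4 =
     * access type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW). */
    static void cr_exit_decode(uint64_t exit_qualification,
                               int *cr, int *access_type)
    {
        *cr = exit_qualification & 15;
        *access_type = (exit_qualification >> 4) & 3;
    }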
diff -r 4496678bbb00 -r 5c3ab1e07ab1 xen/include/asm-x86/hvm/vmx/vvmx.h
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h        Thu Jun 09 16:24:09 2011 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h        Thu Jun 09 16:24:09 2011 +0800
@@ -170,6 +170,9 @@ void nvmx_update_secondary_exec_control(
 void nvmx_update_exception_bitmap(struct vcpu *v, unsigned long value);
 asmlinkage void nvmx_switch_guest(void);
 void nvmx_idtv_handling(void);
+u64 nvmx_get_tsc_offset(struct vcpu *v);
+int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
+                           unsigned int exit_reason);
 
 #endif /* __ASM_X86_HVM_VVMX_H__ */
 
