To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] SVM patch to cleanup guest event injection logic, remove unnecessary
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 18 May 2006 09:42:15 +0000
Delivery-date: Thu, 18 May 2006 02:44:29 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID b4361ae1aabc2a6bbc65c9d9bdc9843915b2eb09
# Parent  3d85f350a66a006fd5df2c228cfd8b75e3240984
SVM patch to clean up guest event injection logic and remove the
unnecessary injecting_event variable.

Signed-off-by: Tom Woller <thomas.woller@xxxxxxx>
---
 xen/arch/x86/hvm/svm/intr.c        |   17 +++--------------
 xen/arch/x86/hvm/svm/svm.c         |   34 ++++++++++++++++------------------
 xen/arch/x86/hvm/svm/vmcb.c        |    1 -
 xen/include/asm-x86/hvm/svm/vmcb.h |    1 -
 4 files changed, 19 insertions(+), 34 deletions(-)
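For readers skimming the hunks below: the central interface change is that svm_inject_exception() now takes the vcpu rather than a raw VMCB pointer and looks the VMCB up itself, so every injection site simply passes v (or vc). The stand-alone C sketch below illustrates that shape; the eventinj_t, vmcb_struct and vcpu definitions are simplified stand-ins for the real ones in xen/include/asm-x86/hvm/svm/vmcb.h, the bit-fields rely on GCC-style 64-bit bit-field support, and the field writes after event.fields.v are illustrative assumptions rather than text copied from the patch.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the SVM EVENTINJ field layout. */
typedef union {
    uint64_t bytes;
    struct {
        uint64_t vector:    8;
        uint64_t type:      3;
        uint64_t ev:        1;
        uint64_t resvd1:   19;
        uint64_t v:         1;
        uint64_t errorcode:32;
    } fields;
} eventinj_t;

struct vmcb_struct { eventinj_t eventinj; };
struct arch_svm_struct { struct vmcb_struct *vmcb; };
struct vcpu { struct { struct arch_svm_struct hvm_svm; } arch; };

#define TRAP_gp_fault 13

/* New shape, as introduced by the patch: take the vcpu and look the
 * VMCB up inside the helper instead of at every call site. */
static inline void svm_inject_exception(struct vcpu *v, int trap, int ev,
                                        int error_code)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    eventinj_t event;

    event.bytes = 0;
    event.fields.v = 1;
    /* The remaining field writes are illustrative only; the hunk in
     * svm.c is truncated after fields.v. */
    event.fields.type = 3;                       /* hardware exception */
    event.fields.vector = (uint64_t)trap;
    event.fields.ev = (uint64_t)ev;
    event.fields.errorcode = (uint64_t)error_code;

    vmcb->eventinj = event;
}

int main(void)
{
    struct vmcb_struct vmcb = { .eventinj = { .bytes = 0 } };
    struct vcpu v = { .arch = { .hvm_svm = { .vmcb = &vmcb } } };

    /* Callers now pass the vcpu, e.g. when reflecting a #GP: */
    svm_inject_exception(&v, TRAP_gp_fault, 1, 0);

    printf("eventinj = %#llx\n", (unsigned long long)vmcb.eventinj.bytes);
    return 0;
}

The same pattern is applied to svm_get_io_address() further down, which also switches from a vmcb parameter to the vcpu and performs the v->arch.hvm_svm.vmcb lookup in the callee.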

diff -r 3d85f350a66a -r b4361ae1aabc xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c       Wed May 17 23:51:39 2006 +0100
+++ b/xen/arch/x86/hvm/svm/intr.c       Wed May 17 23:53:01 2006 +0100
@@ -132,17 +132,13 @@ asmlinkage void svm_intr_assist(void)
     ASSERT(vmcb);
 
     /* Check if an Injection is active */
-    if (v->arch.hvm_svm.injecting_event) {
        /* Previous Interrupt delivery caused this Intercept? */
        if (vmcb->exitintinfo.fields.v && (vmcb->exitintinfo.fields.type == 0)) {
            v->arch.hvm_svm.saved_irq_vector = vmcb->exitintinfo.fields.vector;
 //           printk("Injecting PF#: saving IRQ from ExitInfo\n");
            vmcb->exitintinfo.bytes = 0;
-
-           /* bail out, we won't be injecting an interrupt this time */
-           return;
+           re_injecting = 1;
        }
-    }
 
     /* Guest's interrupts masked? */
     rflags = vmcb->rflags;
@@ -151,16 +147,9 @@ asmlinkage void svm_intr_assist(void)
        /* bail out, we won't be injecting an interrupt this time */
        return;
     }
-
-    /* Interrupt delivery caused an Intercept? */
-    if (vmcb->exitintinfo.fields.v && (vmcb->exitintinfo.fields.type == 0)) {
-//        printk("Re-injecting IRQ from ExitInfo\n");
-        intr_vector = vmcb->exitintinfo.fields.vector;
-        vmcb->exitintinfo.bytes = 0;
-        re_injecting = 1;
-    }
+  
     /* Previous interrupt still pending? */
-    else if (vmcb->vintr.fields.irq) {
+    if (vmcb->vintr.fields.irq) {
 //        printk("Re-injecting IRQ from Vintr\n");
         intr_vector = vmcb->vintr.fields.vector;
         vmcb->vintr.bytes = 0;
diff -r 3d85f350a66a -r b4361ae1aabc xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed May 17 23:51:39 2006 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed May 17 23:53:01 2006 +0100
@@ -170,10 +170,10 @@ void asidpool_retire( struct vmcb_struct
    spin_unlock(&ASIDpool[core].asid_lock);
 }
 
-static inline void svm_inject_exception(struct vmcb_struct *vmcb, 
-                                        int trap, int ev, int error_code)
+static inline void svm_inject_exception(struct vcpu *v, int trap, int ev, int error_code)
 {
     eventinj_t event;
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     event.bytes = 0;            
     event.fields.v = 1;
@@ -329,7 +329,7 @@ static inline int long_mode_do_msr_write
         if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
         {
             printk("trying to set reserved bit in EFER\n");
-            svm_inject_exception(vmcb, TRAP_gp_fault, 1, 0);
+            svm_inject_exception(vc, TRAP_gp_fault, 1, 0);
             return 0;
         }
 
@@ -343,7 +343,7 @@ static inline int long_mode_do_msr_write
             {
                 printk("trying to set LME bit when "
                        "in paging mode or PAE bit is not set\n");
-                svm_inject_exception(vmcb, TRAP_gp_fault, 1, 0);
+                svm_inject_exception(vc, TRAP_gp_fault, 1, 0);
                 return 0;
             }
             set_bit(SVM_CPU_STATE_LME_ENABLED, &vc->arch.hvm_svm.cpu_state);
@@ -367,7 +367,7 @@ static inline int long_mode_do_msr_write
         if (!IS_CANO_ADDRESS(msr_content))
         {
             HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
-            svm_inject_exception(vmcb, TRAP_gp_fault, 1, 0);
+            svm_inject_exception(vc, TRAP_gp_fault, 1, 0);
         }
 
         if (regs->ecx == MSR_FS_BASE)
@@ -900,7 +900,7 @@ static void svm_do_general_protection_fa
 
     
     /* Reflect it back into the guest */
-    svm_inject_exception(vmcb, TRAP_gp_fault, 1, error_code);
+    svm_inject_exception(v, TRAP_gp_fault, 1, error_code);
 }
 
 /* Reserved bits: [31:14], [12:1] */
@@ -1191,7 +1191,7 @@ static void svm_get_prefix_info(struct v
 
 
 /* Get the address of INS/OUTS instruction */
-static inline int svm_get_io_address(struct vmcb_struct *vmcb, 
+static inline int svm_get_io_address(struct vcpu *v, 
                struct cpu_user_regs *regs, unsigned int dir, 
         unsigned long *count, unsigned long *addr)
 {
@@ -1201,6 +1201,7 @@ static inline int svm_get_io_address(str
     int                  long_mode;
     ioio_info_t          info;
     segment_selector_t  *seg = NULL;
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     info.bytes = vmcb->exitinfo1;
 
@@ -1244,7 +1245,7 @@ static inline int svm_get_io_address(str
     /* If the segment isn't present, give GP fault! */
     if (!long_mode && !seg->attributes.fields.p) 
     {
-        svm_inject_exception(vmcb, TRAP_gp_fault, 1, seg->sel);
+        svm_inject_exception(v, TRAP_gp_fault, 1, seg->sel);
         return 0;
     }
 
@@ -1262,7 +1263,7 @@ static inline int svm_get_io_address(str
     if (!long_mode) {
         if (*addr > seg->limit) 
         {
-            svm_inject_exception(vmcb, TRAP_gp_fault, 1, seg->sel);
+            svm_inject_exception(v, TRAP_gp_fault, 1, seg->sel);
             return 0;
         } 
         else 
@@ -1310,7 +1311,7 @@ static void svm_io_instruction(struct vc
         unsigned long addr, count;
         int sign = regs->eflags & EF_DF ? -1 : 1;
 
-        if (!svm_get_io_address(vmcb, regs, dir, &count, &addr)) 
+        if (!svm_get_io_address(v, regs, dir, &count, &addr)) 
         {
             /* We failed to get a valid address, so don't do the IO operation -
              * it would just get worse if we do! Hopefully the guest is handing
@@ -1415,7 +1416,7 @@ static int svm_set_cr0(unsigned long val
                     &v->arch.hvm_svm.cpu_state))
         {
             HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n");
-            svm_inject_exception(vmcb, TRAP_gp_fault, 1, 0);
+            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
         }
 
         if (test_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state))
@@ -1489,7 +1490,7 @@ static int svm_set_cr0(unsigned long val
      */
     if ((value & X86_CR0_PE) == 0) {
        if (value & X86_CR0_PG) {
-            svm_inject_exception(vmcb, TRAP_gp_fault, 1, 0);
+            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
             return 0;
         }
 
@@ -1736,7 +1737,7 @@ static int mov_to_cr(int gpreg, int cr, 
         } else {
             if (test_bit(SVM_CPU_STATE_LMA_ENABLED,
                          &v->arch.hvm_svm.cpu_state)) {
-                svm_inject_exception(vmcb, TRAP_gp_fault, 1, 0);
+                svm_inject_exception(v, TRAP_gp_fault, 1, 0);
             }
             clear_bit(SVM_CPU_STATE_PAE_ENABLED, &v->arch.hvm_svm.cpu_state);
         }
@@ -2504,7 +2505,6 @@ asmlinkage void svm_vmexit_handler(struc
 
     exit_reason = vmcb->exitcode;
     save_svm_cpu_user_regs(v, &regs);
-    v->arch.hvm_svm.injecting_event = 0;
 
     vmcb->tlb_control = 1;
 
@@ -2668,7 +2668,7 @@ asmlinkage void svm_vmexit_handler(struc
         if ( test_bit(_DOMF_debugging, &v->domain->domain_flags) )
             domain_pause_for_debugger();
         else 
-            svm_inject_exception(vmcb, TRAP_int3, 0, 0);
+            svm_inject_exception(v, TRAP_int3, 0, 0);
 #endif
         break;
 
@@ -2679,7 +2679,6 @@ asmlinkage void svm_vmexit_handler(struc
     case VMEXIT_EXCEPTION_GP:
         /* This should probably not be trapped in the future */
         regs.error_code = vmcb->exitinfo1;
-        v->arch.hvm_svm.injecting_event = 1;
         svm_do_general_protection_fault(v, &regs);
         break;  
 
@@ -2699,9 +2698,8 @@ asmlinkage void svm_vmexit_handler(struc
 //printk("PF1\n");
         if (!(error = svm_do_page_fault(va, &regs))) 
         {
-            v->arch.hvm_svm.injecting_event = 1;
             /* Inject #PG using Interruption-Information Fields */
-            svm_inject_exception(vmcb, TRAP_page_fault, 1, regs.error_code);
+            svm_inject_exception(v, TRAP_page_fault, 1, regs.error_code);
 
             v->arch.hvm_svm.cpu_cr2 = va;
             vmcb->cr2 = va;
diff -r 3d85f350a66a -r b4361ae1aabc xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Wed May 17 23:51:39 2006 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Wed May 17 23:53:01 2006 +0100
@@ -417,7 +417,6 @@ void svm_do_launch(struct vcpu *v)
 
     v->arch.schedule_tail = arch_svm_do_resume;
 
-    v->arch.hvm_svm.injecting_event  = 0;
     v->arch.hvm_svm.saved_irq_vector = -1;
 
     svm_set_guest_time(v, 0);
diff -r 3d85f350a66a -r b4361ae1aabc xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Wed May 17 23:51:39 2006 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Wed May 17 23:53:01 2006 +0100
@@ -440,7 +440,6 @@ struct arch_svm_struct {
     u32                 *iopm;
     u32                 *msrpm;
     u64                 vmexit_tsc; /* tsc read at #VMEXIT. for TSC_OFFSET */
-    int                 injecting_event;
     int                 saved_irq_vector;
     u32                 launch_core;
     u32                 asid_core;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
