[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-ia64-devel] [RFC 2/3] hvm-stub for ia64: xen



diff -r 092232fa1fbd xen/arch/ia64/vmx/Makefile
--- a/xen/arch/ia64/vmx/Makefile        Thu Nov 22 03:34:09 2007 +0100
+++ b/xen/arch/ia64/vmx/Makefile        Thu Nov 22 04:47:43 2007 +0100
@@ -21,3 +21,4 @@ obj-y += vacpi.o
 obj-y += vacpi.o
 obj-y += vmx_vcpu_save.o
 obj-y += save.o
+obj-y += hvm_stub.o
diff -r 092232fa1fbd xen/arch/ia64/vmx/hvm_stub.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/vmx/hvm_stub.c      Thu Nov 22 03:37:39 2007 +0100
@@ -0,0 +1,160 @@
+#include <asm/vcpu.h>
+#include <asm/vmx_vcpu.h>
+#include <asm/hvm_stub.h>
+#include <asm/dom_fw.h>
+#include <asm/debugger.h>
+
+static REGS *
+hvmstub_deliver (void)
+{
+  VCPU *vcpu = current;
+  REGS *regs = vcpu_regs(vcpu);
+  unsigned long psr = vmx_vcpu_get_psr(vcpu);
+
+  if (vcpu->vcpu_info->evtchn_upcall_mask)
+    panic_domain (NULL, "hvmstub_deliver: already in stub mode\n");
+
+  /* All cleared, but keep BN.  */
+  vmx_vcpu_set_psr(vcpu, IA64_PSR_MC | (psr & IA64_PSR_BN));
+
+  /* Save registers. */
+  vcpu->arch.arch_vmx.stub_saved[0] = regs->r16;
+  vcpu->arch.arch_vmx.stub_saved[1] = regs->r17;
+  vcpu->arch.arch_vmx.stub_saved[2] = regs->r18;
+  vcpu->arch.arch_vmx.stub_saved[3] = regs->r19;
+  vcpu->arch.arch_vmx.stub_saved[4] = regs->r20;
+  vcpu->arch.arch_vmx.stub_saved[5] = regs->r21;
+  vcpu->arch.arch_vmx.stub_saved[6] = regs->r22;
+  vcpu->arch.arch_vmx.stub_saved[7] = regs->r23;
+  vcpu->arch.arch_vmx.stub_saved[8] = regs->r24;
+  vcpu->arch.arch_vmx.stub_saved[9] = regs->r25;
+  vcpu->arch.arch_vmx.stub_saved[10] = regs->r26;
+  vcpu->arch.arch_vmx.stub_saved[11] = regs->r27;
+  vcpu->arch.arch_vmx.stub_saved[12] = regs->r28;
+  vcpu->arch.arch_vmx.stub_saved[13] = regs->r29;
+  vcpu->arch.arch_vmx.stub_saved[14] = regs->r30;
+  vcpu->arch.arch_vmx.stub_saved[15] = regs->r31;
+  vcpu->arch.arch_vmx.stub_nats =
+    (regs->eml_unat >> IA64_PT_REGS_R16_SLOT) & 0xffff;
+
+  /* Context. */
+  regs->r28 = regs->cr_iip;
+  regs->r29 = psr;
+  regs->r30 = regs->cr_ifs;
+
+  regs->cr_ifs = 0;    // pre-cover
+
+  regs->cr_iip = vcpu->arch.event_callback_ip;
+  regs->eml_unat &= ~(0xffffUL << IA64_PT_REGS_R16_SLOT);
+
+  /* Parameters.  */
+  regs->r16 = 0;
+  regs->r17 = vcpu->arch.arch_vmx.stub_buffer;
+
+  /* Mask events.  */
+  vcpu->vcpu_info->evtchn_upcall_mask = 1;
+
+  debugger_event(XEN_IA64_DEBUG_ON_EVENT);
+
+  return regs;
+}
+
+void
+hvmstub_callback_return (void)
+{
+  VCPU *vcpu = current;
+  REGS *regs = vcpu_regs(vcpu);
+  u64 cmd = regs->r16;
+  u64 arg1 = regs->r19;
+  u64 arg2 = regs->r20;
+  u64 arg3 = regs->r21;
+
+  if ((cmd & ~0x1UL) != 0)
+    panic_domain (NULL, "hvmstub_callback_return: bad operation (%lx)\n", cmd);
+
+  /* First restore registers.  */
+  regs->cr_iip = regs->r28;
+  regs->cr_ifs = regs->r30;
+  vmx_vcpu_set_psr (vcpu, regs->r29);
+
+  regs->eml_unat &= ~(0xffffUL << IA64_PT_REGS_R16_SLOT);
+  regs->eml_unat |= vcpu->arch.arch_vmx.stub_nats << IA64_PT_REGS_R16_SLOT;
+
+  regs->r16 = vcpu->arch.arch_vmx.stub_saved[0];
+  regs->r17 = vcpu->arch.arch_vmx.stub_saved[1];
+  regs->r18 = vcpu->arch.arch_vmx.stub_saved[2];
+  regs->r19 = vcpu->arch.arch_vmx.stub_saved[3];
+  regs->r20 = vcpu->arch.arch_vmx.stub_saved[4];
+  regs->r21 = vcpu->arch.arch_vmx.stub_saved[5];
+  regs->r22 = vcpu->arch.arch_vmx.stub_saved[6];
+  regs->r23 = vcpu->arch.arch_vmx.stub_saved[7];
+  regs->r24 = vcpu->arch.arch_vmx.stub_saved[8];
+  regs->r25 = vcpu->arch.arch_vmx.stub_saved[9];
+  regs->r26 = vcpu->arch.arch_vmx.stub_saved[10];
+  regs->r27 = vcpu->arch.arch_vmx.stub_saved[11];
+  regs->r28 = vcpu->arch.arch_vmx.stub_saved[12];
+  regs->r29 = vcpu->arch.arch_vmx.stub_saved[13];
+  regs->r30 = vcpu->arch.arch_vmx.stub_saved[14];
+  regs->r31 = vcpu->arch.arch_vmx.stub_saved[15];
+
+  /* Unmask events.  */
+  vcpu->vcpu_info->evtchn_upcall_mask = 0;
+
+  /* Then apply commands.  */
+  if (cmd & 1) {
+    emulate_io_update (vcpu, arg1, arg2, arg3);
+  }
+
+}
+
+void
+hvmstub_deliver_event (void)
+{
+  REGS *regs;
+
+  regs = hvmstub_deliver ();
+
+  regs->r16 = 0;
+}
+
+void
+hvmstub_io_emulate (unsigned long padr,
+                   unsigned long data, unsigned long data1,
+                   unsigned long word)
+{
+  REGS *regs;
+
+  regs = hvmstub_deliver ();
+  regs->r16 = 1;
+  regs->r19 = padr;
+  regs->r20 = data;
+  regs->r21 = data1;
+  regs->r22 = word;
+}
+
+void
+hvmstub_hypercall (struct pt_regs *regs)
+{
+  printk ("hvmstub_hypercall: r2=%lx r8=%lx r9=%lx\n",
+         regs->r2, regs->r8, regs->r9);
+
+  if (current->vcpu_info->evtchn_upcall_mask == 0)
+    panic_domain (NULL, "hvmstub_hypercall: not in stub mode\n");
+
+
+  switch (regs->r2 & FW_HYPERCALL_NUM_MASK_LOW)
+    {
+    case HVMSTUB_HYPERCALL_SET_CALLBACK:
+      current->arch.event_callback_ip = regs->r8;
+      current->arch.arch_vmx.stub_buffer = regs->r9;
+      break;
+    case HVMSTUB_HYPERCALL_START_FW:
+      regs->cr_iip = regs->r8;
+      vmx_vcpu_set_psr (current, regs->r9);
+      current->vcpu_info->evtchn_upcall_mask = 0;
+      break;
+    default:
+      panic_domain (NULL, "bad hvmstub hypercall\n");
+      break;
+    }
+}
diff -r 092232fa1fbd xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c  Thu Nov 22 03:34:09 2007 +0100
+++ b/xen/arch/ia64/vmx/mmio.c  Thu Nov 22 03:48:26 2007 +0100
@@ -39,6 +39,7 @@
 #include <asm/hvm/vacpi.h>
 #include <asm/hvm/support.h>
 #include <public/hvm/save.h>
+#include <asm/hvm_stub.h>
 
 #define HVM_BUFFERED_IO_RANGE_NR 1
 
@@ -423,6 +424,8 @@ static void mmio_access(VCPU *vcpu, u64 
     return;
 }
 
+enum inst_type_en { SL_INTEGER, SL_FLOATING, SL_FLOATING_FP8 };
+
 /*
    dir 1: read 0:write
  */
@@ -431,11 +434,13 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
     REGS *regs;
     IA64_BUNDLE bundle;
     int slot, dir=0;
-    enum { SL_INTEGER, SL_FLOATING, SL_FLOATING_FP8 } inst_type;
+    enum inst_type_en inst_type;
     size_t size;
     u64 data, data1, temp, update_reg;
     s32 imm;
     INST64 inst;
+    unsigned long update_word;
+
 
     regs = vcpu_regs(vcpu);
     if (IA64_RETRY == __vmx_get_domain_bundle(regs->cr_iip, &bundle)) {
@@ -558,24 +563,54 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
              inst.inst, regs->cr_iip);
     }
 
+    update_word = size | (dir << 7) | (ma << 8) | (inst_type << 12);
+    if (dir == IOREQ_READ) {
+        if (inst_type == SL_INTEGER)
+            update_word |= (inst.M1.r1 << 16);
+        else if (inst_type == SL_FLOATING_FP8)
+            update_word |= (inst.M12.f1 << 16) | (inst.M12.f2 << 24);
+    }
+
+    if (vcpu->domain->arch.is_hvmstub) {
+        unsigned long iot;
+        iot = __gpfn_is_io(vcpu->domain, padr >> PAGE_SHIFT);
+
+        if (iot != GPFN_PIB && iot != GPFN_IOSAPIC) {
+            hvmstub_io_emulate (padr, data, data1, update_word);
+            return;
+        }
+    }
+
     if (size == 4) {
         mmio_access(vcpu, padr + 8, &data1, 1 << 3, ma, dir);
         size = 3;
     }
     mmio_access(vcpu, padr, &data, 1 << size, ma, dir);
 
+    emulate_io_update (vcpu, update_word, data, data1);
+}
+
+void
+emulate_io_update (VCPU *vcpu, u64 word, u64 data, u64 data1)
+{
+    int dir = (word >> 7) & 1;
+
     if (dir == IOREQ_READ) {
+        int r1 = (word >> 16) & 0xff;
+        int r2 = (word >> 24) & 0xff;
+        enum inst_type_en inst_type = (word >> 12) & 0x0f;
+
         if (inst_type == SL_INTEGER) {
-            vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
+            vcpu_set_gr(vcpu, r1, data, 0);
         } else if (inst_type == SL_FLOATING_FP8) {
             struct ia64_fpreg v;
 
             v.u.bits[0] = data;
             v.u.bits[1] = 0x1003E;
-            vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
+            vcpu_set_fpreg(vcpu, r1, &v);
             v.u.bits[0] = data1;
             v.u.bits[1] = 0x1003E;
-            vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
+            vcpu_set_fpreg(vcpu, r2, &v);
         } else {
             panic_domain(NULL, "Don't support ldfd now !");
         }
diff -r 092232fa1fbd xen/arch/ia64/vmx/vmx_fault.c
--- a/xen/arch/ia64/vmx/vmx_fault.c     Thu Nov 22 03:34:09 2007 +0100
+++ b/xen/arch/ia64/vmx/vmx_fault.c     Thu Nov 22 03:37:39 2007 +0100
@@ -42,7 +42,6 @@
 #include <asm/privop.h>
 #include <asm/ia64_int.h>
 #include <asm/debugger.h>
-//#include <asm/hpsim_ssc.h>
 #include <asm/dom_fw.h>
 #include <asm/vmx_vcpu.h>
 #include <asm/kregs.h>
@@ -52,6 +51,8 @@
 #include <asm/vmx_phy_mode.h>
 #include <xen/mm.h>
 #include <asm/vmx_pal.h>
+#include <asm/hvm_stub.h>
+
 /* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
 #define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
 
@@ -182,31 +183,36 @@ vmx_ia64_handle_break (unsigned long ifa
         if (iim == 0)
             show_registers(regs);
         debugger_trap_fatal(0 /* don't care */, regs);
-    } else
+    }
 #endif
-    {
-        if (iim == 0) 
-            vmx_die_if_kernel("Break 0 in Hypervisor.", regs, iim);
-
-        if (ia64_psr(regs)->cpl == 0) {
-            /* Allow hypercalls only when cpl = 0.  */
-            if (iim == d->arch.breakimm) {
-                ia64_hypercall(regs);
-                vcpu_increment_iip(v);
+    if (iim == 0) 
+        vmx_die_if_kernel("Break 0 in Hypervisor.", regs, iim);
+
+    if (ia64_psr(regs)->cpl == 0) {
+        /* Allow hypercalls only when cpl = 0.  */
+        if (iim == d->arch.breakimm) {
+            ia64_hypercall(regs);
+            vcpu_increment_iip(v);
+            return IA64_NO_FAULT;
+        }
+        if (iim == DOMN_PAL_REQUEST) {
+            pal_emul(v);
+            vcpu_increment_iip(v);
+            return IA64_NO_FAULT;
+        }
+        if (iim == DOMN_SAL_REQUEST) {
+            sal_emul(v);
+            vcpu_increment_iip(v);
+            return IA64_NO_FAULT;
+        }
+        if (d->arch.is_hvmstub) {
+            if (iim == HVMSTUB_HYPERPRIVOP_CALLBACK_RETURN) {
+                hvmstub_callback_return ();
                 return IA64_NO_FAULT;
             }
-            else if (iim == DOMN_PAL_REQUEST) {
-                pal_emul(v);
-                vcpu_increment_iip(v);
-                return IA64_NO_FAULT;
-            } else if (iim == DOMN_SAL_REQUEST) {
-                sal_emul(v);
-                vcpu_increment_iip(v);
-                return IA64_NO_FAULT;
-            }
-        }
-        vmx_reflect_interruption(ifa, isr, iim, 11, regs);
-    }
+        }
+    }
+    vmx_reflect_interruption(ifa, isr, iim, 11, regs);
     return IA64_NO_FAULT;
 }
 
@@ -215,10 +221,11 @@ void save_banked_regs_to_vpd(VCPU *v, RE
 {
     unsigned long i=0UL, * src,* dst, *sunat, *dunat;
     IA64_PSR vpsr;
-    src=&regs->r16;
-    sunat=&regs->eml_unat;
+
+    src = &regs->r16;
+    sunat = &regs->eml_unat;
     vpsr.val = VCPU(v, vpsr);
-    if(vpsr.bn){
+    if (vpsr.bn) {
         dst = &VCPU(v, vgr[0]);
         dunat =&VCPU(v, vnat);
         __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;; \
@@ -226,7 +233,8 @@ void save_banked_regs_to_vpd(VCPU *v, RE
                             st8 [%3] = %2;;"
        
::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
 
-    }else{
+    }
+    else {
         dst = &VCPU(v, vbgr[0]);
 //        dunat =&VCPU(v, vbnat);
 //        __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;;
@@ -235,7 +243,7 @@ void save_banked_regs_to_vpd(VCPU *v, RE
 //       
::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
 
     }
-    for(i=0; i<16; i++)
+    for (i = 0; i < 16; i++)
         *dst++ = *src++;
 }
 
@@ -248,59 +256,62 @@ void leave_hypervisor_tail(void)
     struct domain *d = current->domain;
     struct vcpu *v = current;
 
+    /* FIXME: can this happen ?  */
+    if (is_idle_domain(current->domain))
+        return;
+
+    if (d->arch.is_hvmstub) {
+        if (local_events_need_delivery()) {
+            hvmstub_deliver_event ();
+        }
+    } else if (v->vcpu_id == 0) {
+        unsigned long callback_irq =
+            d->arch.hvm_domain.params[HVM_PARAM_CALLBACK_IRQ];
+        
+        if ( v->arch.arch_vmx.pal_init_pending ) {
+            /*inject INIT interruption to guest pal*/
+            v->arch.arch_vmx.pal_init_pending = 0;
+            deliver_pal_init(v);
+            return;
+        }
+
+        /*
+         * val[63:56] == 1: val[55:0] is a delivery PCI INTx line:
+         *                  Domain = val[47:32], Bus  = val[31:16],
+         *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
+         * val[63:56] == 0: val[55:0] is a delivery as GSI
+         */
+        if (callback_irq != 0 && local_events_need_delivery()) {
+            /* change level for para-device callback irq */
+            /* use level irq to send discrete event */
+            if ((uint8_t)(callback_irq >> 56) == 1) {
+                /* case of using PCI INTx line as callback irq */
+                int pdev = (callback_irq >> 11) & 0x1f;
+                int pintx = callback_irq & 3;
+                viosapic_set_pci_irq(d, pdev, pintx, 1);
+                viosapic_set_pci_irq(d, pdev, pintx, 0);
+            } else {
+                /* case of using GSI as callback irq */
+                viosapic_set_irq(d, callback_irq, 1);
+                viosapic_set_irq(d, callback_irq, 0);
+            }
+        }
+    }
+
+    rmb();
+    if (xchg(&v->arch.irq_new_pending, 0)) {
+        v->arch.irq_new_condition = 0;
+        vmx_check_pending_irq(v);
+    }
+    else if (v->arch.irq_new_condition) {
+        v->arch.irq_new_condition = 0;
+        vhpi_detection(v);
+    }
+
     // FIXME: Will this work properly if doing an RFI???
-    if (!is_idle_domain(d) ) { // always comes from guest
-//        struct pt_regs *user_regs = vcpu_regs(current);
-        local_irq_enable();
-        do_softirq();
-        local_irq_disable();
-
-        if (v->vcpu_id == 0) {
-            unsigned long callback_irq =
-                d->arch.hvm_domain.params[HVM_PARAM_CALLBACK_IRQ];
-
-            if ( v->arch.arch_vmx.pal_init_pending ) {
-                /*inject INIT interruption to guest pal*/
-                v->arch.arch_vmx.pal_init_pending = 0;
-                deliver_pal_init(v);
-                return;
-            }
-
-            /*
-             * val[63:56] == 1: val[55:0] is a delivery PCI INTx line:
-             *                  Domain = val[47:32], Bus  = val[31:16],
-             *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
-             * val[63:56] == 0: val[55:0] is a delivery as GSI
-             */
-            if (callback_irq != 0 && local_events_need_delivery()) {
-                /* change level for para-device callback irq */
-                /* use level irq to send discrete event */
-                if ((uint8_t)(callback_irq >> 56) == 1) {
-                    /* case of using PCI INTx line as callback irq */
-                    int pdev = (callback_irq >> 11) & 0x1f;
-                    int pintx = callback_irq & 3;
-                    viosapic_set_pci_irq(d, pdev, pintx, 1);
-                    viosapic_set_pci_irq(d, pdev, pintx, 0);
-                } else {
-                    /* case of using GSI as callback irq */
-                    viosapic_set_irq(d, callback_irq, 1);
-                    viosapic_set_irq(d, callback_irq, 0);
-                }
-            }
-        }
-
-        rmb();
-        if (xchg(&v->arch.irq_new_pending, 0)) {
-            v->arch.irq_new_condition = 0;
-            vmx_check_pending_irq(v);
-            return;
-        }
-
-        if (v->arch.irq_new_condition) {
-            v->arch.irq_new_condition = 0;
-            vhpi_detection(v);
-        }
-    }
+    local_irq_enable();
+    do_softirq();
+    local_irq_disable();
 }
 
 static int vmx_handle_lds(REGS* regs)
diff -r 092232fa1fbd xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Thu Nov 22 03:34:09 2007 +0100
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Thu Nov 22 03:37:39 2007 +0100
@@ -47,15 +47,19 @@ static int hvmop_set_isa_irq_level(
     if ( copy_from_guest(&op, uop, 1) )
         return -EFAULT;
 
-    if ( !IS_PRIV(current->domain) )
-        return -EPERM;
-
     if ( op.isa_irq > 15 )
         return -EINVAL;
 
-    d = get_domain_by_id(op.domid);
-    if ( d == NULL )
-        return -ESRCH;
+    if ( op.domid == DOMID_SELF ) {
+        d = get_current_domain();
+    }
+    else {
+        if ( !IS_PRIV(current->domain) )
+            return -EPERM;
+        d = get_domain_by_id(op.domid);
+        if ( d == NULL )
+            return -ESRCH;
+    }
 
     rc = -EINVAL;
     if ( !is_hvm_domain(d) )
@@ -79,15 +83,19 @@ static int hvmop_set_pci_intx_level(
     if ( copy_from_guest(&op, uop, 1) )
         return -EFAULT;
 
-    if ( !IS_PRIV(current->domain) )
-        return -EPERM;
-
     if ( (op.domain > 0) || (op.bus > 0) || (op.device > 31) || (op.intx > 3) )
         return -EINVAL;
 
-    d = get_domain_by_id(op.domid);
-    if ( d == NULL )
-        return -ESRCH;
+    if ( op.domid == DOMID_SELF ) {
+        d = get_current_domain();
+    }
+    else {
+        if ( !IS_PRIV(current->domain) )
+            return -EPERM;
+        d = get_domain_by_id(op.domid);
+        if ( d == NULL )
+            return -ESRCH;
+    }
 
     rc = -EINVAL;
     if ( !is_hvm_domain(d) )
diff -r 092232fa1fbd xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Thu Nov 22 03:34:09 2007 +0100
+++ b/xen/arch/ia64/vmx/vmx_init.c      Thu Nov 22 04:35:11 2007 +0100
@@ -404,9 +404,11 @@ vmx_final_setup_guest(struct vcpu *v)
        if (rc)
                return rc;
 
-       rc = vmx_create_event_channels(v);
-       if (rc)
-               return rc;
+    if (!v->domain->arch.is_hvmstub) {
+        rc = vmx_create_event_channels(v);
+        if (rc)
+            return rc;
+    }
 
        /* v->arch.schedule_tail = arch_vmx_do_launch; */
        vmx_create_vp(v);
@@ -434,14 +436,16 @@ vmx_relinquish_guest_resources(struct do
 {
        struct vcpu *v;
 
-       for_each_vcpu(d, v)
-               vmx_release_assist_channel(v);
-
-       vacpi_relinquish_resources(d);
-
-       vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.ioreq);
-       vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.buf_ioreq);
-       vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.buf_pioreq);
+    if (!d->arch.is_hvmstub) {
+        for_each_vcpu(d, v)
+            vmx_release_assist_channel(v);
+
+        vacpi_relinquish_resources(d);
+
+        vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.ioreq);
+        vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.buf_ioreq);
+        vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.buf_pioreq);
+    }
 }
 
 void
@@ -491,10 +495,11 @@ int vmx_setup_platform(struct domain *d)
 
        vmx_build_io_physmap_table(d);
 
-       vmx_init_ioreq_page(d, &d->arch.vmx_platform.ioreq);
-       vmx_init_ioreq_page(d, &d->arch.vmx_platform.buf_ioreq);
-       vmx_init_ioreq_page(d, &d->arch.vmx_platform.buf_pioreq);
-
+    if (!d->arch.is_hvmstub) {
+        vmx_init_ioreq_page(d, &d->arch.vmx_platform.ioreq);
+        vmx_init_ioreq_page(d, &d->arch.vmx_platform.buf_ioreq);
+        vmx_init_ioreq_page(d, &d->arch.vmx_platform.buf_pioreq);
+    }
        /* TEMP */
        d->arch.vmx_platform.pib_base = 0xfee00000UL;
 
@@ -509,7 +514,14 @@ int vmx_setup_platform(struct domain *d)
        /* Initialize iosapic model within hypervisor */
        viosapic_init(d);
 
-       vacpi_init(d);
+    if (!d->arch.is_hvmstub)
+        vacpi_init(d);
+
+    if (d->arch.is_hvmstub) {
+        int i;
+        for ( i = 1; i < MAX_VIRT_CPUS; i++ )
+            d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
+    }
 
        return 0;
 }
@@ -521,25 +533,27 @@ void vmx_do_resume(struct vcpu *v)
        vmx_load_all_rr(v);
        migrate_timer(&v->arch.arch_vmx.vtm.vtm_timer, v->processor);
 
-       /* stolen from hvm_do_resume() in arch/x86/hvm/hvm.c */
-       /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
-       p = &get_vio(v)->vp_ioreq;
-       while (p->state != STATE_IOREQ_NONE) {
-               switch (p->state) {
-               case STATE_IORESP_READY: /* IORESP_READY -> NONE */
-                       vmx_io_assist(v);
-                       break;
-               case STATE_IOREQ_READY:
-               case STATE_IOREQ_INPROCESS:
-                       /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
-                       wait_on_xen_event_channel(v->arch.arch_vmx.xen_port,
-                                         (p->state != STATE_IOREQ_READY) &&
-                                         (p->state != STATE_IOREQ_INPROCESS));
-                       break;
-               default:
-                       gdprintk(XENLOG_ERR,
-                                "Weird HVM iorequest state %d.\n", p->state);
-                       domain_crash_synchronous();
-               }
-       }
-}
+    if (!v->domain->arch.is_hvmstub) {
+        /* stolen from hvm_do_resume() in arch/x86/hvm/hvm.c */
+        /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
+        p = &get_vio(v)->vp_ioreq;
+        while (p->state != STATE_IOREQ_NONE) {
+            switch (p->state) {
+            case STATE_IORESP_READY: /* IORESP_READY -> NONE */
+                vmx_io_assist(v);
+                break;
+            case STATE_IOREQ_READY:
+            case STATE_IOREQ_INPROCESS:
+                /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
+                wait_on_xen_event_channel(v->arch.arch_vmx.xen_port,
+                                          (p->state != STATE_IOREQ_READY) &&
+                                          (p->state != STATE_IOREQ_INPROCESS));
+                break;
+            default:
+                gdprintk(XENLOG_ERR,
+                         "Weird HVM iorequest state %d.\n", p->state);
+                domain_crash_synchronous();
+            }
+        }
+    }
+}
diff -r 092232fa1fbd xen/arch/ia64/vmx/vmx_ivt.S
--- a/xen/arch/ia64/vmx/vmx_ivt.S       Thu Nov 22 03:34:09 2007 +0100
+++ b/xen/arch/ia64/vmx/vmx_ivt.S       Thu Nov 22 03:37:39 2007 +0100
@@ -472,7 +472,7 @@ ENTRY(vmx_break_fault)
     ;;
     ld4 r22=[r22]
     extr.u r24=r29,IA64_PSR_CPL0_BIT,2
-    cmp.eq p0,p6=r0,r0
+    cmp.ltu p6,p0=NR_hypercalls,r2
     ;;
     cmp.ne.or p6,p0=r22,r17
     cmp.ne.or p6,p0=r0,r24
diff -r 092232fa1fbd xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Thu Nov 22 03:34:09 2007 +0100
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Thu Nov 22 03:37:39 2007 +0100
@@ -35,6 +35,7 @@
 #define SW_BAD    0   /* Bad mode transitition */
 #define SW_V2P_DT 1   /* Physical emulation is activated */
 #define SW_V2P_D  2   /* Physical emulation is activated (only for data) */
+#define SW_P2P_D  2   /* Physical emulation is activated (only for data) */
 #define SW_P2V    3   /* Exit physical mode emulation */
 #define SW_SELF   4   /* No mode transition */
 #define SW_NOP    5   /* Mode transition, but without action required */
@@ -60,7 +61,7 @@ static const unsigned char mm_switch_tab
      *  data access can be satisfied though itlb entry for physical
      *  emulation is hit.
      */
-    {SW_SELF,0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
+    {SW_SELF,0,  0,  SW_NOP, 0,  SW_P2P_D,  0,  SW_P2V},
     {0,  0,  0,  0,  0,  0,  0,  0},
     {0,  0,  0,  0,  0,  0,  0,  0},
     /*
@@ -79,7 +80,7 @@ static const unsigned char mm_switch_tab
      *  from the low level TLB miss handlers.
      *  (see "arch/ia64/kernel/ivt.S")
      */
-    {0,  0,  0,  0,  0,  SW_SELF,0,  SW_P2V},
+    {SW_V2P_DT,  0,  0,  0,  0,  SW_SELF,0,  SW_P2V},
     {0,  0,  0,  0,  0,  0,  0,  0},
     /*
      *  (it,dt,rt): (1,1,1) -> (1,0,1)
diff -r 092232fa1fbd xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c      Thu Nov 22 03:34:09 2007 +0100
+++ b/xen/arch/ia64/xen/dom0_ops.c      Thu Nov 22 03:37:39 2007 +0100
@@ -104,13 +104,17 @@ long arch_do_domctl(xen_domctl_t *op, XE
                 ret = -EFAULT;
         }
         else {
-            if (ds->flags & XEN_DOMAINSETUP_hvm_guest) {
+            if (ds->flags & (XEN_DOMAINSETUP_hvm_guest
+                             | XEN_DOMAINSETUP_hvmstub_guest)) {
                 if (!vmx_enabled) {
                     printk("No VMX hardware feature for vmx domain.\n");
                     ret = -EINVAL;
                 } else {
                     d->arch.is_vti = 1;
+                    d->is_hvm = 1;
                     xen_ia64_set_convmem_end(d, ds->maxmem);
+                    if (ds->flags & XEN_DOMAINSETUP_hvmstub_guest)
+                        d->arch.is_hvmstub = 1;
                     ret = vmx_setup_platform(d);
                 }
             }
diff -r 092232fa1fbd xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c     Thu Nov 22 03:34:09 2007 +0100
+++ b/xen/arch/ia64/xen/hypercall.c     Thu Nov 22 03:37:39 2007 +0100
@@ -32,6 +32,7 @@
 #include <public/callback.h>
 #include <xen/event.h>
 #include <xen/perfc.h>
+#include <asm/hvm_stub.h>
 
 extern long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg);
 extern long do_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg);
@@ -224,7 +225,8 @@ ia64_hypercall(struct pt_regs *regs)
                regs->r10 = fpswa_ret.err1;
                regs->r11 = fpswa_ret.err2;
                break;
-       case __HYPERVISOR_opt_feature: {
+       case __HYPERVISOR_opt_feature:
+       {
                XEN_GUEST_HANDLE(void) arg;
                struct xen_ia64_opt_feature optf;
                set_xen_guest_handle(arg, (void*)(vcpu_get_gr(v, 32)));
@@ -234,6 +236,9 @@ ia64_hypercall(struct pt_regs *regs)
                        regs->r8 = -EFAULT;
                break;
        }
+       case FW_HYPERCALL_HVMSTUB:
+               hvmstub_hypercall (regs);
+               break;
        default:
                printk("unknown ia64 fw hypercall %lx\n", regs->r2);
                regs->r8 = do_ni_hypercall();
diff -r 092232fa1fbd xen/include/asm-ia64/dom_fw.h
--- a/xen/include/asm-ia64/dom_fw.h     Thu Nov 22 03:34:09 2007 +0100
+++ b/xen/include/asm-ia64/dom_fw.h     Thu Nov 22 03:37:39 2007 +0100
@@ -168,6 +168,9 @@
 /* Set the shared_info base virtual address.  */
 #define FW_HYPERCALL_SET_SHARED_INFO_VA                        0x600UL
 
+/* Hvmstub hypercalls.  See details in hvm_stub.h  */
+#define FW_HYPERCALL_HVMSTUB                            0x800UL
+
 /* Hypercalls index bellow _FIRST_ARCH are reserved by Xen, while those above
    are for the architecture.
    Note: this limit was defined by Xen/ia64 (and not by Xen).
diff -r 092232fa1fbd xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Thu Nov 22 03:34:09 2007 +0100
+++ b/xen/include/asm-ia64/domain.h     Thu Nov 22 03:37:39 2007 +0100
@@ -126,6 +126,7 @@ struct arch_domain {
         unsigned long flags;
         struct {
             unsigned int is_vti : 1;
+            unsigned int is_hvmstub : 1;
 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
             unsigned int has_pervcpu_vhpt : 1;
             unsigned int vhpt_size_log2 : 6;
diff -r 092232fa1fbd xen/include/asm-ia64/hvm_stub.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/hvm_stub.h   Thu Nov 22 03:37:39 2007 +0100
@@ -0,0 +1,23 @@
+/* Hvm stub hypercalls.  */
+
+/* Defines the callback entry point.  r8=ip, r9=data.
+   Must be called per-vcpu.  */
+#define HVMSTUB_HYPERCALL_SET_CALLBACK 0x01
+
+/* Finish stub initialization and start firmware.  r8=ip.  */
+#define HVMSTUB_HYPERCALL_START_FW 0x02
+
+/* Return from callback.  r16=0.
+   Unmask vcpu events.  */
+#define HVMSTUB_HYPERPRIVOP_CALLBACK_RETURN 0x01
+
+
+#ifdef __XEN__
+extern void hvmstub_hypercall (struct pt_regs *regs);
+extern void hvmstub_deliver_event (void);
+extern void hvmstub_callback_return (void);
+extern void hvmstub_io_emulate (unsigned long padr, unsigned long data,
+                               unsigned long data1, unsigned long word);
+
+#endif
+
diff -r 092232fa1fbd xen/include/asm-ia64/vmmu.h
--- a/xen/include/asm-ia64/vmmu.h       Thu Nov 22 03:34:09 2007 +0100
+++ b/xen/include/asm-ia64/vmmu.h       Thu Nov 22 03:37:39 2007 +0100
@@ -215,6 +215,7 @@ extern void machine_tlb_purge(u64 va, u6
 extern void machine_tlb_purge(u64 va, u64 ps);
 extern unsigned long fetch_code(struct vcpu *vcpu, u64 gip, IA64_BUNDLE 
*pbundle);
 extern void emulate_io_inst(struct vcpu *vcpu, u64 padr, u64 ma);
+extern void emulate_io_update (struct vcpu *vcpu, u64 word, u64 d, u64 d1);
 extern int vhpt_enabled(struct vcpu *vcpu, uint64_t vadr, vhpt_ref_t ref);
 extern u64 translate_phy_pte(struct vcpu *v, u64 *pte, u64 itir, u64 va);
 extern void thash_vhpt_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa,
diff -r 092232fa1fbd xen/include/asm-ia64/vmx_vpd.h
--- a/xen/include/asm-ia64/vmx_vpd.h    Thu Nov 22 03:34:09 2007 +0100
+++ b/xen/include/asm-ia64/vmx_vpd.h    Thu Nov 22 03:37:39 2007 +0100
@@ -64,6 +64,9 @@ struct arch_vmx_struct {
     unsigned long  ivt_current;
     struct ivt_debug ivt_debug[IVT_DEBUG_MAX];
 #endif
+    unsigned long stub_saved[16];
+    unsigned long stub_buffer;
+    unsigned int  stub_nats;
 };
 
 #define VMX_DOMAIN(v)   v->arch.arch_vmx.flags
diff -r 092232fa1fbd xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Thu Nov 22 03:34:09 2007 +0100
+++ b/xen/include/public/domctl.h       Thu Nov 22 03:37:39 2007 +0100
@@ -373,6 +373,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_hyper
 #define XEN_DOMAINSETUP_hvm_guest  (1UL<<_XEN_DOMAINSETUP_hvm_guest)
 #define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save)  */
 #define XEN_DOMAINSETUP_query  (1UL<<_XEN_DOMAINSETUP_query)
+#define _XEN_DOMAINSETUP_hvmstub_guest 2
+#define XEN_DOMAINSETUP_hvmstub_guest  (1UL<<_XEN_DOMAINSETUP_hvmstub_guest)
 typedef struct xen_domctl_arch_setup {
     uint64_aligned_t flags;  /* XEN_DOMAINSETUP_* */
 #ifdef __ia64__

_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.