Re: [Xen-devel] [PATCH] vbd/vnif paravirtualization driver hypervisor support



It now uses shadow_mode_external throughout, and a permit bitmap to control which hypercalls are allowed from a VMX domain.
Do you think it's acceptable now?
It's against 1657.
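
To make the permit-bitmap part concrete, here is a small standalone sketch of the check it performs: one bit per hypercall number, and a hypercall from a VMX domain is serviced only if its bit is set. Everything below is illustrative only; the real table, numbers and test_bit() call are in the vmx.c hunk of the patch.

#include <stdio.h>

/* Illustrative hypercall numbers (the real ones live in public/xen.h). */
#define NR_HYPERCALLS          32
#define HC_MMU_UPDATE           1
#define HC_DOM_MEM_OP          12
#define HC_UPDATE_VA_MAPPING   14

/* One bit per hypercall: set = permitted from a VMX domain. */
static unsigned char permit[NR_HYPERCALLS / 8];

static void permit_set(unsigned int nr)  { permit[nr / 8] |= 1u << (nr % 8); }
static int  permit_test(unsigned int nr) { return permit[nr / 8] & (1u << (nr % 8)); }

int main(void)
{
    permit_set(HC_MMU_UPDATE);
    permit_set(HC_DOM_MEM_OP);
    permit_set(HC_UPDATE_VA_MAPPING);

    /* A permitted call would be dispatched; anything else is rejected. */
    printf("hypercall 14 permitted? %s\n", permit_test(14) ? "yes" : "no");
    printf("hypercall 20 permitted? %s\n", permit_test(20) ? "yes" : "no");
    return 0;
}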

Keir Fraser wrote:

On 25 May 2005, at 07:26, Ling, Xiaofeng wrote:

Or should we use shadow_mode_external() to separate the paths?


That would make sense. Also push the test into the copy routines themselves.

entry.S is a big mess now. Consider pulling *all* the vmx stuff out into vmx.S.

 -- Keir
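
That is what the usercopy.c changes in the patch below do: copy_to_user()/copy_from_user() test shadow_mode_external() internally and divert to the guest-mapping path, so call sites stay unchanged. As a rough standalone sketch of that shape (the *_stub helpers stand in for the real Xen routines and are assumptions, not the patch code):

#include <stddef.h>
#include <string.h>

/* Stubs standing in for the real Xen helpers. */
static int shadow_mode_external_stub(void) { return 0; }   /* real code checks current->domain */
static unsigned long copy_to_guest_stub(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);   /* real code maps the guest virtual address page by page first */
    return 0;
}
static unsigned long __copy_to_user_stub(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);   /* classic direct-access PV path */
    return 0;
}

/* The test lives inside the copy routine, not at every call site. */
unsigned long copy_to_user_sketch(void *to, const void *from, unsigned long n)
{
    if (shadow_mode_external_stub())
        return copy_to_guest_stub(to, from, n);
    return __copy_to_user_stub(to, from, n);
}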


===== xen/arch/x86/domain.c 1.206 vs edited =====
--- 1.206/xen/arch/x86/domain.c 2005-06-03 05:05:30 +08:00
+++ edited/xen/arch/x86/domain.c        2005-06-03 10:24:26 +08:00
@@ -252,6 +252,7 @@
     v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
         l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
 
+    v->arch.callback_irq = 0;
     v->arch.guest_vtable  = __linear_l2_table;
     v->arch.shadow_vtable = __shadow_linear_l2_table;
 
===== xen/arch/x86/mm.c 1.174 vs edited =====
--- 1.174/xen/arch/x86/mm.c     2005-06-03 05:05:30 +08:00
+++ edited/xen/arch/x86/mm.c    2005-06-03 10:13:09 +08:00
@@ -1975,23 +1975,35 @@
     perfc_addc(num_page_updates, count);
     perfc_incr_histo(bpt_updates, count, PT_UPDATES);
 
-    if ( unlikely(!array_access_ok(ureqs, count, sizeof(req))) )
+    if(likely(!shadow_mode_external(current->domain)) &&
+             unlikely(!array_access_ok(ureqs, count, sizeof(req))) )
     {
+
         rc = -EFAULT;
         goto out;
+
     }
 
     for ( i = 0; i < count; i++ )
     {
-        if ( hypercall_preempt_check() )
+        if(shadow_mode_external(current->domain))
         {
-            rc = hypercall4_create_continuation(
-                __HYPERVISOR_mmu_update, ureqs, 
-                (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
-            break;
+
+             rc = copy_from_guest(&req, ureqs, sizeof(req));
         }
+        else
+        {
+            if ( hypercall_preempt_check() )
+            {
+                rc = hypercall4_create_continuation(
+                        __HYPERVISOR_mmu_update, ureqs, 
+                        (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
+                break;
+            }
+            rc = __copy_from_user(&req, ureqs, sizeof(req));
 
-        if ( unlikely(__copy_from_user(&req, ureqs, sizeof(req)) != 0) )
+        }
+        if ( unlikely(rc != 0) )
         {
             MEM_LOG("Bad __copy_from_user");
             rc = -EFAULT;
@@ -2140,7 +2152,8 @@
                 break;
             }
 
-            if ( unlikely(shadow_mode_translate(FOREIGNDOM) && !IS_PRIV(d)) )
+            if ( unlikely(shadow_mode_translate(FOREIGNDOM) && !IS_PRIV(d) 
+                          && !shadow_mode_external(FOREIGNDOM)) )
             {
                 MEM_LOG("can't mutate the m2p of translated guests");
                 break;
@@ -2247,7 +2260,6 @@
     return rc;
 }
 
-
 int do_update_va_mapping(unsigned long va,
                          unsigned long val32,
                          unsigned long flags)
@@ -2271,9 +2283,12 @@
     if ( unlikely(shadow_mode_enabled(d)) )
         check_pagetable(v, "pre-va"); /* debug */
 
-    if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
-                                val)) )
-        rc = -EINVAL;
+    if ( !shadow_mode_external(d) )
+    {
+        if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
+                        val)) )
+            rc = -EINVAL;
+    }
 
     if ( likely(rc == 0) && unlikely(shadow_mode_enabled(d)) )
     {
===== xen/arch/x86/shadow.c 1.118 vs edited =====
--- 1.118/xen/arch/x86/shadow.c 2005-06-03 05:05:30 +08:00
+++ edited/xen/arch/x86/shadow.c        2005-06-03 10:17:49 +08:00
@@ -2755,6 +2755,7 @@
     struct domain *d = v->domain;
     l1_pgentry_t spte;
     int rc = 0;
+    unsigned long gpa, mfn;
 
     shadow_lock(d);
 
@@ -2766,7 +2767,30 @@
     //
     __shadow_sync_va(v, va);
 
-    l1pte_propagate_from_guest(d, val, &spte);
+     
+    if(!shadow_mode_external(v->domain))
+    {
+        l1pte_propagate_from_guest(d, val, &spte);
+    }
+    else
+    {
+        gpa = gva_to_gpa(va);
+        mfn = l1e_get_pfn(val);
+        if(gpa) 
+        {
+            if(l1e_get_intpte(val))
+            {
+                set_phystomachine(gpa >> PAGE_SHIFT, 
+                        mfn);
+            }
+            else
+                set_phystomachine(gpa >> PAGE_SHIFT, INVALID_MFN);
+        }
+
+        spte = val;
+
+    }
+
     shadow_set_l1e(va, spte, 0);
 
     /*
===== xen/arch/x86/vmx.c 1.65 vs edited =====
--- 1.65/xen/arch/x86/vmx.c     2005-06-03 05:05:31 +08:00
+++ edited/xen/arch/x86/vmx.c   2005-06-03 10:27:42 +08:00
@@ -37,6 +37,7 @@
 #include <asm/vmx_vmcs.h>
 #include <asm/vmx_intercept.h>
 #include <asm/shadow.h>
+#include <asm/bitops.h>
 #include <public/io/ioreq.h>
 
 #ifdef CONFIG_VMX
@@ -1046,9 +1047,12 @@
 char print_buf[BUF_SIZ];
 static int index;
 
-static void vmx_print_line(const char c, struct vcpu *d) 
+asmlinkage unsigned long do_vmx_print_line(unsigned long ch) 
 {
 
+#if VMX_DEBUG
+    char c = (char)ch;
+    struct vcpu *d = current;
     if (index == MAX_LINE || c == '\n') {
         if (index == MAX_LINE) {
             print_buf[index++] = c;
@@ -1059,7 +1063,55 @@
     }
     else
         print_buf[index++] = c;
+    return 0;
+#endif
+    return -ENOSYS;
+}
+
+unsigned char vmx_hypercall_permit[NR_hypercalls/sizeof(unsigned char)] = 
+{
+    0x2,     /* do_mmu_update */
+    0x70,    /* do_dom_mem_op          12
+                do_multicall           13
+                do_update_va_mapping   14
+             */
+    0x13,    /* do_event_channel_op 16
+                do_xen_version      17
+                do_grant_table_op   20 
+             */
+    0x10     /* do_virtual_device_op 28*/
+};
+#if defined(__i386__)
+void vmx_do_hypercall(struct cpu_user_regs *pregs)
+{
+    unsigned long retcode;
+
+    /* Check whether this hypercall is permitted from a VMX domain. */
+    if(unlikely(!test_bit(pregs->eax, &vmx_hypercall_permit[0]))){
+        printk("hypercall %d not permitted\n", pregs->eax);
+        return;
+    }
+    __asm__ __volatile__(
+        "pushl %6\n\t"
+        "pushl %5\n\t"
+        "pushl %4\n\t"
+        "pushl %3\n\t"
+        "pushl %2\n\t"
+        "call *(hypercall_table)(,%0,4)\n\t"
+        "addl $20, %%esp\n\t"
+        :"=&a"(retcode)
+        :"0"(pregs->eax), "r"(pregs->ebx), "r"(pregs->ecx),
+         "r"(pregs->edx), "r"(pregs->esi), "r"(pregs->edi)
+    );
+    pregs->eax = retcode;
+    return;
+}
+#else
+void vmx_do_hypercall(struct cpu_user_regs *pregs)
+{
+    printk("not supported yet!\n");
 }
+#endif
 
 void save_vmx_cpu_user_regs(struct cpu_user_regs *ctxt)
 {
@@ -1300,7 +1352,7 @@
         __vmread(GUEST_EIP, &eip);
         __vmread(EXIT_QUALIFICATION, &exit_qualification);
 
-        vmx_print_line(regs.eax, v); /* provides the current domain */
+        vmx_do_hypercall(&regs);
         __update_guest_eip(inst_len);
         break;
     case EXIT_REASON_CR_ACCESS:
@@ -1364,6 +1416,56 @@
 #endif
 
 }
+
+int do_update_va_mapping(unsigned long va,
+                         l1_pgentry_t  val, 
+                         unsigned long flags);
+/* 
+ * The va must be a page start address
+ */
+int map_sharepage_to_guest(unsigned long gva, unsigned long shared)
+{
+    l1_pgentry_t val, gpte; 
+    gpte = gva_to_gpte(gva);
+    val = l1e_from_paddr((__pa(shared)), l1e_get_flags(gpte));
+    return do_update_va_mapping(gva, val, 0);
+}
+  
+asmlinkage unsigned long do_virtual_device_op(unsigned long op, 
+                                              unsigned long arg1, 
+                                              unsigned arg2)
+{
+    switch (op) 
+    {
+        case SET_SHAREINFO_MAP:
+            return map_sharepage_to_guest(arg1, 
+                        (unsigned long)current->domain->shared_info);
+        case SET_CALLBACK_IRQ:
+            if(arg1)
+                current->arch.callback_irq = 0x20+arg1;
+            else
+                current->arch.callback_irq = 0;
+            return 0;
+        case ADDR_MACHTOPHYS: 
+        {
+            unsigned long phys = 
+                __mfn_to_gpfn(current->domain, arg1 >> PAGE_SHIFT);
+            phys = (phys << PAGE_SHIFT) | (arg1 & ~PAGE_MASK);
+            return phys;
+        }
+        case ADDR_PHYSTOMACH:
+        {
+            unsigned long machine = 
+                __gpfn_to_mfn(current->domain, arg1 >> PAGE_SHIFT);
+            machine = (machine << PAGE_SHIFT) | (arg1 & ~PAGE_MASK);
+            return machine;
+        }
+        default:
+            printk("Not supported virtual device operation\n");
+    }
+    return 0L;
+}
+
 
 #endif /* CONFIG_VMX */
 
===== xen/arch/x86/vmx_intercept.c 1.12 vs edited =====
--- 1.12/xen/arch/x86/vmx_intercept.c   2005-06-03 05:05:31 +08:00
+++ edited/xen/arch/x86/vmx_intercept.c 2005-06-03 10:28:09 +08:00
@@ -28,6 +28,7 @@
 #include <xen/lib.h>
 #include <xen/sched.h>
 #include <asm/current.h>
+#include <xen/event.h>
 
 #ifdef CONFIG_VMX
 
@@ -196,6 +197,7 @@
     /* Set the pending intr bit, and send evtchn notification to myself. */
     if (test_and_set_bit(vpit->vector, vpit->intr_bitmap))
         vpit->pending_intr_nr++; /* already set, then count the pending intr */
+    evtchn_set_pending(vpit->v, IOPACKET_PORT);
 
     set_ac_timer(&vpit->pit_timer, NOW() + MILLISECS(vpit->period));
 }
@@ -247,6 +249,7 @@
         }
 
         vpit->intr_bitmap = intr;
+        vpit->v = d;
 
         /* set up the actimer */
         init_ac_timer(&vpit->pit_timer, pit_timer_fn, vpit, 0);
===== xen/arch/x86/vmx_io.c 1.31 vs edited =====
--- 1.31/xen/arch/x86/vmx_io.c  2005-06-03 05:25:14 +08:00
+++ edited/xen/arch/x86/vmx_io.c        2005-06-03 10:33:20 +08:00
@@ -417,11 +417,53 @@
     return ((eflags & X86_EFLAGS_IF) == 0);
 }
 
+int vmx_event_to_irq(struct vcpu *v) 
+{
+    vcpu_iodata_t *vio;
+
+    if(unlikely(v->arch.callback_irq == 0)) {
+        printk("try to inject callback =0!!!\n");
+        printk("pending: %x, sel: %x, pending[0]:%x\n", 
+               v->vcpu_info->evtchn_upcall_pending,
+               v->vcpu_info->evtchn_pending_sel,
+               v->domain->shared_info->evtchn_pending[0]);  
+        return 0;
+    }
+    vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
+    if (vio == 0) {
+        VMX_DBG_LOG(DBG_LEVEL_VBD, 
+                "bad shared page: %lx\n", (unsigned long) vio);
+        domain_crash();
+    }
+    /*
+     * The event is only for the guest: just set the callback
+     * interrupt bit and return.
+     */
+    return test_and_set_bit(v->arch.callback_irq, &vio->vp_intr[0]);
+
+}
+
+void vmx_check_guest_event(struct vcpu *v)
+{
+    if (!v->domain->shared_info->evtchn_pending[IOPACKET_PORT>>5])
+        clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
+
+    /* Note: VMX domains may need upcalls as well */
+    if (!v->vcpu_info->evtchn_pending_sel) 
+        v->vcpu_info->evtchn_upcall_pending = 0;
+
+    if(event_pending(v) && !v->vcpu_info->callback_mask &&
+       !test_bit(IOPACKET_PORT, &v->domain->shared_info->evtchn_pending[0]) ) 
+        vmx_event_to_irq(v);
+}
+
 void vmx_intr_assist(struct vcpu *d) 
 {
     int highest_vector = find_highest_pending_irq(d);
     unsigned long intr_fields, eflags;
     struct vmx_virpit_t *vpit = &(d->arch.arch_vmx.vmx_platform.vmx_pit);
+
+    vmx_check_guest_event(d); /* inject the paravirtual device callback irq */
 
     if (highest_vector == -1)
         return;
===== xen/arch/x86/x86_32/entry.S 1.112 vs edited =====
--- 1.112/xen/arch/x86/x86_32/entry.S   2005-06-03 05:05:31 +08:00
+++ edited/xen/arch/x86/x86_32/entry.S  2005-06-03 10:13:27 +08:00
@@ -735,7 +735,7 @@
         .long do_set_debugreg
         .long do_get_debugreg
         .long do_update_descriptor  /* 10 */
-        .long do_ni_hypercall
+        .long do_vmx_print_line
         .long do_dom_mem_op
         .long do_multicall
         .long do_update_va_mapping
@@ -751,6 +751,9 @@
         .long do_boot_vcpu
         .long do_ni_hypercall       /* 25 */
         .long do_mmuext_op
+        .long do_ni_hypercall       
+        .long do_virtual_device_op       /* virtual device op for VMX */
         .rept NR_hypercalls-((.-hypercall_table)/4)
         .long do_ni_hypercall
         .endr
+
===== xen/arch/x86/x86_32/usercopy.c 1.10 vs edited =====
--- 1.10/xen/arch/x86/x86_32/usercopy.c 2005-06-03 03:54:00 +08:00
+++ edited/xen/arch/x86/x86_32/usercopy.c       2005-06-03 10:13:28 +08:00
@@ -9,6 +9,8 @@
 #include <xen/config.h>
 #include <xen/lib.h>
 #include <asm/uaccess.h>
+#include <asm/domain_page.h>
+#include <asm/shadow.h>
 
 static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
 {
@@ -395,6 +397,98 @@
        return n;
 }
 
+void* map_domain_vaddr(void * guest_vaddr, unsigned long len)
+{
+    l1_pgentry_t gpte;
+    unsigned long mfn;
+    unsigned long ma;
+    void * vstart;
+    
+    if (len > PAGE_SIZE) 
+    {
+        return NULL;
+    }
+ 
+    if (((unsigned long)guest_vaddr & PAGE_MASK) == 
+        (((unsigned long)guest_vaddr + len -1) & PAGE_MASK)) 
+    {
+        gpte = gva_to_gpte((unsigned long)guest_vaddr);
+        mfn = phys_to_machine_mapping(l1e_get_pfn(gpte));
+        ma = (mfn << PAGE_SHIFT) | 
+             ((unsigned long)guest_vaddr & (PAGE_SIZE - 1));
+        vstart = (void *)map_domain_mem(ma);
+    }
+    else 
+    {
+        return NULL;
+    }
+    return vstart;
+}
+
+unsigned long
+copy_from_guest(void *to, const void __user *from, unsigned long n)
+{
+    void *hfrom;    
+    unsigned long ncopy;
+    int nleft;
+    ncopy = (((unsigned long)from  + PAGE_SIZE) & PAGE_MASK) - 
+            (unsigned long)from;
+    ncopy = ncopy > n ? n : ncopy;  
+
+    for(nleft = n; nleft > 0; ncopy = nleft > PAGE_SIZE ? PAGE_SIZE : nleft) 
+    {
+        hfrom = map_domain_vaddr((void*)from, ncopy);
+        if(hfrom) 
+        {
+            memcpy(to, hfrom, ncopy);
+            unmap_domain_mem((void*)hfrom); 
+        }
+        else 
+        {
+            printk("error!, copy from guest map error, from:%p, ncopy:%ld\n", 
+                   from, ncopy);
+             return nleft;
+        }
+        nleft -= ncopy;
+        from += ncopy;
+        to += ncopy;
+    }
+    return nleft;
+}
+EXPORT_SYMBOL(copy_from_guest);
+
+unsigned long
+copy_to_guest(void __user *to, const void *from, unsigned long n)
+{
+    void *hto;  
+    unsigned long ncopy;
+    int nleft;
+
+    ncopy = (((unsigned long)to  + PAGE_SIZE) & PAGE_MASK) - (unsigned long)to;
+    ncopy = ncopy > n ? n : ncopy;  
+
+    for(nleft = n; nleft > 0; ncopy = nleft > PAGE_SIZE ? PAGE_SIZE : nleft) 
+    {
+        hto = map_domain_vaddr((void*)to, ncopy);
+        if(hto) 
+        {
+            memcpy(hto, from, ncopy);
+            unmap_domain_mem((void*)hto); 
+        }
+        else 
+        {
+            printk("error!, copy to guest map error, from:%p, ncopy:%ld\n", 
+                   from, ncopy);
+            return nleft;
+        }
+        nleft -= ncopy;
+        from += ncopy;
+        to += ncopy;
+    }
+    return nleft;
+}
+EXPORT_SYMBOL(copy_to_guest);
+
 /**
  * copy_to_user: - Copy a block of data into user space.
  * @to:   Destination address, in user space.
@@ -411,6 +505,8 @@
 unsigned long
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+    if(shadow_mode_external(current->domain))
+        return copy_to_guest(to, from, n);
        if (access_ok(to, n))
                n = __copy_to_user(to, from, n);
        return n;
@@ -435,6 +531,9 @@
 unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+
+    if(shadow_mode_external(current->domain))
+        return copy_from_guest(to, from, n);
        if (access_ok(from, n))
                n = __copy_from_user(to, from, n);
        else
===== xen/arch/x86/x86_64/usercopy.c 1.6 vs edited =====
--- 1.6/xen/arch/x86/x86_64/usercopy.c  2005-06-03 03:54:00 +08:00
+++ edited/xen/arch/x86/x86_64/usercopy.c       2005-06-03 10:13:30 +08:00
@@ -135,6 +135,21 @@
        return n;
 }
 
+unsigned long
+copy_from_guest(void *to, const void __user *from, unsigned long n)
+{
+  return n;
+}
+EXPORT_SYMBOL(copy_from_guest);
+
+unsigned long
+copy_to_guest(void __user *to, const void *from, unsigned long n)
+{
+    return n;
+}
+EXPORT_SYMBOL(copy_to_guest);
+
+
 /**
  * copy_to_user: - Copy a block of data into user space.
  * @to:   Destination address, in user space.
===== xen/common/dom_mem_ops.c 1.58 vs edited =====
--- 1.58/xen/common/dom_mem_ops.c       2005-06-03 05:24:41 +08:00
+++ edited/xen/common/dom_mem_ops.c     2005-06-03 10:13:31 +08:00
@@ -82,15 +82,21 @@
     struct pfn_info *page;
     unsigned long    i, j, mpfn;
 
-    if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
+    if ( !shadow_mode_external(current->domain) && 
+         unlikely(!array_access_ok(extent_list, nr_extents, 
+                                   sizeof(*extent_list))) )
         return start_extent;
 
     for ( i = start_extent; i < nr_extents; i++ )
     {
-        PREEMPT_CHECK(MEMOP_decrease_reservation);
-
-        if ( unlikely(__get_user(mpfn, &extent_list[i]) != 0) )
-            return i;
+        if( shadow_mode_external(current->domain)) {
+            if(copy_from_guest(&mpfn, &extent_list[i], sizeof(mpfn)) != 0)
+                return i;
+        } else {
+            PREEMPT_CHECK(MEMOP_decrease_reservation);
+           if ( unlikely(__get_user(mpfn, &extent_list[i]) != 0) )
+                return i;
+        }
 
         for ( j = 0; j < (1 << extent_order); j++ )
         {
@@ -102,6 +108,7 @@
             }
             
             page = &frame_table[mpfn + j];
+
             if ( unlikely(!get_page(page, d)) )
             {
                 DPRINTK("Bad page free for domain %u\n", d->domain_id);
===== xen/common/grant_table.c 1.46 vs edited =====
--- 1.46/xen/common/grant_table.c       2005-06-03 05:05:31 +08:00
+++ edited/xen/common/grant_table.c     2005-06-03 10:13:32 +08:00
@@ -160,7 +160,10 @@
 
         /* rmb(); */ /* not on x86 */
 
-        frame = __gpfn_to_mfn_foreign(granting_d, sha->frame);
+        if(!shadow_mode_translate(granting_d))
+            frame = __gpfn_to_mfn_foreign(granting_d, sha->frame);
+        else
+            frame = sha->frame;
 
         if ( unlikely(!pfn_valid(frame)) ||
              unlikely(!((dev_hst_ro_flags & GNTMAP_readonly) ?
@@ -669,7 +672,8 @@
     {
         DPRINTK("Xen only supports up to %d grant-table frames per domain.\n",
                 NR_GRANT_FRAMES);
-        (void)put_user(GNTST_general_error, &uop->status);
+        op.status = GNTST_general_error;
+        (void)copy_to_user(uop, &op, sizeof(op));
         return 0;
     }
 
@@ -679,25 +683,44 @@
     }
     else if ( unlikely(!IS_PRIV(current->domain)) )
     {
-        (void)put_user(GNTST_permission_denied, &uop->status);
+        op.status = GNTST_permission_denied;
+        (void)copy_to_user(uop, &op, sizeof(op));
         return 0;
     }
 
     if ( unlikely((d = find_domain_by_id(op.dom)) == NULL) )
     {
         DPRINTK("Bad domid %d.\n", op.dom);
-        (void)put_user(GNTST_bad_domain, &uop->status);
+        op.status = GNTST_bad_domain;
+        (void)copy_to_user(uop, &op, sizeof(op));
         return 0;
     }
 
     if ( op.nr_frames <= NR_GRANT_FRAMES )
     {
         ASSERT(d->grant_table != NULL);
-        (void)put_user(GNTST_okay, &uop->status);
-        for ( i = 0; i < op.nr_frames; i++ )
+        if(!shadow_mode_external(current->domain))
+        {
+            (void)put_user(GNTST_okay, &uop->status);
+            for ( i = 0; i < op.nr_frames; i++ )
             (void)put_user(
                 (virt_to_phys(d->grant_table->shared) >> PAGE_SHIFT) + i,
                 &uop->frame_list[i]);
+        }
+        else
+        {
+            op.status = GNTST_okay;
+            for ( i = 0; i < op.nr_frames; i++ )
+            {
+               if(map_sharepage_to_guest((unsigned long)op.frame_list + i * PAGE_SIZE, (unsigned long)d->grant_table->shared + i * PAGE_SIZE))
+               {
+     
+                   op.status = GNTST_general_error;
+                   break;
+               }
+            }
+            (void)copy_to_user(uop, &op, sizeof(op));
+        }
     }
 
     put_domain(d);
===== xen/common/multicall.c 1.12 vs edited =====
--- 1.12/xen/common/multicall.c 2005-06-03 05:24:41 +08:00
+++ edited/xen/common/multicall.c       2005-06-03 10:14:26 +08:00
@@ -12,6 +12,7 @@
 #include <xen/multicall.h>
 #include <asm/current.h>
 #include <asm/hardirq.h>
+#include <asm/shadow.h>
 
 struct mc_state mc_state[NR_CPUS];
 
@@ -19,6 +20,7 @@
 {
     struct mc_state *mcs = &mc_state[smp_processor_id()];
     unsigned int     i;
+    int rc;
 
     if ( unlikely(__test_and_set_bit(_MCSF_in_multicall, &mcs->flags)) )
     {
@@ -26,7 +28,8 @@
         return -EINVAL;
     }
 
-    if ( unlikely(!array_access_ok(call_list, nr_calls, sizeof(*call_list))) )
+    if (likely(!shadow_mode_external(current->domain)) && 
+        unlikely(!array_access_ok(call_list, nr_calls, sizeof(*call_list))) )
     {
         DPRINTK("Bad memory range %p for %u*%u bytes.\n",
                 call_list, nr_calls, (unsigned int)sizeof(*call_list));
@@ -35,23 +38,41 @@
 
     for ( i = 0; i < nr_calls; i++ )
     {
-        if ( unlikely(__copy_from_user(&mcs->call, &call_list[i], 
-                                       sizeof(*call_list))) )
+        if(shadow_mode_external(current->domain)) 
+        {
+            rc = copy_from_guest(&mcs->call, &call_list[i], 
+                    sizeof(*call_list)); 
+        }
+        else
+            rc = __copy_from_user(&mcs->call, &call_list[i], 
+                    sizeof(*call_list));
+        if ( unlikely(rc) )
         {
             DPRINTK("Error copying from user range %p for %u bytes.\n",
                     &call_list[i], (unsigned int)sizeof(*call_list));
+
             goto fault;
         }
 
         do_multicall_call(&mcs->call);
 
-        if ( unlikely(__put_user(mcs->call.result, &call_list[i].result)) )
+        if(shadow_mode_external(current->domain)) 
+        {
+            rc = copy_to_guest(&call_list[i].result, &mcs->call.result, 
+                    sizeof(mcs->call.result));
+        }
+        else
+        {
+            rc = __put_user(mcs->call.result, &call_list[i].result); 
+        }
+
+        if ( unlikely(rc) )
         {
             DPRINTK("Error writing result back to multicall block.\n");
             goto fault;
         }
 
-        if ( hypercall_preempt_check() )
+        if ( hypercall_preempt_check() && !shadow_mode_external(current->domain))
         {
             /*
              * Copy the sub-call continuation if it was preempted.
@@ -59,16 +80,22 @@
              */
             if ( !test_bit(_MCSF_call_preempted, &mcs->flags) )
                 i++;
-            else
-                (void)__copy_to_user(&call_list[i], &mcs->call,
-                                     sizeof(*call_list));
+            else 
+            {
+                if(shadow_mode_external(current->domain))
+                    (void)copy_to_guest(&call_list[i], &mcs->call,
+                                        sizeof(*call_list));
+                else
+                    (void)__copy_to_user(&call_list[i], &mcs->call,
+                                         sizeof(*call_list));
+            }
 
             /* Only create a continuation if there is work left to be done. */
             if ( i < nr_calls )
             {
                 mcs->flags = 0;
                 return hypercall2_create_continuation(
-                    __HYPERVISOR_multicall, &call_list[i], nr_calls-i);
+                        __HYPERVISOR_multicall, &call_list[i], nr_calls-i);
             }
         }
     }
===== xen/include/asm-x86/domain.h 1.29 vs edited =====
--- 1.29/xen/include/asm-x86/domain.h   2005-06-03 05:05:31 +08:00
+++ edited/xen/include/asm-x86/domain.h 2005-06-03 10:13:35 +08:00
@@ -117,6 +117,10 @@
 
     /* Current LDT details. */
     unsigned long shadow_ldt_mapcnt;
+ 
+    /* Callback IRQ for virtual devices in an unmodified Linux guest */
+    unsigned int callback_irq;
+ 
 } __cacheline_aligned;
 
 #endif /* __ASM_DOMAIN_H__ */
===== xen/include/asm-x86/mm.h 1.76 vs edited =====
--- 1.76/xen/include/asm-x86/mm.h       2005-06-03 05:05:31 +08:00
+++ edited/xen/include/asm-x86/mm.h     2005-06-03 10:15:15 +08:00
@@ -272,6 +272,23 @@
     
     return mfn; 
 }
+
+static inline unsigned long set_phystomachine(unsigned long pfn, 
+                                              unsigned long ma) 
+{
+    l1_pgentry_t pte;
+    if (__copy_from_user(&pte, (__phys_to_machine_mapping + pfn), 
+                        sizeof(pte))) {
+        return 0;
+    }
+
+    pte = l1e_from_paddr(ma, l1e_get_flags(pte));
+    if(__copy_to_user((__phys_to_machine_mapping + pfn), &pte, sizeof(pte)))
+        return 0;
+
+    return ma;
+}
+
 #define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn)
 
 #ifdef MEMORY_GUARD
@@ -349,4 +366,8 @@
                             l1_pgentry_t _nl1e, 
                             struct domain *d,
                             struct vcpu *v);
+
+
+void page_info_mfn(char *s, unsigned long mfn);
+void page_info_pte(char *s, l1_pgentry_t pte);
 #endif /* __ASM_X86_MM_H__ */
===== xen/include/asm-x86/shadow.h 1.103 vs edited =====
--- 1.103/xen/include/asm-x86/shadow.h  2005-06-03 05:25:15 +08:00
+++ edited/xen/include/asm-x86/shadow.h 2005-06-03 10:13:39 +08:00
@@ -111,6 +111,7 @@
 
 extern void shadow_mode_init(void);
 extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
+extern int map_sharepage_to_guest(unsigned long va, unsigned long ma);
 extern int shadow_fault(unsigned long va, struct cpu_user_regs *regs);
 extern int shadow_mode_enable(struct domain *p, unsigned int mode);
 extern void shadow_invlpg(struct vcpu *, unsigned long);
@@ -600,6 +601,9 @@
            __func__, page_to_pfn(page));
     printk("Before: mfn=%lx c=%08x t=%08x\n", page_to_pfn(page),
            page->count_info, page->u.inuse.type_info);
+
+    if(shadow_mode_external(d))
+        return;
 
     shadow_lock(d);
     shadow_remove_all_access(d, page_to_pfn(page));
===== xen/include/asm-x86/vmx_virpit.h 1.3 vs edited =====
--- 1.3/xen/include/asm-x86/vmx_virpit.h        2005-06-03 05:05:31 +08:00
+++ edited/xen/include/asm-x86/vmx_virpit.h     2005-06-03 10:30:23 +08:00
@@ -33,6 +33,7 @@
 
     unsigned int count;                /* the 16 bit channel count */
     unsigned int init_val;     /* the init value for the counter */
+    struct vcpu *v;
 
 } ;
 
===== xen/include/asm-x86/vmx_vmcs.h 1.13 vs edited =====
--- 1.13/xen/include/asm-x86/vmx_vmcs.h 2005-06-03 05:05:31 +08:00
+++ edited/xen/include/asm-x86/vmx_vmcs.h       2005-06-03 10:13:40 +08:00
@@ -184,6 +184,8 @@
 #define DBG_LEVEL_3     (1 << 3)
 #define DBG_LEVEL_IO    (1 << 4)
 #define DBG_LEVEL_VMMU  (1 << 5)
+#define DBG_LEVEL_VBD  (1 << 6)
+#define DBG_LEVEL_VNIF  (1 << 7)
 
 extern unsigned int opt_vmx_debug_level;
 #define VMX_DBG_LOG(level, _f, _a...)           \
===== xen/include/asm-x86/x86_32/uaccess.h 1.19 vs edited =====
--- 1.19/xen/include/asm-x86/x86_32/uaccess.h   2005-04-23 00:34:08 +08:00
+++ edited/xen/include/asm-x86/x86_32/uaccess.h 2005-06-03 10:13:40 +08:00
@@ -332,6 +332,11 @@
 unsigned long copy_from_user(void *to,
                              const void __user *from, unsigned long n);
 
+unsigned long copy_to_guest(void __user *to, 
+                            const void *from, unsigned long n);
+unsigned long copy_from_guest(void *to,
+                              const void __user *from, unsigned long n);
+
 unsigned long clear_user(void __user *mem, unsigned long len);
 unsigned long __clear_user(void __user *mem, unsigned long len);
 
===== xen/include/asm-x86/x86_64/uaccess.h 1.15 vs edited =====
--- 1.15/xen/include/asm-x86/x86_64/uaccess.h   2005-04-19 21:48:04 +08:00
+++ edited/xen/include/asm-x86/x86_64/uaccess.h 2005-06-03 10:13:41 +08:00
@@ -224,6 +224,11 @@
 unsigned long copy_to_user(void __user *to, const void *from, unsigned len); 
 unsigned long copy_from_user(void *to, const void __user *from, unsigned len); 
 
+unsigned long copy_to_guest(void __user *to, 
+                            const void *from, unsigned long n);
+unsigned long copy_from_guest(void *to,
+                              const void __user *from, unsigned long n);
+
 static always_inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
 { 
     int ret = 0;
===== xen/include/public/arch-x86_32.h 1.39 vs edited =====
--- 1.39/xen/include/public/arch-x86_32.h       2005-06-01 17:49:23 +08:00
+++ edited/xen/include/public/arch-x86_32.h     2005-06-03 10:13:43 +08:00
@@ -57,7 +57,12 @@
 #define FLAT_USER_SS    FLAT_RING3_SS
 
 /* And the trap vector is... */
+#if defined (CONFIG_VMX_GUEST)
+/* for VMX paravirtualized drivers */
+#define TRAP_INSTR     ".byte 0x0f,0x01,0xc1\n"
+#else
 #define TRAP_INSTR "int $0x82"
+#endif
 
 
 /*
===== xen/include/public/xen.h 1.127 vs edited =====
--- 1.127/xen/include/public/xen.h      2005-06-01 17:49:23 +08:00
+++ edited/xen/include/public/xen.h     2005-06-03 10:13:44 +08:00
@@ -42,6 +42,7 @@
 #define __HYPERVISOR_set_debugreg          8
 #define __HYPERVISOR_get_debugreg          9
 #define __HYPERVISOR_update_descriptor    10
+#define __HYPERVISOR_debug_printk         11
 #define __HYPERVISOR_dom_mem_op           12
 #define __HYPERVISOR_multicall            13
 #define __HYPERVISOR_update_va_mapping    14
@@ -58,6 +59,7 @@
 #define __HYPERVISOR_boot_vcpu            24
 #define __HYPERVISOR_set_segment_base     25 /* x86/64 only */
 #define __HYPERVISOR_mmuext_op            26
+#define __HYPERVISOR_virtual_device_op    28   
 
 /* 
  * VIRTUAL INTERRUPTS
@@ -234,6 +236,16 @@
 #define VMASST_TYPE_writable_pagetables  2
 #define MAX_VMASST_TYPE 2
 
+/*
+ * Commands to HYPERVISOR_virtual_device_op().
+ */
+
+#define SET_SHAREINFO_MAP   1
+#define ADDR_MACHTOPHYS     2
+#define ADDR_PHYSTOMACH     3
+#define SET_PHYSTOMACH      4
+#define SET_CALLBACK_IRQ    5
+
 #ifndef __ASSEMBLY__
 
 typedef u16 domid_t;
@@ -322,7 +334,8 @@
      */
     u8 evtchn_upcall_pending;           /* 0 */
     u8 evtchn_upcall_mask;              /* 1 */
-    u8 pad0, pad1;
+    u8 callback_mask;                   /* 2 */ 
+    u8 pad1;
     u32 evtchn_pending_sel;             /* 4 */
     arch_vcpu_info_t arch;              /* 8 */
 } PACKED vcpu_info_t;                   /* 8 + arch */
===== xen/include/xen/config.h 1.41 vs edited =====
--- 1.41/xen/include/xen/config.h       2005-05-10 01:50:08 +08:00
+++ edited/xen/include/xen/config.h     2005-06-03 10:13:45 +08:00
@@ -36,6 +36,14 @@
 #define DPRINTK(_f, _a...) ((void)0)
 #endif
 
+#ifdef VERBOSE
+#define VNIFPRINTK(_a...) \
+     if(shadow_mode_external(current->domain))  \
+        printk(_a);
+#else
+#define VNIFPRINTK(_a...) 
+#endif
+
 #ifndef __ASSEMBLY__
 #include <xen/compiler.h>
 #endif
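
As a guest-side note for anyone trying the patch: the sketch below is one way a driver in an unmodified guest could reach do_virtual_device_op() via the VMCALL-based TRAP_INSTR defined for CONFIG_VMX_GUEST above. Apart from the two #defines taken from the patch, the wrapper and its name are assumptions for illustration only; the register layout matches what vmx_do_hypercall() reads (eax = hypercall number, ebx/ecx/edx = arguments).

#define __HYPERVISOR_virtual_device_op 28   /* as added to public/xen.h by the patch */
#define SET_CALLBACK_IRQ                5   /* as added to public/xen.h by the patch */

static inline unsigned long virtual_device_op(unsigned long op,
                                              unsigned long arg1,
                                              unsigned long arg2)
{
    unsigned long ret;
    __asm__ __volatile__ (
        ".byte 0x0f,0x01,0xc1"              /* VMCALL, i.e. TRAP_INSTR for VMX guests */
        : "=a" (ret)
        : "0" (__HYPERVISOR_virtual_device_op),
          "b" (op), "c" (arg1), "d" (arg2)
        : "memory" );
    return ret;
}

/* Example: ask Xen to deliver event-channel upcalls on guest IRQ 11
 * (do_virtual_device_op() maps this to vector 0x20 + 11):
 *
 *     virtual_device_op(SET_CALLBACK_IRQ, 11, 0);
 */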
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
