WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 

[Xen-changelog] [xen-unstable] merge with xen-unstable.hg

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] merge with xen-unstable.hg
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 23 Feb 2007 09:50:31 -0800
Delivery-date: Fri, 23 Feb 2007 10:40:07 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User awilliam@xxxxxxxxxxxx
# Date 1172011437 25200
# Node ID 315c348e5f9e2f929032dae3fec3e2583ca91785
# Parent  409e94d0a35babaa945e0406301e151f2550b87a
# Parent  e7994a122aab535b7e13fd1686cc3b8ce5d4552a
merge with xen-unstable.hg
---
 tools/check/check_crypto_lib                |    3 -
 xen/acm/acm_simple_type_enforcement_hooks.c |   11 ++--
 xen/arch/x86/hvm/io.c                       |   43 ++++++++++++++-
 xen/arch/x86/hvm/platform.c                 |   77 +++++++++++++++++-----------
 xen/arch/x86/hvm/svm/svm.c                  |   48 ++++++++++++++---
 xen/arch/x86/hvm/vmx/vmx.c                  |   39 ++++++++++++--
 xen/arch/x86/mm/shadow/multi.c              |   20 -------
 xen/arch/x86/mm/shadow/types.h              |    3 -
 xen/arch/x86/x86_emulate.c                  |   77 ++++++++++++++++------------
 xen/include/asm-x86/hvm/io.h                |    2 
 xen/include/asm-x86/p2m.h                   |    2 
 xen/include/asm-x86/paging.h                |   15 +----
 12 files changed, 228 insertions(+), 112 deletions(-)

diff -r 409e94d0a35b -r 315c348e5f9e tools/check/check_crypto_lib
--- a/tools/check/check_crypto_lib      Tue Feb 20 15:12:11 2007 -0700
+++ b/tools/check/check_crypto_lib      Tue Feb 20 15:43:57 2007 -0700
@@ -3,8 +3,9 @@
 
 RC=0
 
+PATH=/sbin:$PATH
 set -e
-ldconfig -v 2>&1 | grep -q libcrypto.so || RC=1
+ldconfig -p 2>&1 | grep -q libcrypto.so || RC=1
 
 if test ${RC} -ne 0; then
         echo
diff -r 409e94d0a35b -r 315c348e5f9e xen/acm/acm_simple_type_enforcement_hooks.c
--- a/xen/acm/acm_simple_type_enforcement_hooks.c       Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/acm/acm_simple_type_enforcement_hooks.c       Tue Feb 20 15:43:57 2007 -0700
@@ -235,7 +235,7 @@ ste_init_state(struct acm_ste_policy_buf
         } 
         /* b) check for grant table conflicts on shared pages */
         spin_lock(&(*pd)->grant_table->lock);
-        for ( i = 0; i < nr_grant_frames((*pd)->grant_table); i++ ) {
+        for ( i = 0; i < nr_grant_entries((*pd)->grant_table); i++ ) {
 #define SPP (PAGE_SIZE / sizeof(struct grant_entry))
             sha_copy = (*pd)->grant_table->shared[i/SPP][i%SPP];
             if ( sha_copy.flags ) {
@@ -244,8 +244,9 @@ ste_init_state(struct acm_ste_policy_buf
                         (unsigned long)sha_copy.frame);
                 rdomid = sha_copy.domid;
                 if ((rdom = get_domain_by_id(rdomid)) == NULL) {
+                    spin_unlock(&(*pd)->grant_table->lock);
                     printkd("%s: domain not found ERROR!\n", __func__);
-                    goto out_gnttab;
+                    goto out;
                 };
                 /* rdom now has remote domain */
                 ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, 
@@ -253,16 +254,16 @@ ste_init_state(struct acm_ste_policy_buf
                 ste_rssidref = ste_rssid->ste_ssidref;
                 put_domain(rdom);
                 if (!have_common_type(ste_ssidref, ste_rssidref)) {
+                    spin_unlock(&(*pd)->grant_table->lock);
                     printkd("%s: Policy violation in grant table sharing domain %x -> domain %x.\n",
                             __func__, (*pd)->domain_id, rdomid);
-                    goto out_gnttab;
+                    goto out;
                 }
             }
         }
+        spin_unlock(&(*pd)->grant_table->lock);
     }
     violation = 0;
- out_gnttab:
-    spin_unlock(&(*pd)->grant_table->lock);
  out:
     read_unlock(&domlist_lock);
     return violation;
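
The hunk above moves the grant-table unlock onto each early-exit path and drops the shared out_gnttab label, so an error goto can no longer leave the lock held (or drop a lock taken for a different iteration). A minimal standalone sketch of the same unlock-before-goto shape, using pthreads and invented names rather than Xen's grant-table spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

int check_entries(const int *entries, int n)
{
    int violation = 1;
    int i;

    pthread_mutex_lock(&table_lock);
    for (i = 0; i < n; i++) {
        if (entries[i] < 0) {
            /* Unlock on the failing path itself, not at a shared label
             * after the loop, so no exit route can leave the lock held. */
            pthread_mutex_unlock(&table_lock);
            fprintf(stderr, "conflict at entry %d\n", i);
            goto out;
        }
    }
    pthread_mutex_unlock(&table_lock);
    violation = 0;
 out:
    return violation;
}
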
diff -r 409e94d0a35b -r 315c348e5f9e xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c     Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/arch/x86/hvm/io.c     Tue Feb 20 15:43:57 2007 -0700
@@ -371,7 +371,20 @@ static void hvm_pio_assist(struct cpu_us
             {
                 unsigned long addr = pio_opp->addr;
                 if ( hvm_paging_enabled(current) )
-                    (void)hvm_copy_to_guest_virt(addr, &p->data, p->size);
+                {
+                    int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
+                    if ( rv != 0 ) 
+                    {
+                        /* Failed on the page-spanning copy.  Inject PF into
+                         * the guest for the address where we failed. */
+                        addr += p->size - rv;
+                        gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side "
+                                 "of a page-spanning PIO: va=%#lx\n", addr);
+                        hvm_inject_exception(TRAP_page_fault, 
+                                             PFEC_write_access, addr);
+                        return;
+                    }
+                }
                 else
                     (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
             }
@@ -489,7 +502,20 @@ static void hvm_mmio_assist(struct cpu_u
             unsigned long addr = mmio_opp->addr;
 
             if (hvm_paging_enabled(current))
-                (void)hvm_copy_to_guest_virt(addr, &p->data, p->size);
+            {
+                int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
+                if ( rv != 0 ) 
+                {
+                    /* Failed on the page-spanning copy.  Inject PF into
+                     * the guest for the address where we failed. */
+                    addr += p->size - rv;
+                    gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side of "
+                             "a page-spanning MMIO: va=%#lx\n", addr);
+                    hvm_inject_exception(TRAP_page_fault, 
+                                         PFEC_write_access, addr);
+                    return;
+                }
+            }
             else
                 (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
         }
@@ -689,7 +715,18 @@ static void hvm_mmio_assist(struct cpu_u
 
     case INSTR_PUSH:
         mmio_opp->addr += hvm_get_segment_base(current, x86_seg_ss);
-        hvm_copy_to_guest_virt(mmio_opp->addr, &p->data, size);
+        { 
+            unsigned long addr = mmio_opp->addr;
+            int rv = hvm_copy_to_guest_virt(addr, &p->data, size);
+            if ( rv != 0 ) 
+            {
+                addr += p->size - rv;
+                gdprintk(XENLOG_DEBUG, "Pagefault emulating PUSH from MMIO: "
+                         "va=%#lx\n", addr);
+                hvm_inject_exception(TRAP_page_fault, PFEC_write_access, addr);
+                return;
+            }
+        }
         break;
     }
 }
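
The new error handling above relies on hvm_copy_to_guest_virt() returning the number of bytes it could not copy, so the first faulting virtual address is addr + size - rv and a write page fault is injected there. A small self-contained illustration of that arithmetic; the copy helper, boundary and addresses below are made up, not the Xen API:

#include <stdint.h>
#include <stdio.h>

/* Pretend copy that succeeds only below 'fault_boundary'; returns the
 * number of bytes NOT copied, zero on success (the convention the hunks
 * above assume for hvm_copy_to_guest_virt). */
static size_t copy_to_guest(uint64_t dst, size_t size, uint64_t fault_boundary)
{
    size_t ok = (dst >= fault_boundary) ? 0
              : (dst + size <= fault_boundary) ? size
              : (size_t)(fault_boundary - dst);
    return size - ok;
}

int main(void)
{
    uint64_t addr = 0x1ff8;          /* write spans a 4k page boundary */
    size_t size = 16;

    size_t rv = copy_to_guest(addr, size, 0x2000);
    if (rv != 0) {
        uint64_t fault_va = addr + size - rv;   /* first byte that failed */
        printf("inject #PF (write) at va=%#llx\n",
               (unsigned long long)fault_va);   /* prints va=0x2000 */
    }
    return 0;
}
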
diff -r 409e94d0a35b -r 315c348e5f9e xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c       Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/arch/x86/hvm/platform.c       Tue Feb 20 15:43:57 2007 -0700
@@ -815,7 +815,7 @@ int inst_copy_from_guest(unsigned char *
 }
 
 void send_pio_req(unsigned long port, unsigned long count, int size,
-                  long value, int dir, int df, int value_is_ptr)
+                  paddr_t value, int dir, int df, int value_is_ptr)
 {
     struct vcpu *v = current;
     vcpu_iodata_t *vio;
@@ -823,7 +823,7 @@ void send_pio_req(unsigned long port, un
 
     if ( size == 0 || count == 0 ) {
         printk("null pio request? port %lx, count %lx, "
-               "size %d, value %lx, dir %d, value_is_ptr %d.\n",
+               "size %d, value %"PRIpaddr", dir %d, value_is_ptr %d.\n",
                port, count, size, value, dir, value_is_ptr);
     }
 
@@ -849,15 +849,7 @@ void send_pio_req(unsigned long port, un
 
     p->io_count++;
 
-    if ( value_is_ptr )   /* get physical address of data */
-    {
-        if ( hvm_paging_enabled(current) )
-            p->data = paging_gva_to_gpa(current, value);
-        else
-            p->data = value; /* guest VA == guest PA */
-    }
-    else if ( dir == IOREQ_WRITE )
-        p->data = value;
+    p->data = value;
 
     if ( hvm_portio_intercept(p) )
     {
@@ -870,7 +862,7 @@ void send_pio_req(unsigned long port, un
 }
 
 static void send_mmio_req(unsigned char type, unsigned long gpa,
-                          unsigned long count, int size, long value,
+                          unsigned long count, int size, paddr_t value,
                           int dir, int df, int value_is_ptr)
 {
     struct vcpu *v = current;
@@ -879,7 +871,8 @@ static void send_mmio_req(unsigned char 
 
     if ( size == 0 || count == 0 ) {
         printk("null mmio request? type %d, gpa %lx, "
-               "count %lx, size %d, value %lx, dir %d, value_is_ptr %d.\n",
+               "count %lx, size %d, value %"PRIpaddr"x, dir %d, "
+               "value_is_ptr %d.\n",
                type, gpa, count, size, value, dir, value_is_ptr);
     }
 
@@ -905,15 +898,7 @@ static void send_mmio_req(unsigned char 
 
     p->io_count++;
 
-    if ( value_is_ptr )
-    {
-        if ( hvm_paging_enabled(v) )
-            p->data = paging_gva_to_gpa(v, value);
-        else
-            p->data = value; /* guest VA == guest PA */
-    }
-    else
-        p->data = value;
+    p->data = value;
 
     if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) )
     {
@@ -960,6 +945,7 @@ static void mmio_operands(int type, unsi
 #define GET_REPEAT_COUNT() \
      (mmio_op->flags & REPZ ? (ad_size == WORD ? regs->ecx & 0xFFFF : regs->ecx) : 1)
 
+
 void handle_mmio(unsigned long gpa)
 {
     unsigned long inst_addr;
@@ -1014,7 +1000,8 @@ void handle_mmio(unsigned long gpa)
     {
         unsigned long count = GET_REPEAT_COUNT();
         int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
-        unsigned long addr;
+        unsigned long addr, gfn; 
+        paddr_t paddr;
         int dir, size = op_size;
 
         ASSERT(count);
@@ -1024,7 +1011,9 @@ void handle_mmio(unsigned long gpa)
         if ( ad_size == WORD )
             addr &= 0xFFFF;
         addr += hvm_get_segment_base(v, x86_seg_es);
-        if ( paging_gva_to_gpa(v, addr) == gpa )
+        gfn = paging_gva_to_gfn(v, addr);
+        paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
+        if ( paddr == gpa )
         {
             enum x86_segment seg;
 
@@ -1044,9 +1033,23 @@ void handle_mmio(unsigned long gpa)
             default: domain_crash_synchronous();
             }
             addr += hvm_get_segment_base(v, seg);
+            gfn = paging_gva_to_gfn(v, addr);
+            paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
         }
         else
             dir = IOREQ_READ;
+
+        if ( gfn == INVALID_GFN ) 
+        {
+            /* The guest does not have the non-mmio address mapped. 
+             * Need to send in a page fault */
+            int errcode = 0;
+            /* IO read --> memory write */
+            if ( dir == IOREQ_READ ) errcode |= PFEC_write_access;
+            regs->eip -= inst_len; /* do not advance %eip */
+            hvm_inject_exception(TRAP_page_fault, errcode, addr);
+            return;
+        }
 
         /*
          * In case of a movs spanning multiple pages, we break the accesses
@@ -1065,10 +1068,27 @@ void handle_mmio(unsigned long gpa)
 
             if ( dir == IOREQ_WRITE ) {
                 if ( hvm_paging_enabled(v) )
-                    (void)hvm_copy_from_guest_virt(&value, addr, size);
+                {
+                    int rv = hvm_copy_from_guest_virt(&value, addr, size);
+                    if ( rv != 0 ) 
+                    {
+                        /* Failed on the page-spanning copy.  Inject PF into
+                         * the guest for the address where we failed */
+                        regs->eip -= inst_len; /* do not advance %eip */
+                        /* Must set CR2 at the failing address */ 
+                        addr += size - rv;
+                        gdprintk(XENLOG_DEBUG, "Pagefault on non-io side of a "
+                                 "page-spanning MMIO: va=%#lx\n", addr);
+                        hvm_inject_exception(TRAP_page_fault, 0, addr);
+                        return;
+                    }
+                }
                 else
-                    (void)hvm_copy_from_guest_phys(&value, addr, size);
-            } else
+                    (void) hvm_copy_from_guest_phys(&value, addr, size);
+            } else /* dir != IOREQ_WRITE */
+                /* Remember where to write the result, as a *VA*.
+                 * Must be a VA so we can handle the page overlap 
+                 * correctly in hvm_mmio_assist() */
                 mmio_op->addr = addr;
 
             if ( count != 1 )
@@ -1091,7 +1111,8 @@ void handle_mmio(unsigned long gpa)
 
             ASSERT(count);
 
-            send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, addr, dir, df, 1);
+            send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, 
+                          paddr, dir, df, 1);
         }
         break;
     }
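
Several hunks in this file (and in the SVM/VMX changes below) replace paging_gva_to_gpa() with a gfn lookup plus an explicit recombination of frame number and page offset. A standalone sketch of that translation, with illustrative constants and a dummy lookup standing in for paging_gva_to_gfn():

#include <stdint.h>

#define PAGE_SHIFT   12
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define PAGE_MASK    (~(PAGE_SIZE - 1))
#define INVALID_GFN  (~0UL)

typedef uint64_t paddr_t;

/* Stand-in for paging_gva_to_gfn(): identity-map the low 1MB only. */
unsigned long gva_to_gfn(unsigned long va)
{
    return (va < 0x100000UL) ? (va >> PAGE_SHIFT) : INVALID_GFN;
}

/* Returns 0 and fills *paddr on success, -1 if the address is unmapped
 * (the callers in this patch inject a page fault in that case). */
int gva_to_paddr(unsigned long va, paddr_t *paddr)
{
    unsigned long gfn = gva_to_gfn(va);
    if (gfn == INVALID_GFN)
        return -1;
    *paddr = ((paddr_t)gfn << PAGE_SHIFT) | (va & ~PAGE_MASK);
    return 0;
}
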
diff -r 409e94d0a35b -r 315c348e5f9e xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue Feb 20 15:43:57 2007 -0700
@@ -1589,6 +1589,8 @@ static void svm_io_instruction(struct vc
     if (info.fields.str)
     { 
         unsigned long addr, count;
+        paddr_t paddr;
+        unsigned long gfn;
         int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
 
         if (!svm_get_io_address(v, regs, size, info, &count, &addr))
@@ -1606,6 +1608,20 @@ static void svm_io_instruction(struct vc
             pio_opp->flags |= REPZ;
         }
 
+        /* Translate the address to a physical address */
+        gfn = paging_gva_to_gfn(v, addr);
+        if ( gfn == INVALID_GFN ) 
+        {
+            /* The guest does not have the RAM address mapped. 
+             * Need to send in a page fault */
+            int errcode = 0;
+            /* IO read --> memory write */
+            if ( dir == IOREQ_READ ) errcode |= PFEC_write_access;
+            svm_hvm_inject_exception(TRAP_page_fault, errcode, addr);
+            return;
+        }
+        paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
+
         /*
          * Handle string pio instructions that cross pages or that
          * are unaligned. See the comments in hvm_platform.c/handle_mmio()
@@ -1619,11 +1635,27 @@ static void svm_io_instruction(struct vc
 
             if (dir == IOREQ_WRITE)   /* OUTS */
             {
-                if (hvm_paging_enabled(current))
-                    (void)hvm_copy_from_guest_virt(&value, addr, size);
+                if ( hvm_paging_enabled(current) )
+                {
+                    int rv = hvm_copy_from_guest_virt(&value, addr, size);
+                    if ( rv != 0 ) 
+                    {
+                        /* Failed on the page-spanning copy.  Inject PF into
+                         * the guest for the address where we failed. */
+                        addr += size - rv;
+                        gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
+                                 "of a page-spanning PIO: va=%#lx\n", addr);
+                        svm_hvm_inject_exception(TRAP_page_fault, 0, addr);
+                        return;
+                    }
+                }
                 else
-                    (void)hvm_copy_from_guest_phys(&value, addr, size);
-            }
+                    (void) hvm_copy_from_guest_phys(&value, addr, size);
+            } else /* dir != IOREQ_WRITE */
+                /* Remember where to write the result, as a *VA*.
+                 * Must be a VA so we can handle the page overlap 
+                 * correctly in hvm_pio_assist() */
+                pio_opp->addr = addr;
 
             if (count == 1)
                 regs->eip = vmcb->exitinfo2;
@@ -1645,7 +1677,7 @@ static void svm_io_instruction(struct vc
             else    
                 regs->eip = vmcb->exitinfo2;
 
-            send_pio_req(port, count, size, addr, dir, df, 1);
+            send_pio_req(port, count, size, paddr, dir, df, 1);
         }
     } 
     else 
@@ -2718,7 +2750,8 @@ asmlinkage void svm_vmexit_handler(struc
         if (svm_dbg_on && exit_reason == VMEXIT_EXCEPTION_PF) 
         {
             if (svm_paging_enabled(v) && 
-                !mmio_space(paging_gva_to_gpa(current, vmcb->exitinfo2)))
+                !mmio_space(
+                    paging_gva_to_gfn(current, vmcb->exitinfo2) << PAGE_SHIFT))
             {
                 printk("I%08ld,ExC=%s(%d),IP=%x:%"PRIx64","
                        "I1=%"PRIx64",I2=%"PRIx64",INT=%"PRIx64", "
@@ -2728,7 +2761,8 @@ asmlinkage void svm_vmexit_handler(struc
                        (u64)vmcb->exitinfo1,
                        (u64)vmcb->exitinfo2,
                        (u64)vmcb->exitintinfo.bytes,
-                       (u64)paging_gva_to_gpa(current, vmcb->exitinfo2));
+                       (((u64)paging_gva_to_gfn(current, vmcb->exitinfo2)
+                        << PAGE_SHIFT) | (vmcb->exitinfo2 & ~PAGE_MASK)));
             }
             else 
             {
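
The INVALID_GFN branches above pick the injected error code from the I/O direction: an INS (I/O read) stores into guest memory, so the missing mapping is reported as a failed write. A tiny sketch of that selection; PFEC_write_access mirrors bit 1 of the x86 page-fault error code, the other names are illustrative:

#include <stdint.h>

#define PFEC_write_access  (1u << 1)   /* x86 #PF error code: W/R bit */

enum io_dir { IO_READ, IO_WRITE };

uint32_t string_io_fault_errcode(enum io_dir dir)
{
    uint32_t errcode = 0;
    if (dir == IO_READ)        /* INS: device -> memory, i.e. a write */
        errcode |= PFEC_write_access;
    return errcode;            /* OUTS reads memory, leave bits clear */
}
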
diff -r 409e94d0a35b -r 315c348e5f9e xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Tue Feb 20 15:43:57 2007 -0700
@@ -1426,6 +1426,8 @@ static void vmx_io_instruction(unsigned 
 
     if ( test_bit(4, &exit_qualification) ) { /* string instruction */
         unsigned long addr, count = 1, base;
+        paddr_t paddr;
+        unsigned long gfn;
         u32 ar_bytes, limit;
         int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
         int long_mode = 0;
@@ -1545,6 +1547,20 @@ static void vmx_io_instruction(unsigned 
         }
 #endif
 
+        /* Translate the address to a physical address */
+        gfn = paging_gva_to_gfn(current, addr);
+        if ( gfn == INVALID_GFN ) 
+        {
+            /* The guest does not have the RAM address mapped. 
+             * Need to send in a page fault */
+            int errcode = 0;
+            /* IO read --> memory write */
+            if ( dir == IOREQ_READ ) errcode |= PFEC_write_access;
+            vmx_inject_exception(TRAP_page_fault, errcode, addr);
+            return;
+        }
+        paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
+
         /*
          * Handle string pio instructions that cross pages or that
          * are unaligned. See the comments in hvm_domain.c/handle_mmio()
@@ -1557,10 +1573,25 @@ static void vmx_io_instruction(unsigned 
             if ( dir == IOREQ_WRITE )   /* OUTS */
             {
                 if ( hvm_paging_enabled(current) )
-                    (void)hvm_copy_from_guest_virt(&value, addr, size);
+                {
+                    int rv = hvm_copy_from_guest_virt(&value, addr, size);
+                    if ( rv != 0 ) 
+                    {
+                        /* Failed on the page-spanning copy.  Inject PF into
+                         * the guest for the address where we failed. */ 
+                        addr += size - rv;
+                        gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
+                                 "of a page-spanning PIO: va=%#lx\n", addr);
+                        vmx_inject_exception(TRAP_page_fault, 0, addr);
+                        return;
+                    }
+                }
                 else
-                    (void)hvm_copy_from_guest_phys(&value, addr, size);
-            } else
+                    (void) hvm_copy_from_guest_phys(&value, addr, size);
+            } else /* dir != IOREQ_WRITE */
+                /* Remember where to write the result, as a *VA*.
+                 * Must be a VA so we can handle the page overlap 
+                 * correctly in hvm_pio_assist() */
                 pio_opp->addr = addr;
 
             if ( count == 1 )
@@ -1580,7 +1611,7 @@ static void vmx_io_instruction(unsigned 
             } else
                 regs->eip += inst_len;
 
-            send_pio_req(port, count, size, addr, dir, df, 1);
+            send_pio_req(port, count, size, paddr, dir, df, 1);
         }
     } else {
         if ( port == 0xe9 && dir == IOREQ_WRITE && size == 1 )
diff -r 409e94d0a35b -r 315c348e5f9e xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/arch/x86/mm/shadow/multi.c    Tue Feb 20 15:43:57 2007 -0700
@@ -3038,19 +3038,6 @@ sh_gva_to_gfn(struct vcpu *v, unsigned l
 }
 
 
-static paddr_t
-sh_gva_to_gpa(struct vcpu *v, unsigned long va)
-/* Called to translate a guest virtual address to what the *guest*
- * pagetables would map it to. */
-{
-    unsigned long gfn = sh_gva_to_gfn(v, va);
-    if ( gfn == INVALID_GFN )
-        return 0;
-    else
-        return (((paddr_t)gfn) << PAGE_SHIFT) + (va & ~PAGE_MASK);
-}
-
-
 static inline void
 sh_update_linear_entries(struct vcpu *v)
 /* Sync up all the linear mappings for this vcpu's pagetables */
@@ -3932,8 +3919,7 @@ static int safe_not_to_verify_write(mfn_
 #if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY)
     struct page_info *pg = mfn_to_page(gmfn);
     if ( !(pg->shadow_flags & SHF_32) 
-         && bytes == 4 
-         && ((unsigned long)dst & 3) == 0 )
+         && ((unsigned long)dst & 7) == 0 )
     {
         /* Not shadowed 32-bit: aligned 64-bit writes that leave the
          * present bit unset are safe to ignore. */
@@ -3942,8 +3928,7 @@ static int safe_not_to_verify_write(mfn_
             return 1;
     }
     else if ( !(pg->shadow_flags & (SHF_PAE|SHF_64)) 
-              && bytes == 8 
-              && ((unsigned long)dst & 7) == 0 )
+              && ((unsigned long)dst & 3) == 0 )
     {
         /* Not shadowed PAE/64-bit: aligned 32-bit writes that leave the
          * present bit unset are safe to ignore. */
@@ -4350,7 +4335,6 @@ struct paging_mode sh_paging_mode = {
 struct paging_mode sh_paging_mode = {
     .page_fault                    = sh_page_fault, 
     .invlpg                        = sh_invlpg,
-    .gva_to_gpa                    = sh_gva_to_gpa,
     .gva_to_gfn                    = sh_gva_to_gfn,
     .update_cr3                    = sh_update_cr3,
     .update_paging_modes           = shadow_update_paging_modes,
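
The safe_not_to_verify_write() hunks above correct the alignment tests so they match the entry size of the pagetable flavour being shadowed: 8-byte-aligned writes where the page is not shadowed as 32-bit, 4-byte-aligned writes where it is not shadowed as PAE/64-bit. A simplified sketch of just that test, with made-up flag values and without the present-bit check the real function also performs:

#include <stdint.h>

#define SHF_32   (1u << 0)   /* shadowed as 2-level 32-bit (illustrative) */
#define SHF_PAE  (1u << 1)
#define SHF_64   (1u << 2)

int write_may_skip_verify(uint32_t shadow_flags, uintptr_t dst)
{
    if (!(shadow_flags & SHF_32) && (dst & 7) == 0)
        return 1;   /* candidate 64-bit entry write, 8-byte aligned */
    if (!(shadow_flags & (SHF_PAE | SHF_64)) && (dst & 3) == 0)
        return 1;   /* candidate 32-bit entry write, 4-byte aligned */
    return 0;
}
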
diff -r 409e94d0a35b -r 315c348e5f9e xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h    Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/arch/x86/mm/shadow/types.h    Tue Feb 20 15:43:57 2007 -0700
@@ -244,6 +244,7 @@ static inline shadow_l4e_t shadow_l4e_fr
 
 /* Type of the guest's frame numbers */
 TYPE_SAFE(u32,gfn)
+#undef INVALID_GFN
 #define INVALID_GFN ((u32)(-1u))
 #define SH_PRI_gfn "05x"
 
@@ -307,6 +308,7 @@ static inline guest_l2e_t guest_l2e_from
 
 /* Type of the guest's frame numbers */
 TYPE_SAFE(unsigned long,gfn)
+#undef INVALID_GFN
 #define INVALID_GFN ((unsigned long)(-1ul))
 #define SH_PRI_gfn "05lx"
 
@@ -467,7 +469,6 @@ struct shadow_walk_t
  */
 #define sh_page_fault              INTERNAL_NAME(sh_page_fault)
 #define sh_invlpg                  INTERNAL_NAME(sh_invlpg)
-#define sh_gva_to_gpa              INTERNAL_NAME(sh_gva_to_gpa)
 #define sh_gva_to_gfn              INTERNAL_NAME(sh_gva_to_gfn)
 #define sh_update_cr3              INTERNAL_NAME(sh_update_cr3)
 #define sh_rm_write_access_from_l1 INTERNAL_NAME(sh_rm_write_access_from_l1)
diff -r 409e94d0a35b -r 315c348e5f9e xen/arch/x86/x86_emulate.c
--- a/xen/arch/x86/x86_emulate.c        Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/arch/x86/x86_emulate.c        Tue Feb 20 15:43:57 2007 -0700
@@ -519,6 +519,37 @@ do {                                    
                      ? (uint16_t)_regs.eip : (uint32_t)_regs.eip);      \
 } while (0)
 
+static int __handle_rep_prefix(
+    struct cpu_user_regs *int_regs,
+    struct cpu_user_regs *ext_regs,
+    int ad_bytes)
+{
+    unsigned long ecx = ((ad_bytes == 2) ? (uint16_t)int_regs->ecx :
+                         (ad_bytes == 4) ? (uint32_t)int_regs->ecx :
+                         int_regs->ecx);
+
+    if ( ecx-- == 0 )
+    {
+        ext_regs->eip = int_regs->eip;
+        return 1;
+    }
+
+    if ( ad_bytes == 2 )
+        *(uint16_t *)&int_regs->ecx = ecx;
+    else if ( ad_bytes == 4 )
+        int_regs->ecx = (uint32_t)ecx;
+    else
+        int_regs->ecx = ecx;
+    int_regs->eip = ext_regs->eip;
+    return 0;
+}
+
+#define handle_rep_prefix()                                                \
+do {                                                                       \
+    if ( rep_prefix && __handle_rep_prefix(&_regs, ctxt->regs, ad_bytes) ) \
+        goto done;                                                         \
+} while (0)
+
 /*
  * Unsigned multiplication with double-word result.
  * IN:  Multiplicand=m[0], Multiplier=m[1]
@@ -1579,17 +1610,6 @@ x86_emulate(
     if ( twobyte )
         goto twobyte_special_insn;
 
-    if ( rep_prefix )
-    {
-        if ( _regs.ecx == 0 )
-        {
-            ctxt->regs->eip = _regs.eip;
-            goto done;
-        }
-        _regs.ecx--;
-        _regs.eip = ctxt->regs->eip;
-    }
-
     switch ( b )
     {
     case 0x27: /* daa */ {
@@ -1727,6 +1747,7 @@ x86_emulate(
         break;
 
     case 0x6c ... 0x6d: /* ins %dx,%es:%edi */
+        handle_rep_prefix();
         generate_exception_if(!mode_iopl(), EXC_GP);
         dst.type  = OP_MEM;
         dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
@@ -1741,6 +1762,7 @@ x86_emulate(
         break;
 
     case 0x6e ... 0x6f: /* outs %esi,%dx */
+        handle_rep_prefix();
         generate_exception_if(!mode_iopl(), EXC_GP);
         dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
         if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi),
@@ -1827,6 +1849,7 @@ x86_emulate(
         break;
 
     case 0xa4 ... 0xa5: /* movs */
+        handle_rep_prefix();
         dst.type  = OP_MEM;
         dst.bytes = (d & ByteOp) ? 1 : op_bytes;
         dst.mem.seg = x86_seg_es;
@@ -1841,6 +1864,7 @@ x86_emulate(
         break;
 
     case 0xaa ... 0xab: /* stos */
+        handle_rep_prefix();
         dst.type  = OP_MEM;
         dst.bytes = (d & ByteOp) ? 1 : op_bytes;
         dst.mem.seg = x86_seg_es;
@@ -1851,6 +1875,7 @@ x86_emulate(
         break;
 
     case 0xac ... 0xad: /* lods */
+        handle_rep_prefix();
         dst.type  = OP_REG;
         dst.bytes = (d & ByteOp) ? 1 : op_bytes;
         dst.reg   = (unsigned long *)&_regs.eax;
@@ -2325,33 +2350,23 @@ x86_emulate(
 #endif
 
     case 0xc8 ... 0xcf: /* bswap */
-        dst.type  = OP_REG;
-        dst.reg   = decode_register(b & 7, &_regs, 0);
-        dst.val = *dst.reg;
+        dst.type = OP_REG;
+        dst.reg  = decode_register(
+            (b & 7) | ((rex_prefix & 1) << 3), &_regs, 0);
         switch ( dst.bytes = op_bytes )
         {
-        case 2:
-            dst.val = (((dst.val & 0x00FFUL) << 8) |
-                       ((dst.val & 0xFF00UL) >> 8));
+        default: /* case 2: */
+            /* Undefined behaviour. Writes zero on all tested CPUs. */
+            dst.val = 0;
             break;
         case 4:
-            dst.val = (((dst.val & 0x000000FFUL) << 24) |
-                       ((dst.val & 0x0000FF00UL) <<  8) |
-                       ((dst.val & 0x00FF0000UL) >>  8) |
-                       ((dst.val & 0xFF000000UL) >> 24));
-            break;
 #ifdef __x86_64__
+            __asm__ ( "bswap %k0" : "=r" (dst.val) : "0" (*dst.reg) );
+            break;
         case 8:
-            dst.val = (((dst.val & 0x00000000000000FFUL) << 56) |
-                       ((dst.val & 0x000000000000FF00UL) << 40) |
-                       ((dst.val & 0x0000000000FF0000UL) << 24) |
-                       ((dst.val & 0x00000000FF000000UL) <<  8) |
-                       ((dst.val & 0x000000FF00000000UL) >>  8) |
-                       ((dst.val & 0x0000FF0000000000UL) >> 24) |
-                       ((dst.val & 0x00FF000000000000UL) >> 40) |
-                       ((dst.val & 0xFF00000000000000UL) >> 56));
-            break;
 #endif
+            __asm__ ( "bswap %0" : "=r" (dst.val) : "0" (*dst.reg) );
+            break;
         }
         break;
     }
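
The new __handle_rep_prefix() helper above reads the repeat count from eCX truncated to the effective address size, treats a zero count as "skip the instruction", and writes the decremented count back at the same width. A standalone approximation; it uses a plain struct instead of the emulator's cpu_user_regs, and, like the original, the 16-bit write-back assumes a little-endian x86 layout:

#include <stdint.h>

struct fake_regs { unsigned long ecx, eip; };

/* Returns 1 if the string instruction must be skipped (count was zero). */
int handle_rep_prefix(struct fake_regs *internal, struct fake_regs *external,
                      int ad_bytes)
{
    unsigned long ecx =
        (ad_bytes == 2) ? (uint16_t)internal->ecx :
        (ad_bytes == 4) ? (uint32_t)internal->ecx :
                          internal->ecx;

    if (ecx-- == 0) {
        external->eip = internal->eip;   /* commit eip, emulate a no-op */
        return 1;
    }

    /* Write the decremented count back at the width it was read. */
    if (ad_bytes == 2)
        *(uint16_t *)&internal->ecx = (uint16_t)ecx;
    else if (ad_bytes == 4)
        internal->ecx = (uint32_t)ecx;
    else
        internal->ecx = ecx;

    /* Rewind the working eip so the instruction repeats next iteration. */
    internal->eip = external->eip;
    return 0;
}
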
diff -r 409e94d0a35b -r 315c348e5f9e xen/include/asm-x86/hvm/io.h
--- a/xen/include/asm-x86/hvm/io.h      Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/include/asm-x86/hvm/io.h      Tue Feb 20 15:43:57 2007 -0700
@@ -144,7 +144,7 @@ static inline int irq_masked(unsigned lo
 #endif
 
 extern void send_pio_req(unsigned long port, unsigned long count, int size,
-                         long value, int dir, int df, int value_is_ptr);
+                         paddr_t value, int dir, int df, int value_is_ptr);
 extern void handle_mmio(unsigned long gpa);
 extern void hvm_interrupt_post(struct vcpu *v, int vector, int type);
 extern void hvm_io_assist(struct vcpu *v);
diff -r 409e94d0a35b -r 315c348e5f9e xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/include/asm-x86/p2m.h Tue Feb 20 15:43:57 2007 -0700
@@ -89,7 +89,7 @@ static inline unsigned long get_mfn_from
 /* Is this guest address an mmio one? (i.e. not defined in p2m map) */
 static inline int mmio_space(paddr_t gpa)
 {
-    unsigned long gfn = gpa >> PAGE_SHIFT;    
+    unsigned long gfn = gpa >> PAGE_SHIFT;
     return !mfn_valid(mfn_x(gfn_to_mfn_current(gfn)));
 }
 
diff -r 409e94d0a35b -r 315c348e5f9e xen/include/asm-x86/paging.h
--- a/xen/include/asm-x86/paging.h      Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/include/asm-x86/paging.h      Tue Feb 20 15:43:57 2007 -0700
@@ -115,7 +115,6 @@ struct paging_mode {
     int           (*page_fault            )(struct vcpu *v, unsigned long va,
                                             struct cpu_user_regs *regs);
     int           (*invlpg                )(struct vcpu *v, unsigned long va);
-    paddr_t       (*gva_to_gpa            )(struct vcpu *v, unsigned long va);
     unsigned long (*gva_to_gfn            )(struct vcpu *v, unsigned long va);
     void          (*update_cr3            )(struct vcpu *v, int do_locking);
     void          (*update_paging_modes   )(struct vcpu *v);
@@ -190,18 +189,10 @@ static inline int paging_invlpg(struct v
     return v->arch.paging.mode->invlpg(v, va);
 }
 
-/* Translate a guest virtual address to the physical address that the
- * *guest* pagetables would map it to. */
-static inline paddr_t paging_gva_to_gpa(struct vcpu *v, unsigned long va)
-{
-    if ( unlikely(!paging_vcpu_mode_translate(v)) )
-        return (paddr_t) va;
-
-    return v->arch.paging.mode->gva_to_gpa(v, va);
-}
-
 /* Translate a guest virtual address to the frame number that the
- * *guest* pagetables would map it to. */
+ * *guest* pagetables would map it to.  Returns INVALID_GFN if the guest 
+ * tables don't map this address. */
+#define INVALID_GFN (-1UL)
 static inline unsigned long paging_gva_to_gfn(struct vcpu *v, unsigned long va)
 {
     if ( unlikely(!paging_vcpu_mode_translate(v)) )

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog