WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
[Xen-changelog] [xen-unstable] [XEN] Get rid of gva_to_gpa translation

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [XEN] Get rid of gva_to_gpa translation
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 20 Feb 2007 14:10:09 -0800
Delivery-date: Tue, 20 Feb 2007 14:09:59 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Date 1171985823 0
# Node ID 6746873997b5441ed875ec17b626d2863dc5255c
# Parent  e4ddec3dffb0b0dbd1fdc4dc51e1aafcba05bfb7
[XEN] Get rid of gva_to_gpa translation
It didn't have any sensible error checking.  Make all callers
use gva_to_gfn translation and check the result.  MMIO and PIO
callers inject pagefaults to the guest if the non-IO address is
not mapped.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
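
[Editor's note: the following is a sketch, not part of the patch, summarising the
pattern the PIO/MMIO emulation paths adopt below.  translate_or_fault() is a
hypothetical helper; the types, macros and calls (paging_gva_to_gfn, INVALID_GFN,
hvm_inject_exception, PFEC_write_access, paddr_t) are the ones used in the diff.]

/* Sketch only: translate a guest VA for an I/O operand, or inject a
 * page fault if the guest pagetables do not map it.  Returns 1 and
 * fills *paddr on success; returns 0 after injecting the fault. */
static int translate_or_fault(struct vcpu *v, unsigned long addr,
                              int dir, paddr_t *paddr)
{
    unsigned long gfn = paging_gva_to_gfn(v, addr);

    if ( gfn == INVALID_GFN )
    {
        /* No guest mapping: an I/O read is a memory *write* on the
         * non-I/O side, so set the write-access error code for reads. */
        int errcode = (dir == IOREQ_READ) ? PFEC_write_access : 0;
        hvm_inject_exception(TRAP_page_fault, errcode, addr);
        return 0;
    }

    /* Compose the physical address from the gfn and the page offset. */
    *paddr = ((paddr_t)gfn << PAGE_SHIFT) | (addr & ~PAGE_MASK);
    return 1;
}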
---
 xen/arch/x86/hvm/io.c          |   30 ++++++++++++++-
 xen/arch/x86/hvm/platform.c    |   77 ++++++++++++++++++++++++++---------------
 xen/arch/x86/hvm/svm/svm.c     |   48 +++++++++++++++++++++----
 xen/arch/x86/hvm/vmx/vmx.c     |   39 ++++++++++++++++++--
 xen/arch/x86/mm/shadow/multi.c |   14 -------
 xen/arch/x86/mm/shadow/types.h |    3 +
 xen/include/asm-x86/hvm/io.h   |    2 -
 xen/include/asm-x86/p2m.h      |    2 -
 xen/include/asm-x86/paging.h   |   15 +------
 9 files changed, 160 insertions(+), 70 deletions(-)

diff -r e4ddec3dffb0 -r 6746873997b5 xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c     Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/arch/x86/hvm/io.c     Tue Feb 20 15:37:03 2007 +0000
@@ -371,7 +371,20 @@ static void hvm_pio_assist(struct cpu_us
             {
                 unsigned long addr = pio_opp->addr;
                 if ( hvm_paging_enabled(current) )
-                    (void)hvm_copy_to_guest_virt(addr, &p->data, p->size);
+                {
+                    int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
+                    if ( rv != 0 ) 
+                    {
+                        /* Failed on the page-spanning copy.  Inject PF into
+                         * the guest for the address where we failed. */
+                        addr += p->size - rv;
+                        gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side "
+                                 "of a page-spanning PIO: va=%#lx\n", addr);
+                        hvm_inject_exception(TRAP_page_fault, 
+                                             PFEC_write_access, addr);
+                        return;
+                    }
+                }
                 else
                     (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
             }
@@ -489,7 +502,20 @@ static void hvm_mmio_assist(struct cpu_u
             unsigned long addr = mmio_opp->addr;
 
             if (hvm_paging_enabled(current))
-                (void)hvm_copy_to_guest_virt(addr, &p->data, p->size);
+            {
+                int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
+                if ( rv != 0 ) 
+                {
+                    /* Failed on the page-spanning copy.  Inject PF into
+                     * the guest for the address where we failed. */
+                    addr += p->size - rv;
+                    gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side of "
+                             "a page-spanning MMIO: va=%#lx\n", addr);
+                    hvm_inject_exception(TRAP_page_fault, 
+                                         PFEC_write_access, addr);
+                    return;
+                }
+            }
             else
                 (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
         }
diff -r e4ddec3dffb0 -r 6746873997b5 xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c       Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/arch/x86/hvm/platform.c       Tue Feb 20 15:37:03 2007 +0000
@@ -815,7 +815,7 @@ int inst_copy_from_guest(unsigned char *
 }
 
 void send_pio_req(unsigned long port, unsigned long count, int size,
-                  long value, int dir, int df, int value_is_ptr)
+                  paddr_t value, int dir, int df, int value_is_ptr)
 {
     struct vcpu *v = current;
     vcpu_iodata_t *vio;
@@ -823,7 +823,7 @@ void send_pio_req(unsigned long port, un
 
     if ( size == 0 || count == 0 ) {
         printk("null pio request? port %lx, count %lx, "
-               "size %d, value %lx, dir %d, value_is_ptr %d.\n",
+               "size %d, value %"PRIpaddr", dir %d, value_is_ptr %d.\n",
                port, count, size, value, dir, value_is_ptr);
     }
 
@@ -849,15 +849,7 @@ void send_pio_req(unsigned long port, un
 
     p->io_count++;
 
-    if ( value_is_ptr )   /* get physical address of data */
-    {
-        if ( hvm_paging_enabled(current) )
-            p->data = paging_gva_to_gpa(current, value);
-        else
-            p->data = value; /* guest VA == guest PA */
-    }
-    else if ( dir == IOREQ_WRITE )
-        p->data = value;
+    p->data = value;
 
     if ( hvm_portio_intercept(p) )
     {
@@ -870,7 +862,7 @@ void send_pio_req(unsigned long port, un
 }
 
 static void send_mmio_req(unsigned char type, unsigned long gpa,
-                          unsigned long count, int size, long value,
+                          unsigned long count, int size, paddr_t value,
                           int dir, int df, int value_is_ptr)
 {
     struct vcpu *v = current;
@@ -879,7 +871,8 @@ static void send_mmio_req(unsigned char 
 
     if ( size == 0 || count == 0 ) {
         printk("null mmio request? type %d, gpa %lx, "
-               "count %lx, size %d, value %lx, dir %d, value_is_ptr %d.\n",
+               "count %lx, size %d, value %"PRIpaddr"x, dir %d, "
+               "value_is_ptr %d.\n",
                type, gpa, count, size, value, dir, value_is_ptr);
     }
 
@@ -905,15 +898,7 @@ static void send_mmio_req(unsigned char 
 
     p->io_count++;
 
-    if ( value_is_ptr )
-    {
-        if ( hvm_paging_enabled(v) )
-            p->data = paging_gva_to_gpa(v, value);
-        else
-            p->data = value; /* guest VA == guest PA */
-    }
-    else
-        p->data = value;
+    p->data = value;
 
     if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) )
     {
@@ -960,6 +945,7 @@ static void mmio_operands(int type, unsi
 #define GET_REPEAT_COUNT() \
      (mmio_op->flags & REPZ ? (ad_size == WORD ? regs->ecx & 0xFFFF : regs->ecx) : 1)
 
+
 void handle_mmio(unsigned long gpa)
 {
     unsigned long inst_addr;
@@ -1014,7 +1000,8 @@ void handle_mmio(unsigned long gpa)
     {
         unsigned long count = GET_REPEAT_COUNT();
         int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
-        unsigned long addr;
+        unsigned long addr, gfn; 
+        paddr_t paddr;
         int dir, size = op_size;
 
         ASSERT(count);
@@ -1024,7 +1011,9 @@ void handle_mmio(unsigned long gpa)
         if ( ad_size == WORD )
             addr &= 0xFFFF;
         addr += hvm_get_segment_base(v, x86_seg_es);
-        if ( paging_gva_to_gpa(v, addr) == gpa )
+        gfn = paging_gva_to_gfn(v, addr);
+        paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
+        if ( paddr == gpa )
         {
             enum x86_segment seg;
 
@@ -1044,9 +1033,23 @@ void handle_mmio(unsigned long gpa)
             default: domain_crash_synchronous();
             }
             addr += hvm_get_segment_base(v, seg);
+            gfn = paging_gva_to_gfn(v, addr);
+            paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
         }
         else
             dir = IOREQ_READ;
+
+        if ( gfn == INVALID_GFN ) 
+        {
+            /* The guest does not have the non-mmio address mapped. 
+             * Need to send in a page fault */
+            int errcode = 0;
+            /* IO read --> memory write */
+            if ( dir == IOREQ_READ ) errcode |= PFEC_write_access;
+            regs->eip -= inst_len; /* do not advance %eip */
+            hvm_inject_exception(TRAP_page_fault, errcode, addr);
+            return;
+        }
 
         /*
          * In case of a movs spanning multiple pages, we break the accesses
@@ -1065,10 +1068,27 @@ void handle_mmio(unsigned long gpa)
 
             if ( dir == IOREQ_WRITE ) {
                 if ( hvm_paging_enabled(v) )
-                    (void)hvm_copy_from_guest_virt(&value, addr, size);
+                {
+                    int rv = hvm_copy_from_guest_virt(&value, addr, size);
+                    if ( rv != 0 ) 
+                    {
+                        /* Failed on the page-spanning copy.  Inject PF into
+                         * the guest for the address where we failed */
+                        regs->eip -= inst_len; /* do not advance %eip */
+                        /* Must set CR2 at the failing address */ 
+                        addr += size - rv;
+                        gdprintk(XENLOG_DEBUG, "Pagefault on non-io side of a "
+                                 "page-spanning MMIO: va=%#lx\n", addr);
+                        hvm_inject_exception(TRAP_page_fault, 0, addr);
+                        return;
+                    }
+                }
                 else
-                    (void)hvm_copy_from_guest_phys(&value, addr, size);
-            } else
+                    (void) hvm_copy_from_guest_phys(&value, addr, size);
+            } else /* dir != IOREQ_WRITE */
+                /* Remember where to write the result, as a *VA*.
+                 * Must be a VA so we can handle the page overlap 
+                 * correctly in hvm_mmio_assist() */
                 mmio_op->addr = addr;
 
             if ( count != 1 )
@@ -1091,7 +1111,8 @@ void handle_mmio(unsigned long gpa)
 
             ASSERT(count);
 
-            send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, addr, dir, df, 1);
+            send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, 
+                          paddr, dir, df, 1);
         }
         break;
     }
diff -r e4ddec3dffb0 -r 6746873997b5 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue Feb 20 15:37:03 2007 +0000
@@ -1589,6 +1589,8 @@ static void svm_io_instruction(struct vc
     if (info.fields.str)
     { 
         unsigned long addr, count;
+        paddr_t paddr;
+        unsigned long gfn;
         int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
 
         if (!svm_get_io_address(v, regs, size, info, &count, &addr))
@@ -1606,6 +1608,20 @@ static void svm_io_instruction(struct vc
             pio_opp->flags |= REPZ;
         }
 
+        /* Translate the address to a physical address */
+        gfn = paging_gva_to_gfn(v, addr);
+        if ( gfn == INVALID_GFN ) 
+        {
+            /* The guest does not have the RAM address mapped. 
+             * Need to send in a page fault */
+            int errcode = 0;
+            /* IO read --> memory write */
+            if ( dir == IOREQ_READ ) errcode |= PFEC_write_access;
+            svm_hvm_inject_exception(TRAP_page_fault, errcode, addr);
+            return;
+        }
+        paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
+
         /*
          * Handle string pio instructions that cross pages or that
          * are unaligned. See the comments in hvm_platform.c/handle_mmio()
@@ -1619,11 +1635,27 @@ static void svm_io_instruction(struct vc
 
             if (dir == IOREQ_WRITE)   /* OUTS */
             {
-                if (hvm_paging_enabled(current))
-                    (void)hvm_copy_from_guest_virt(&value, addr, size);
+                if ( hvm_paging_enabled(current) )
+                {
+                    int rv = hvm_copy_from_guest_virt(&value, addr, size);
+                    if ( rv != 0 ) 
+                    {
+                        /* Failed on the page-spanning copy.  Inject PF into
+                         * the guest for the address where we failed. */
+                        addr += size - rv;
+                        gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
+                                 "of a page-spanning PIO: va=%#lx\n", addr);
+                        svm_hvm_inject_exception(TRAP_page_fault, 0, addr);
+                        return;
+                    }
+                }
                 else
-                    (void)hvm_copy_from_guest_phys(&value, addr, size);
-            }
+                    (void) hvm_copy_from_guest_phys(&value, addr, size);
+            } else /* dir != IOREQ_WRITE */
+                /* Remember where to write the result, as a *VA*.
+                 * Must be a VA so we can handle the page overlap 
+                 * correctly in hvm_pio_assist() */
+                pio_opp->addr = addr;
 
             if (count == 1)
                 regs->eip = vmcb->exitinfo2;
@@ -1645,7 +1677,7 @@ static void svm_io_instruction(struct vc
             else    
                 regs->eip = vmcb->exitinfo2;
 
-            send_pio_req(port, count, size, addr, dir, df, 1);
+            send_pio_req(port, count, size, paddr, dir, df, 1);
         }
     } 
     else 
@@ -2718,7 +2750,8 @@ asmlinkage void svm_vmexit_handler(struc
         if (svm_dbg_on && exit_reason == VMEXIT_EXCEPTION_PF) 
         {
             if (svm_paging_enabled(v) && 
-                !mmio_space(paging_gva_to_gpa(current, vmcb->exitinfo2)))
+                !mmio_space(
+                    paging_gva_to_gfn(current, vmcb->exitinfo2) << PAGE_SHIFT))
             {
                 printk("I%08ld,ExC=%s(%d),IP=%x:%"PRIx64","
                        "I1=%"PRIx64",I2=%"PRIx64",INT=%"PRIx64", "
@@ -2728,7 +2761,8 @@ asmlinkage void svm_vmexit_handler(struc
                        (u64)vmcb->exitinfo1,
                        (u64)vmcb->exitinfo2,
                        (u64)vmcb->exitintinfo.bytes,
-                       (u64)paging_gva_to_gpa(current, vmcb->exitinfo2));
+                       (((u64)paging_gva_to_gfn(current, vmcb->exitinfo2)
+                        << PAGE_SHIFT) | (vmcb->exitinfo2 & ~PAGE_MASK)));
             }
             else 
             {
diff -r e4ddec3dffb0 -r 6746873997b5 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Tue Feb 20 15:37:03 2007 +0000
@@ -1426,6 +1426,8 @@ static void vmx_io_instruction(unsigned 
 
     if ( test_bit(4, &exit_qualification) ) { /* string instruction */
         unsigned long addr, count = 1, base;
+        paddr_t paddr;
+        unsigned long gfn;
         u32 ar_bytes, limit;
         int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
         int long_mode = 0;
@@ -1545,6 +1547,20 @@ static void vmx_io_instruction(unsigned 
         }
 #endif
 
+        /* Translate the address to a physical address */
+        gfn = paging_gva_to_gfn(current, addr);
+        if ( gfn == INVALID_GFN ) 
+        {
+            /* The guest does not have the RAM address mapped. 
+             * Need to send in a page fault */
+            int errcode = 0;
+            /* IO read --> memory write */
+            if ( dir == IOREQ_READ ) errcode |= PFEC_write_access;
+            vmx_inject_exception(TRAP_page_fault, errcode, addr);
+            return;
+        }
+        paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
+
         /*
          * Handle string pio instructions that cross pages or that
          * are unaligned. See the comments in hvm_domain.c/handle_mmio()
@@ -1557,10 +1573,25 @@ static void vmx_io_instruction(unsigned 
             if ( dir == IOREQ_WRITE )   /* OUTS */
             {
                 if ( hvm_paging_enabled(current) )
-                    (void)hvm_copy_from_guest_virt(&value, addr, size);
+                {
+                    int rv = hvm_copy_from_guest_virt(&value, addr, size);
+                    if ( rv != 0 ) 
+                    {
+                        /* Failed on the page-spanning copy.  Inject PF into
+                         * the guest for the address where we failed. */ 
+                        addr += size - rv;
+                        gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
+                                 "of a page-spanning PIO: va=%#lx\n", addr);
+                        vmx_inject_exception(TRAP_page_fault, 0, addr);
+                        return;
+                    }
+                }
                 else
-                    (void)hvm_copy_from_guest_phys(&value, addr, size);
-            } else
+                    (void) hvm_copy_from_guest_phys(&value, addr, size);
+            } else /* dir != IOREQ_WRITE */
+                /* Remember where to write the result, as a *VA*.
+                 * Must be a VA so we can handle the page overlap 
+                 * correctly in hvm_pio_assist() */
                 pio_opp->addr = addr;
 
             if ( count == 1 )
@@ -1580,7 +1611,7 @@ static void vmx_io_instruction(unsigned 
             } else
                 regs->eip += inst_len;
 
-            send_pio_req(port, count, size, addr, dir, df, 1);
+            send_pio_req(port, count, size, paddr, dir, df, 1);
         }
     } else {
         if ( port == 0xe9 && dir == IOREQ_WRITE && size == 1 )
diff -r e4ddec3dffb0 -r 6746873997b5 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Tue Feb 20 15:37:03 2007 +0000
@@ -3038,19 +3038,6 @@ sh_gva_to_gfn(struct vcpu *v, unsigned l
 }
 
 
-static paddr_t
-sh_gva_to_gpa(struct vcpu *v, unsigned long va)
-/* Called to translate a guest virtual address to what the *guest*
- * pagetables would map it to. */
-{
-    unsigned long gfn = sh_gva_to_gfn(v, va);
-    if ( gfn == INVALID_GFN )
-        return 0;
-    else
-        return (((paddr_t)gfn) << PAGE_SHIFT) + (va & ~PAGE_MASK);
-}
-
-
 static inline void
 sh_update_linear_entries(struct vcpu *v)
 /* Sync up all the linear mappings for this vcpu's pagetables */
@@ -4348,7 +4335,6 @@ struct paging_mode sh_paging_mode = {
 struct paging_mode sh_paging_mode = {
     .page_fault                    = sh_page_fault, 
     .invlpg                        = sh_invlpg,
-    .gva_to_gpa                    = sh_gva_to_gpa,
     .gva_to_gfn                    = sh_gva_to_gfn,
     .update_cr3                    = sh_update_cr3,
     .update_paging_modes           = shadow_update_paging_modes,
diff -r e4ddec3dffb0 -r 6746873997b5 xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h    Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/arch/x86/mm/shadow/types.h    Tue Feb 20 15:37:03 2007 +0000
@@ -244,6 +244,7 @@ static inline shadow_l4e_t shadow_l4e_fr
 
 /* Type of the guest's frame numbers */
 TYPE_SAFE(u32,gfn)
+#undef INVALID_GFN
 #define INVALID_GFN ((u32)(-1u))
 #define SH_PRI_gfn "05x"
 
@@ -307,6 +308,7 @@ static inline guest_l2e_t guest_l2e_from
 
 /* Type of the guest's frame numbers */
 TYPE_SAFE(unsigned long,gfn)
+#undef INVALID_GFN
 #define INVALID_GFN ((unsigned long)(-1ul))
 #define SH_PRI_gfn "05lx"
 
@@ -467,7 +469,6 @@ struct shadow_walk_t
  */
 #define sh_page_fault              INTERNAL_NAME(sh_page_fault)
 #define sh_invlpg                  INTERNAL_NAME(sh_invlpg)
-#define sh_gva_to_gpa              INTERNAL_NAME(sh_gva_to_gpa)
 #define sh_gva_to_gfn              INTERNAL_NAME(sh_gva_to_gfn)
 #define sh_update_cr3              INTERNAL_NAME(sh_update_cr3)
 #define sh_rm_write_access_from_l1 INTERNAL_NAME(sh_rm_write_access_from_l1)
diff -r e4ddec3dffb0 -r 6746873997b5 xen/include/asm-x86/hvm/io.h
--- a/xen/include/asm-x86/hvm/io.h      Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/include/asm-x86/hvm/io.h      Tue Feb 20 15:37:03 2007 +0000
@@ -144,7 +144,7 @@ static inline int irq_masked(unsigned lo
 #endif
 
 extern void send_pio_req(unsigned long port, unsigned long count, int size,
-                         long value, int dir, int df, int value_is_ptr);
+                         paddr_t value, int dir, int df, int value_is_ptr);
 extern void handle_mmio(unsigned long gpa);
 extern void hvm_interrupt_post(struct vcpu *v, int vector, int type);
 extern void hvm_io_assist(struct vcpu *v);
diff -r e4ddec3dffb0 -r 6746873997b5 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/include/asm-x86/p2m.h Tue Feb 20 15:37:03 2007 +0000
@@ -89,7 +89,7 @@ static inline unsigned long get_mfn_from
 /* Is this guest address an mmio one? (i.e. not defined in p2m map) */
 static inline int mmio_space(paddr_t gpa)
 {
-    unsigned long gfn = gpa >> PAGE_SHIFT;    
+    unsigned long gfn = gpa >> PAGE_SHIFT;
     return !mfn_valid(mfn_x(gfn_to_mfn_current(gfn)));
 }
 
diff -r e4ddec3dffb0 -r 6746873997b5 xen/include/asm-x86/paging.h
--- a/xen/include/asm-x86/paging.h      Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/include/asm-x86/paging.h      Tue Feb 20 15:37:03 2007 +0000
@@ -115,7 +115,6 @@ struct paging_mode {
     int           (*page_fault            )(struct vcpu *v, unsigned long va,
                                             struct cpu_user_regs *regs);
     int           (*invlpg                )(struct vcpu *v, unsigned long va);
-    paddr_t       (*gva_to_gpa            )(struct vcpu *v, unsigned long va);
     unsigned long (*gva_to_gfn            )(struct vcpu *v, unsigned long va);
     void          (*update_cr3            )(struct vcpu *v, int do_locking);
     void          (*update_paging_modes   )(struct vcpu *v);
@@ -190,18 +189,10 @@ static inline int paging_invlpg(struct v
     return v->arch.paging.mode->invlpg(v, va);
 }
 
-/* Translate a guest virtual address to the physical address that the
- * *guest* pagetables would map it to. */
-static inline paddr_t paging_gva_to_gpa(struct vcpu *v, unsigned long va)
-{
-    if ( unlikely(!paging_vcpu_mode_translate(v)) )
-        return (paddr_t) va;
-
-    return v->arch.paging.mode->gva_to_gpa(v, va);
-}
-
 /* Translate a guest virtual address to the frame number that the
- * *guest* pagetables would map it to. */
+ * *guest* pagetables would map it to.  Returns INVALID_GFN if the guest 
+ * tables don't map this address. */
+#define INVALID_GFN (-1UL)
 static inline unsigned long paging_gva_to_gfn(struct vcpu *v, unsigned long va)
 {
     if ( unlikely(!paging_vcpu_mode_translate(v)) )

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
