
[Xen-devel] [PATCH v7 2/2] add a new p2m type - p2m_mmio_write_dm



From: Yu Zhang <yu.c.zhang@xxxxxxxxx>

A new p2m type, p2m_mmio_write_dm, is added to trap and emulate
write operations on the GPU's page tables. Handling of this new
p2m type is similar to the existing p2m_ram_ro in most condition
checks; the only difference is the final policy of emulation vs.
drop: for p2m_ram_ro pages, writes do not trigger the device
model and are discarded later in __hvm_copy(), while for
p2m_mmio_write_dm pages, writes go to the device model via an
ioreq server.
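
As an illustrative sketch (not part of this patch): once the new
HVMMEM_mmio_write_dm type is exposed, a device model that already
runs an ioreq server could mark the guest frames backing the GPU
page tables through HVMOP_set_mem_type, e.g. via the libxc wrapper
xc_hvm_set_mem_type(). The gpu_pt_gfn/nr_pages range below is
hypothetical.

    /* Sketch only: ask Xen to treat a range of guest frames as
     * read-only RAM whose writes are forwarded to the device model
     * through its ioreq server.  Assumes the libxc helper
     * xc_hvm_set_mem_type(); gpu_pt_gfn and nr_pages are
     * placeholder values chosen by the caller. */
    #include <xenctrl.h>

    static int trap_gpu_pt_writes(xc_interface *xch, domid_t domid,
                                  uint64_t gpu_pt_gfn, uint32_t nr_pages)
    {
        /* Reads keep hitting RAM; writes now fault and are handed
         * to the emulator for write emulation. */
        return xc_hvm_set_mem_type(xch, domid, HVMMEM_mmio_write_dm,
                                   gpu_pt_gfn, nr_pages);
    }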

Signed-off-by: Yu Zhang <yu.c.zhang@xxxxxxxxxxxxxxx>
Signed-off-by: Wei Ye <wei.ye@xxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/hvm/hvm.c          | 11 ++++++++---
 xen/arch/x86/mm/p2m-ept.c       |  1 +
 xen/arch/x86/mm/p2m-pt.c        |  1 +
 xen/arch/x86/mm/shadow/multi.c  |  3 ++-
 xen/include/asm-x86/p2m.h       |  4 +++-
 xen/include/public/hvm/hvm_op.h |  1 +
 6 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 2eb0795..f66f2c6 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2837,7 +2837,8 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
      * to the mmio handler.
      */
     if ( (p2mt == p2m_mmio_dm) || 
-         (npfec.write_access && (p2m_is_discard_write(p2mt))) )
+         (npfec.write_access &&
+          (p2m_is_discard_write(p2mt) || (p2mt == p2m_mmio_write_dm))) )
     {
         put_gfn(p2m->domain, gfn);
 
@@ -5922,6 +5923,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             get_gfn_query_unlocked(d, a.pfn, &t);
             if ( p2m_is_mmio(t) )
                 a.mem_type =  HVMMEM_mmio_dm;
+            else if ( t == p2m_mmio_write_dm )
+                a.mem_type = HVMMEM_mmio_write_dm;
             else if ( p2m_is_readonly(t) )
                 a.mem_type =  HVMMEM_ram_ro;
             else if ( p2m_is_ram(t) )
@@ -5949,7 +5952,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
         static const p2m_type_t memtype[] = {
             [HVMMEM_ram_rw]  = p2m_ram_rw,
             [HVMMEM_ram_ro]  = p2m_ram_ro,
-            [HVMMEM_mmio_dm] = p2m_mmio_dm
+            [HVMMEM_mmio_dm] = p2m_mmio_dm,
+            [HVMMEM_mmio_write_dm] = p2m_mmio_write_dm
         };
 
         if ( copy_from_guest(&a, arg, 1) )
@@ -5996,7 +6000,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
                 goto param_fail4;
             }
             if ( !p2m_is_ram(t) &&
-                 (!p2m_is_hole(t) || a.hvmmem_type != HVMMEM_mmio_dm) )
+                 (!p2m_is_hole(t) || a.hvmmem_type != HVMMEM_mmio_dm) &&
+                 (t != p2m_mmio_write_dm || a.hvmmem_type != HVMMEM_ram_rw) )
             {
                 put_gfn(d, pfn);
                 goto param_fail4;
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 15c6e83..e21a92d 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -136,6 +136,7 @@ static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type, p2m_acces
             entry->x = 0;
             break;
         case p2m_grant_map_ro:
+        case p2m_mmio_write_dm:
             entry->r = 1;
             entry->w = entry->x = 0;
             break;
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index e48b63a..26fb18d 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -94,6 +94,7 @@ static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn)
     default:
         return flags | _PAGE_NX_BIT;
     case p2m_grant_map_ro:
+    case p2m_mmio_write_dm:
         return flags | P2M_BASE_FLAGS | _PAGE_NX_BIT;
     case p2m_ram_ro:
     case p2m_ram_logdirty:
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 94cf06d..65815bb 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3181,7 +3181,8 @@ static int sh_page_fault(struct vcpu *v,
     }
 
     /* Need to hand off device-model MMIO to the device model */
-    if ( p2mt == p2m_mmio_dm ) 
+    if ( p2mt == p2m_mmio_dm
+         || (p2mt == p2m_mmio_write_dm && ft == ft_demand_write) )
     {
         gpa = guest_walk_to_gpa(&gw);
         goto mmio;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 42de75d..2cf73ca 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -72,6 +72,7 @@ typedef enum {
     p2m_ram_shared = 12,          /* Shared or sharable memory */
     p2m_ram_broken = 13,          /* Broken page, access cause domain crash */
     p2m_map_foreign  = 14,        /* ram pages from foreign domain */
+    p2m_mmio_write_dm = 15,       /* Read-only; writes go to the device model */
 } p2m_type_t;
 
 /* Modifiers to the query */
@@ -111,7 +112,8 @@ typedef unsigned int p2m_query_t;
 #define P2M_RO_TYPES (p2m_to_mask(p2m_ram_logdirty)     \
                       | p2m_to_mask(p2m_ram_ro)         \
                       | p2m_to_mask(p2m_grant_map_ro)   \
-                      | p2m_to_mask(p2m_ram_shared) )
+                      | p2m_to_mask(p2m_ram_shared)     \
+                      | p2m_to_mask(p2m_mmio_write_dm))
 
 /* Write-discard types, which should discard the write operations */
 #define P2M_DISCARD_WRITE_TYPES (p2m_to_mask(p2m_ram_ro)     \
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index eeb0a60..a4e5345 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -81,6 +81,7 @@ typedef enum {
     HVMMEM_ram_rw,             /* Normal read/write guest RAM */
     HVMMEM_ram_ro,             /* Read-only; writes are discarded */
     HVMMEM_mmio_dm,            /* Reads and write go to the device model */
+    HVMMEM_mmio_write_dm       /* Read-only; writes go to the device model */
 } hvmmem_type_t;
 
 /* Following tools-only interfaces may change in future. */
-- 
1.9.1

