
[Xen-devel] [PATCH v5 1/4] x86/ioreq server: Rename p2m_mmio_write_dm to p2m_ioreq_server.



Previously, the p2m type p2m_mmio_write_dm was introduced for
write-protected memory pages whose write operations are supposed to be
forwarded to and emulated by an ioreq server. Yet the rangeset used to
track such pages limits the number of guest pages that can be
write-protected.

This patch renames the p2m type p2m_mmio_write_dm to p2m_ioreq_server,
to reflect that pages of this type are to be claimed by a single ioreq
server rather than tracked inside an ioreq server's rangeset. A new
memory type, HVMMEM_ioreq_server, is introduced in the
HVMOP_set/get_mem_type interface to set/get this p2m type.
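
As an illustrative sketch only (not part of this patch): a device model
could claim a guest page with the new type via libxenctrl's existing
xc_hvm_set_mem_type()/xc_hvm_get_mem_type() wrappers around
HVMOP_set/get_mem_type; the helper name below is hypothetical.

    /* Hypothetical usage sketch, not part of this patch.  Assumes the
     * existing libxenctrl wrappers xc_hvm_set_mem_type() and
     * xc_hvm_get_mem_type() around HVMOP_set/get_mem_type. */
    #include <xenctrl.h>

    static int claim_page_for_ioreq_server(xc_interface *xch, domid_t domid,
                                           uint64_t gfn)
    {
        hvmmem_type_t t;
        int rc;

        /* Mark a single guest page as p2m_ioreq_server. */
        rc = xc_hvm_set_mem_type(xch, domid, HVMMEM_ioreq_server, gfn, 1);
        if ( rc < 0 )
            return rc;

        /* Read the type back; HVMOP_get_mem_type should now report
         * HVMMEM_ioreq_server for this gfn. */
        return xc_hvm_get_mem_type(xch, domid, gfn, &t);
    }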

Follow-up patches will add the related HVMOP handling code to map/unmap
the p2m_ioreq_server type to/from an ioreq server. Without those
patches, memory type changes to HVMMEM_ioreq_server are still allowed;
in that case, p2m_ioreq_server pages are treated the same as pages of
the previous type p2m_mmio_write_dm and are tracked inside the ioreq
server's rangeset.
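
For illustration only (hypothetical helper, not part of this patch):
the HVMOP_set_mem_type check in the hvm.c hunk below still permits
reverting a p2m_ioreq_server page back to ordinary RAM, e.g.:

    /* Hypothetical sketch: release a page previously set to
     * HVMMEM_ioreq_server by reverting it to HVMMEM_ram_rw, which the
     * HVMOP_set_mem_type check below continues to allow. */
    #include <xenctrl.h>

    static int release_ioreq_server_page(xc_interface *xch, domid_t domid,
                                         uint64_t gfn)
    {
        return xc_hvm_set_mem_type(xch, domid, HVMMEM_ram_rw, gfn, 1);
    }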

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Signed-off-by: Yu Zhang <yu.c.zhang@xxxxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>

changes in v5: 
  - Commit message changes - explained why we can allow memory
    type changes to HVMMEM_ioreq_server.
  - Add George's Acked-by.

changes in v4: 
  - According to George's comments, move the HVMMEM_unused part
    into a separate patch (which has already been accepted);
  - Removed George's Reviewed-by because of changes after v3.
  - According to Wei Liu's comments, change the format of the commit
    message.

changes in v3: 
  - According to Jan & George's comments, keep HVMMEM_mmio_write_dm
    for old xen interface versions, and replace it with HVMMEM_unused
    for xen interfaces newer than 4.7.0; For p2m_ioreq_server, a new 
    enum - HVMMEM_ioreq_server is introduced for the get/set mem type
    interfaces;
  - Add George's Reviewed-by and Acked-by from Tim & Andrew.

changes in v2: 
  - According to George Dunlap's comments, only rename the p2m type,
    with no behavior changes.
---
 xen/arch/x86/hvm/hvm.c          | 9 ++++++---
 xen/arch/x86/mm/p2m-ept.c       | 2 +-
 xen/arch/x86/mm/p2m-pt.c        | 2 +-
 xen/arch/x86/mm/shadow/multi.c  | 2 +-
 xen/include/asm-x86/p2m.h       | 4 ++--
 xen/include/public/hvm/hvm_op.h | 5 +++--
 6 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c89ab6e..4b51c57 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1865,7 +1865,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
      */
     if ( (p2mt == p2m_mmio_dm) || 
          (npfec.write_access &&
-          (p2m_is_discard_write(p2mt) || (p2mt == p2m_mmio_write_dm))) )
+          (p2m_is_discard_write(p2mt) || (p2mt == p2m_ioreq_server))) )
     {
         __put_gfn(p2m, gfn);
         if ( ap2m_active )
@@ -5565,6 +5565,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             get_gfn_query_unlocked(d, a.pfn, &t);
             if ( p2m_is_mmio(t) )
                 a.mem_type =  HVMMEM_mmio_dm;
+            else if ( t == p2m_ioreq_server )
+                a.mem_type = HVMMEM_ioreq_server;
             else if ( p2m_is_readonly(t) )
                 a.mem_type =  HVMMEM_ram_ro;
             else if ( p2m_is_ram(t) )
@@ -5595,7 +5597,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             [HVMMEM_ram_rw]  = p2m_ram_rw,
             [HVMMEM_ram_ro]  = p2m_ram_ro,
             [HVMMEM_mmio_dm] = p2m_mmio_dm,
-            [HVMMEM_unused] = p2m_invalid
+            [HVMMEM_unused] = p2m_invalid,
+            [HVMMEM_ioreq_server] = p2m_ioreq_server
         };
 
         if ( copy_from_guest(&a, arg, 1) )
@@ -5644,7 +5647,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             }
             if ( !p2m_is_ram(t) &&
                  (!p2m_is_hole(t) || a.hvmmem_type != HVMMEM_mmio_dm) &&
-                 (t != p2m_mmio_write_dm || a.hvmmem_type != HVMMEM_ram_rw) )
+                 (t != p2m_ioreq_server || a.hvmmem_type != HVMMEM_ram_rw) )
             {
                 put_gfn(d, pfn);
                 goto setmemtype_fail;
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 7166c71..7adc77d 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -171,7 +171,7 @@ static void ept_p2m_type_to_flags(struct p2m_domain *p2m, ept_entry_t *entry,
             entry->a = entry->d = !!cpu_has_vmx_ept_ad;
             break;
         case p2m_grant_map_ro:
-        case p2m_mmio_write_dm:
+        case p2m_ioreq_server:
             entry->r = 1;
             entry->w = entry->x = 0;
             entry->a = !!cpu_has_vmx_ept_ad;
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 4980934..05aaf8f 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -94,7 +94,7 @@ static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn,
     default:
         return flags | _PAGE_NX_BIT;
     case p2m_grant_map_ro:
-    case p2m_mmio_write_dm:
+    case p2m_ioreq_server:
         return flags | P2M_BASE_FLAGS | _PAGE_NX_BIT;
     case p2m_ram_ro:
     case p2m_ram_logdirty:
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index dfe59a2..8c4b20e 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3226,7 +3226,7 @@ static int sh_page_fault(struct vcpu *v,
 
     /* Need to hand off device-model MMIO to the device model */
     if ( p2mt == p2m_mmio_dm
-         || (p2mt == p2m_mmio_write_dm && ft == ft_demand_write) )
+         || (p2mt == p2m_ioreq_server && ft == ft_demand_write) )
     {
         gpa = guest_walk_to_gpa(&gw);
         goto mmio;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 4ab3574..6785669 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -71,7 +71,7 @@ typedef enum {
     p2m_ram_shared = 12,          /* Shared or sharable memory */
     p2m_ram_broken = 13,          /* Broken page, access cause domain crash */
     p2m_map_foreign  = 14,        /* ram pages from foreign domain */
-    p2m_mmio_write_dm = 15,       /* Read-only; writes go to the device model */
+    p2m_ioreq_server = 15,
 } p2m_type_t;
 
 /* Modifiers to the query */
@@ -112,7 +112,7 @@ typedef unsigned int p2m_query_t;
                       | p2m_to_mask(p2m_ram_ro)         \
                       | p2m_to_mask(p2m_grant_map_ro)   \
                       | p2m_to_mask(p2m_ram_shared)     \
-                      | p2m_to_mask(p2m_mmio_write_dm))
+                      | p2m_to_mask(p2m_ioreq_server))
 
 /* Write-discard types, which should discard the write operations */
 #define P2M_DISCARD_WRITE_TYPES (p2m_to_mask(p2m_ram_ro)     \
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index ebb907a..b3e45cf 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -84,11 +84,12 @@ typedef enum {
     HVMMEM_ram_ro,             /* Read-only; writes are discarded */
     HVMMEM_mmio_dm,            /* Reads and write go to the device model */
 #if __XEN_INTERFACE_VERSION__ < 0x00040700
-    HVMMEM_mmio_write_dm       /* Read-only; writes go to the device model */
+    HVMMEM_mmio_write_dm,      /* Read-only; writes go to the device model */
 #else
-    HVMMEM_unused              /* Placeholder; setting memory to this type
+    HVMMEM_unused,             /* Placeholder; setting memory to this type
                                   will fail for code after 4.7.0 */
 #endif
+    HVMMEM_ioreq_server
 } hvmmem_type_t;
 
 /* Following tools-only interfaces may change in future. */
-- 
1.9.1

