[PATCH V2 07/23] xen/ioreq: Move x86's ioreq_gfn(server) to struct domain



From: Oleksandr Tyshchenko <oleksandr_tyshchenko@xxxxxxxx>

IOREQ is a common feature now, and these structs will be used
on Arm as-is. Move them to the common struct domain. This also
significantly reduces the layering violation in the common code
(the *arch.hvm* usage).

Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@xxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>

---
Please note, this is a split/cleanup/hardening of Julien's PoC:
"Add support for Guest IO forwarding to a device emulator"

Changes V1 -> V2:
   - new patch
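
Not part of the patch, just an illustration of the intended effect:
with these fields in the common struct domain, arch-neutral code can
take the ioreq server lock and walk the server array without going
through arch.hvm. A minimal hypothetical sketch (count_ioreq_servers()
is a made-up helper, not something this series adds):

    /* Hypothetical helper, for illustration only: count registered
     * ioreq servers via the now-common d->ioreq_server fields.
     * Assumes CONFIG_IOREQ_SERVER, which guards those fields. */
    static unsigned int count_ioreq_servers(struct domain *d)
    {
        unsigned int id, count = 0;

        spin_lock_recursive(&d->ioreq_server.lock);

        for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
            if ( d->ioreq_server.server[id] )
                count++;

        spin_unlock_recursive(&d->ioreq_server.lock);

        return count;
    }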
---
 xen/arch/x86/hvm/hvm.c           | 12 +++----
 xen/common/ioreq.c               | 72 ++++++++++++++++++++--------------------
 xen/include/asm-x86/hvm/domain.h | 15 ---------
 xen/include/asm-x86/hvm/ioreq.h  |  4 +--
 xen/include/xen/sched.h          | 17 ++++++++++
 5 files changed, 61 insertions(+), 59 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 54e32e4..20376ce 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4218,20 +4218,20 @@ static int hvm_set_param(struct domain *d, uint32_t index, uint64_t value)
             rc = -EINVAL;
         break;
     case HVM_PARAM_IOREQ_SERVER_PFN:
-        d->arch.hvm.ioreq_gfn.base = value;
+        d->ioreq_gfn.base = value;
         break;
     case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
     {
         unsigned int i;
 
         if ( value == 0 ||
-             value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 )
+             value > sizeof(d->ioreq_gfn.mask) * 8 )
         {
             rc = -EINVAL;
             break;
         }
         for ( i = 0; i < value; i++ )
-            set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
+            set_bit(i, &d->ioreq_gfn.mask);
 
         break;
     }
@@ -4239,11 +4239,11 @@ static int hvm_set_param(struct domain *d, uint32_t index, uint64_t value)
     case HVM_PARAM_IOREQ_PFN:
     case HVM_PARAM_BUFIOREQ_PFN:
         BUILD_BUG_ON(HVM_PARAM_IOREQ_PFN >
-                     sizeof(d->arch.hvm.ioreq_gfn.legacy_mask) * 8);
+                     sizeof(d->ioreq_gfn.legacy_mask) * 8);
         BUILD_BUG_ON(HVM_PARAM_BUFIOREQ_PFN >
-                     sizeof(d->arch.hvm.ioreq_gfn.legacy_mask) * 8);
+                     sizeof(d->ioreq_gfn.legacy_mask) * 8);
         if ( value )
-            set_bit(index, &d->arch.hvm.ioreq_gfn.legacy_mask);
+            set_bit(index, &d->ioreq_gfn.legacy_mask);
         break;
 
     case HVM_PARAM_X87_FIP_WIDTH:
diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index 1d62d13..7f91bc2 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -37,13 +37,13 @@ static void set_ioreq_server(struct domain *d, unsigned int id,
                              struct ioreq_server *s)
 {
     ASSERT(id < MAX_NR_IOREQ_SERVERS);
-    ASSERT(!s || !d->arch.hvm.ioreq_server.server[id]);
+    ASSERT(!s || !d->ioreq_server.server[id]);
 
-    d->arch.hvm.ioreq_server.server[id] = s;
+    d->ioreq_server.server[id] = s;
 }
 
 #define GET_IOREQ_SERVER(d, id) \
-    (d)->arch.hvm.ioreq_server.server[id]
+    (d)->ioreq_server.server[id]
 
 struct ioreq_server *get_ioreq_server(const struct domain *d,
                                       unsigned int id)
@@ -222,7 +222,7 @@ static gfn_t hvm_alloc_legacy_ioreq_gfn(struct ioreq_server *s)
 
     for ( i = HVM_PARAM_IOREQ_PFN; i <= HVM_PARAM_BUFIOREQ_PFN; i++ )
     {
-        if ( !test_and_clear_bit(i, &d->arch.hvm.ioreq_gfn.legacy_mask) )
+        if ( !test_and_clear_bit(i, &d->ioreq_gfn.legacy_mask) )
             return _gfn(d->arch.hvm.params[i]);
     }
 
@@ -234,10 +234,10 @@ static gfn_t hvm_alloc_ioreq_gfn(struct ioreq_server *s)
     struct domain *d = s->target;
     unsigned int i;
 
-    for ( i = 0; i < sizeof(d->arch.hvm.ioreq_gfn.mask) * 8; i++ )
+    for ( i = 0; i < sizeof(d->ioreq_gfn.mask) * 8; i++ )
     {
-        if ( test_and_clear_bit(i, &d->arch.hvm.ioreq_gfn.mask) )
-            return _gfn(d->arch.hvm.ioreq_gfn.base + i);
+        if ( test_and_clear_bit(i, &d->ioreq_gfn.mask) )
+            return _gfn(d->ioreq_gfn.base + i);
     }
 
     /*
@@ -261,21 +261,21 @@ static bool hvm_free_legacy_ioreq_gfn(struct ioreq_server *s,
     if ( i > HVM_PARAM_BUFIOREQ_PFN )
         return false;
 
-    set_bit(i, &d->arch.hvm.ioreq_gfn.legacy_mask);
+    set_bit(i, &d->ioreq_gfn.legacy_mask);
     return true;
 }
 
 static void hvm_free_ioreq_gfn(struct ioreq_server *s, gfn_t gfn)
 {
     struct domain *d = s->target;
-    unsigned int i = gfn_x(gfn) - d->arch.hvm.ioreq_gfn.base;
+    unsigned int i = gfn_x(gfn) - d->ioreq_gfn.base;
 
     ASSERT(!gfn_eq(gfn, INVALID_GFN));
 
     if ( !hvm_free_legacy_ioreq_gfn(s, gfn) )
     {
-        ASSERT(i < sizeof(d->arch.hvm.ioreq_gfn.mask) * 8);
-        set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
+        ASSERT(i < sizeof(d->ioreq_gfn.mask) * 8);
+        set_bit(i, &d->ioreq_gfn.mask);
     }
 }
 
@@ -400,7 +400,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
     unsigned int id;
     bool found = false;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
@@ -411,7 +411,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
         }
     }
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return found;
 }
@@ -781,7 +781,7 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
         return -ENOMEM;
 
     domain_pause(d);
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
     {
@@ -809,13 +809,13 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
     if ( id )
         *id = i;
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
     domain_unpause(d);
 
     return 0;
 
  fail:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
     domain_unpause(d);
 
     xfree(s);
@@ -827,7 +827,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
     struct ioreq_server *s;
     int rc;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -859,7 +859,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
     rc = 0;
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -872,7 +872,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     struct ioreq_server *s;
     int rc;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -906,7 +906,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     rc = 0;
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -919,7 +919,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
 
     ASSERT(is_hvm_domain(d));
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -957,7 +957,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
     }
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -973,7 +973,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
     if ( start > end )
         return -EINVAL;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -1009,7 +1009,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
     rc = rangeset_add_range(r, start, end);
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -1025,7 +1025,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
     if ( start > end )
         return -EINVAL;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -1061,7 +1061,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
     rc = rangeset_remove_range(r, start, end);
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -1072,7 +1072,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
     struct ioreq_server *s;
     int rc;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -1096,7 +1096,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
     rc = 0;
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
     return rc;
 }
 
@@ -1106,7 +1106,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
     unsigned int id;
     int rc;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
@@ -1115,7 +1115,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
             goto fail;
     }
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return 0;
 
@@ -1130,7 +1130,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
         hvm_ioreq_server_remove_vcpu(s, v);
     }
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -1140,12 +1140,12 @@ void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
     struct ioreq_server *s;
     unsigned int id;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
         hvm_ioreq_server_remove_vcpu(s, v);
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 }
 
 void hvm_destroy_all_ioreq_servers(struct domain *d)
@@ -1156,7 +1156,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
     if ( !arch_hvm_ioreq_destroy(d) )
         return;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     /* No need to domain_pause() as the domain is being torn down */
 
@@ -1174,7 +1174,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
         xfree(s);
     }
 
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 }
 
 struct ioreq_server *hvm_select_ioreq_server(struct domain *d,
@@ -1406,7 +1406,7 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
 
 void hvm_ioreq_init(struct domain *d)
 {
-    spin_lock_init(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_init(&d->ioreq_server.lock);
 
     arch_hvm_ioreq_init(d);
 }
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 3b36c2f..5d60737 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -63,22 +63,7 @@ struct hvm_pi_ops {
     void (*vcpu_block)(struct vcpu *);
 };
 
-#define MAX_NR_IOREQ_SERVERS 8
-
 struct hvm_domain {
-    /* Guest page range used for non-default ioreq servers */
-    struct {
-        unsigned long base;
-        unsigned long mask; /* indexed by GFN minus base */
-        unsigned long legacy_mask; /* indexed by HVM param number */
-    } ioreq_gfn;
-
-    /* Lock protects all other values in the sub-struct and the default */
-    struct {
-        spinlock_t              lock;
-        struct ioreq_server     *server[MAX_NR_IOREQ_SERVERS];
-    } ioreq_server;
-
     /* Cached CF8 for guest PCI config cycles */
     uint32_t                pci_cf8;
 
diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
index d2d64a8..0fccac5 100644
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -77,7 +77,7 @@ static inline int hvm_map_mem_type_to_ioreq_server(struct domain *d,
     if ( flags & ~XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
         return -EINVAL;
 
-    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_lock_recursive(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -92,7 +92,7 @@ static inline int hvm_map_mem_type_to_ioreq_server(struct domain *d,
     rc = p2m_set_ioreq_server(d, flags, s);
 
  out:
-    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+    spin_unlock_recursive(&d->ioreq_server.lock);
 
     if ( rc == 0 && flags == 0 )
     {
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index d8ed83f..78761cd 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -314,6 +314,8 @@ struct sched_unit {
 
 struct evtchn_port_ops;
 
+#define MAX_NR_IOREQ_SERVERS 8
+
 struct domain
 {
     domid_t          domain_id;
@@ -521,6 +523,21 @@ struct domain
     /* Argo interdomain communication support */
     struct argo_domain *argo;
 #endif
+
+#ifdef CONFIG_IOREQ_SERVER
+    /* Guest page range used for non-default ioreq servers */
+    struct {
+        unsigned long base;
+        unsigned long mask; /* indexed by GFN minus base */
+        unsigned long legacy_mask; /* indexed by HVM param number */
+    } ioreq_gfn;
+
+    /* Lock protects all other values in the sub-struct and the default */
+    struct {
+        spinlock_t              lock;
+        struct ioreq_server     *server[MAX_NR_IOREQ_SERVERS];
+    } ioreq_server;
+#endif
 };
 
 static inline struct page_list_head *page_to_list(
-- 
2.7.4