
[Xen-devel] [PATCH v2 03/11] ioreq: switch selection and forwarding to use ioservid_t

hvm_select_ioreq_server and hvm_send_ioreq were both using
hvm_ioreq_server directly; switch to using ioservid_t in order to
select and forward ioreqs.

This is a preparatory change, since future patches will use the ioreq
server id in order to differentiate between internal and external
ioreq servers.
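
For illustration only (not part of the patch): a minimal, self-contained
C sketch of the calling convention this change introduces, where selection
returns an ioservid_t instead of a pointer, XEN_INVALID_IOSERVID means "no
server", and the send path resolves the id back to the server structure
internally. All toy_* names are hypothetical stand-ins for the Xen
internals touched below.

/*
 * Standalone sketch (not Xen code) of the id-based selection/forwarding
 * pattern. toy_server, toy_select() and toy_send() are invented names.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint16_t ioservid_t;
#define XEN_INVALID_IOSERVID 0xffff
#define MAX_NR_IOREQ_SERVERS 8

struct toy_server {
    int enabled;
};

static struct toy_server *servers[MAX_NR_IOREQ_SERVERS];

/* Analogous to get_ioreq_server(): translate an id back into a structure. */
static struct toy_server *toy_get_server(ioservid_t id)
{
    return id < MAX_NR_IOREQ_SERVERS ? servers[id] : NULL;
}

/* Analogous to hvm_select_ioreq_server(): return an id, not a pointer. */
static ioservid_t toy_select(void)
{
    for ( unsigned int id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
        if ( servers[id] && servers[id]->enabled )
            return id;

    return XEN_INVALID_IOSERVID;
}

/* Analogous to hvm_send_ioreq(): take the id and resolve it internally. */
static int toy_send(ioservid_t id)
{
    struct toy_server *s = toy_get_server(id);

    if ( !s )
        return -1;

    printf("forwarding ioreq to server %u\n", (unsigned int)id);
    return 0;
}

int main(void)
{
    static struct toy_server s0 = { .enabled = 1 };

    servers[0] = &s0;

    ioservid_t id = toy_select();

    if ( id == XEN_INVALID_IOSERVID )
        printf("no suitable backing DM, ignoring access\n");
    else
        toy_send(id);

    return 0;
}

The diff below applies the same shape to the real Xen functions.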

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Changes since v1:
 - New in this version.
---
 xen/arch/x86/hvm/dm.c           |  2 +-
 xen/arch/x86/hvm/emulate.c      | 14 +++++++-------
 xen/arch/x86/hvm/ioreq.c        | 24 ++++++++++++------------
 xen/arch/x86/hvm/stdvga.c       |  8 ++++----
 xen/arch/x86/mm/p2m.c           | 20 ++++++++++----------
 xen/include/asm-x86/hvm/ioreq.h |  5 ++---
 xen/include/asm-x86/p2m.h       |  9 ++++-----
 xen/include/public/hvm/dm_op.h  |  1 +
 8 files changed, 41 insertions(+), 42 deletions(-)

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index d6d0e8be89..c2fca9f729 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -263,7 +263,7 @@ static int set_mem_type(struct domain *d,
             return -EOPNOTSUPP;
 
         /* Do not change to HVMMEM_ioreq_server if no ioreq server mapped. */
-        if ( !p2m_get_ioreq_server(d, &flags) )
+        if ( p2m_get_ioreq_server(d, &flags) == XEN_INVALID_IOSERVID )
             return -EINVAL;
     }
 
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index d75d3e6fd6..51d2fcba2d 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -254,7 +254,7 @@ static int hvmemul_do_io(
          * However, there's no cheap approach to avoid above situations in xen,
          * so the device model side needs to check the incoming ioreq event.
          */
-        struct hvm_ioreq_server *s = NULL;
+        ioservid_t id = XEN_INVALID_IOSERVID;
         p2m_type_t p2mt = p2m_invalid;
 
         if ( is_mmio )
@@ -267,9 +267,9 @@ static int hvmemul_do_io(
             {
                 unsigned int flags;
 
-                s = p2m_get_ioreq_server(currd, &flags);
+                id = p2m_get_ioreq_server(currd, &flags);
 
-                if ( s == NULL )
+                if ( id == XEN_INVALID_IOSERVID )
                 {
                     rc = X86EMUL_RETRY;
                     vio->io_req.state = STATE_IOREQ_NONE;
@@ -289,18 +289,18 @@ static int hvmemul_do_io(
             }
         }
 
-        if ( !s )
-            s = hvm_select_ioreq_server(currd, &p);
+        if ( id == XEN_INVALID_IOSERVID )
+            id = hvm_select_ioreq_server(currd, &p);
 
         /* If there is no suitable backing DM, just ignore accesses */
-        if ( !s )
+        if ( id == XEN_INVALID_IOSERVID )
         {
             rc = hvm_process_io_intercept(&null_handler, &p);
             vio->io_req.state = STATE_IOREQ_NONE;
         }
         else
         {
-            rc = hvm_send_ioreq(s, &p, 0);
+            rc = hvm_send_ioreq(id, &p, 0);
             if ( rc != X86EMUL_RETRY || currd->is_shutting_down )
                 vio->io_req.state = STATE_IOREQ_NONE;
             else if ( !hvm_ioreq_needs_completion(&vio->io_req) )
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 69652e1080..95492bc111 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -39,6 +39,7 @@ static void set_ioreq_server(struct domain *d, unsigned int id,
 {
     ASSERT(id < MAX_NR_IOREQ_SERVERS);
     ASSERT(!s || !d->arch.hvm.ioreq_server.server[id]);
+    BUILD_BUG_ON(MAX_NR_IOREQ_SERVERS >= XEN_INVALID_IOSERVID);
 
     d->arch.hvm.ioreq_server.server[id] = s;
 }
@@ -868,7 +869,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
 
     domain_pause(d);
 
-    p2m_set_ioreq_server(d, 0, s);
+    p2m_set_ioreq_server(d, 0, id);
 
     hvm_ioreq_server_disable(s);
 
@@ -1131,7 +1132,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
     if ( s->emulator != current->domain )
         goto out;
 
-    rc = p2m_set_ioreq_server(d, flags, s);
+    rc = p2m_set_ioreq_server(d, flags, id);
 
  out:
     spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
@@ -1255,8 +1256,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
     spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
 }
 
-struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
-                                                 ioreq_t *p)
+ioservid_t hvm_select_ioreq_server(struct domain *d, ioreq_t *p)
 {
     struct hvm_ioreq_server *s;
     uint32_t cf8;
@@ -1265,7 +1265,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
     unsigned int id;
 
     if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
-        return NULL;
+        return XEN_INVALID_IOSERVID;
 
     cf8 = d->arch.hvm.pci_cf8;
 
@@ -1320,7 +1320,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
             start = addr;
             end = start + p->size - 1;
             if ( rangeset_contains_range(r, start, end) )
-                return s;
+                return id;
 
             break;
 
@@ -1329,7 +1329,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
             end = hvm_mmio_last_byte(p);
 
             if ( rangeset_contains_range(r, start, end) )
-                return s;
+                return id;
 
             break;
 
@@ -1338,14 +1338,14 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
             {
                 p->type = IOREQ_TYPE_PCI_CONFIG;
                 p->addr = addr;
-                return s;
+                return id;
             }
 
             break;
         }
     }
 
-    return NULL;
+    return XEN_INVALID_IOSERVID;
 }
 
 static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
@@ -1441,12 +1441,12 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
     return X86EMUL_OKAY;
 }
 
-int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
-                   bool buffered)
+int hvm_send_ioreq(ioservid_t id, ioreq_t *proto_p, bool buffered)
 {
     struct vcpu *curr = current;
     struct domain *d = curr->domain;
     struct hvm_ioreq_vcpu *sv;
+    struct hvm_ioreq_server *s = get_ioreq_server(d, id);
 
     ASSERT(s);
 
@@ -1512,7 +1512,7 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
         if ( !s->enabled )
             continue;
 
-        if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
+        if ( hvm_send_ioreq(id, p, buffered) == X86EMUL_UNHANDLEABLE )
             failed++;
     }
 
diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index bd398dbb1b..a689269712 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -466,7 +466,7 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler,
         .dir = IOREQ_WRITE,
         .data = data,
     };
-    struct hvm_ioreq_server *srv;
+    ioservid_t id;
 
     if ( !stdvga_cache_is_enabled(s) || !s->stdvga )
         goto done;
@@ -507,11 +507,11 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler,
     }
 
  done:
-    srv = hvm_select_ioreq_server(current->domain, &p);
-    if ( !srv )
+    id = hvm_select_ioreq_server(current->domain, &p);
+    if ( id == XEN_INVALID_IOSERVID )
         return X86EMUL_UNHANDLEABLE;
 
-    return hvm_send_ioreq(srv, &p, 1);
+    return hvm_send_ioreq(id, &p, 1);
 }
 
 static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 8a5229ee21..43849cbbd9 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -102,6 +102,7 @@ static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
         p2m_pt_init(p2m);
 
     spin_lock_init(&p2m->ioreq.lock);
+    p2m->ioreq.server = XEN_INVALID_IOSERVID;
 
     return ret;
 }
@@ -361,7 +362,7 @@ void p2m_memory_type_changed(struct domain *d)
 
 int p2m_set_ioreq_server(struct domain *d,
                          unsigned int flags,
-                         struct hvm_ioreq_server *s)
+                         ioservid_t id)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     int rc;
@@ -376,16 +377,16 @@ int p2m_set_ioreq_server(struct domain *d,
     if ( flags == 0 )
     {
         rc = -EINVAL;
-        if ( p2m->ioreq.server != s )
+        if ( p2m->ioreq.server != id )
             goto out;
 
-        p2m->ioreq.server = NULL;
+        p2m->ioreq.server = XEN_INVALID_IOSERVID;
         p2m->ioreq.flags = 0;
     }
     else
     {
         rc = -EBUSY;
-        if ( p2m->ioreq.server != NULL )
+        if ( p2m->ioreq.server != XEN_INVALID_IOSERVID )
             goto out;
 
         /*
@@ -397,7 +398,7 @@ int p2m_set_ioreq_server(struct domain *d,
         if ( read_atomic(&p2m->ioreq.entry_count) )
             goto out;
 
-        p2m->ioreq.server = s;
+        p2m->ioreq.server = id;
         p2m->ioreq.flags = flags;
     }
 
@@ -409,19 +410,18 @@ int p2m_set_ioreq_server(struct domain *d,
     return rc;
 }
 
-struct hvm_ioreq_server *p2m_get_ioreq_server(struct domain *d,
-                                              unsigned int *flags)
+ioservid_t p2m_get_ioreq_server(struct domain *d, unsigned int *flags)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
-    struct hvm_ioreq_server *s;
+    ioservid_t id;
 
     spin_lock(&p2m->ioreq.lock);
 
-    s = p2m->ioreq.server;
+    id = p2m->ioreq.server;
     *flags = p2m->ioreq.flags;
 
     spin_unlock(&p2m->ioreq.lock);
-    return s;
+    return id;
 }
 
 void p2m_enable_hardware_log_dirty(struct domain *d)
diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
index e2588e912f..65491c48d2 100644
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -47,9 +47,8 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v);
 void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v);
 void hvm_destroy_all_ioreq_servers(struct domain *d);
 
-struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
-                                                 ioreq_t *p);
-int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
+ioservid_t hvm_select_ioreq_server(struct domain *d, ioreq_t *p);
+int hvm_send_ioreq(ioservid_t id, ioreq_t *proto_p,
                    bool buffered);
 unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered);
 
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 94285db1b4..99a1dab311 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -354,7 +354,7 @@ struct p2m_domain {
           * ioreq server who's responsible for the emulation of
           * gfns with specific p2m type(for now, p2m_ioreq_server).
           */
-         struct hvm_ioreq_server *server;
+         ioservid_t server;
          /*
           * flags specifies whether read, write or both operations
           * are to be emulated by an ioreq server.
@@ -819,7 +819,7 @@ static inline p2m_type_t p2m_recalc_type_range(bool recalc, p2m_type_t t,
     if ( !recalc || !p2m_is_changeable(t) )
         return t;
 
-    if ( t == p2m_ioreq_server && p2m->ioreq.server != NULL )
+    if ( t == p2m_ioreq_server && p2m->ioreq.server != XEN_INVALID_IOSERVID )
         return t;
 
     return p2m_is_logdirty_range(p2m, gfn_start, gfn_end) ? p2m_ram_logdirty
@@ -938,9 +938,8 @@ static inline unsigned int p2m_get_iommu_flags(p2m_type_t p2mt, mfn_t mfn)
 }
 
 int p2m_set_ioreq_server(struct domain *d, unsigned int flags,
-                         struct hvm_ioreq_server *s);
-struct hvm_ioreq_server *p2m_get_ioreq_server(struct domain *d,
-                                              unsigned int *flags);
+                         ioservid_t id);
+ioservid_t p2m_get_ioreq_server(struct domain *d, unsigned int *flags);
 
 static inline int p2m_entry_modify(struct p2m_domain *p2m, p2m_type_t nt,
                                    p2m_type_t ot, mfn_t nfn, mfn_t ofn,
diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
index d3b554d019..8725cc20d3 100644
--- a/xen/include/public/hvm/dm_op.h
+++ b/xen/include/public/hvm/dm_op.h
@@ -54,6 +54,7 @@
  */
 
 typedef uint16_t ioservid_t;
+#define XEN_INVALID_IOSERVID 0xffff
 
 /*
  * XEN_DMOP_create_ioreq_server: Instantiate a new IOREQ Server for a
-- 
2.22.0

