
[Xen-devel] [PATCH v2 2/4] x86/hvm: take a reference on ioreq server emulating domain



When an ioreq server is created, the code currently stores the id of the
emulating domain but does not take a reference on that domain.

This patch replaces the stored id with a pointer to the emulating domain
itself and takes a reference on that domain, held for the lifetime of the
ioreq server. To make the two roles explicit, the hvm_ioreq_server
'domain' field is renamed to 'target' and the old 'domid' field becomes
an 'emulator' pointer.
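
For illustration, the intended pairing looks like the sketch below
(illustrative helper names, not code from this patch; it assumes Xen's
get_domain()/put_domain() primitives, where get_domain() fails for a
domain that is already being destroyed):

  /* Minimal sketch: every successful get_domain() is balanced by
   * exactly one put_domain(), both on the init failure path and on
   * server teardown. */
  static int sketch_server_init(struct hvm_ioreq_server *s)
  {
      struct domain *currd = current->domain;

      if ( !get_domain(currd) )  /* fails for a dying domain */
          return -EACCES;
      s->emulator = currd;       /* pointer valid until... */
      return 0;
  }

  static void sketch_server_deinit(struct hvm_ioreq_server *s)
  {
      put_domain(s->emulator);   /* ...the matching put_domain() */
  }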

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/hvm/ioreq.c         | 33 ++++++++++++++++++++++-----------
 xen/include/asm-x86/hvm/domain.h |  4 +---
 2 files changed, 23 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 2b9e5562dd..066c997c93 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -218,7 +218,7 @@ static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool buf)
 static int hvm_map_ioreq_page(
     struct hvm_ioreq_server *s, bool buf, unsigned long gfn)
 {
-    struct domain *d = s->domain;
+    struct domain *d = s->target;
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
     struct page_info *page;
     void *va;
@@ -305,6 +305,7 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
                                      bool is_default, struct vcpu *v)
 {
     struct hvm_ioreq_vcpu *sv;
+    domid_t domid;
     int rc;
 
     sv = xzalloc(struct hvm_ioreq_vcpu);
@@ -315,7 +316,9 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
 
     spin_lock(&s->lock);
 
-    rc = alloc_unbound_xen_event_channel(v->domain, v->vcpu_id, s->domid,
+    domid = s->emulator->domain_id;
+
+    rc = alloc_unbound_xen_event_channel(v->domain, v->vcpu_id, domid,
                                          NULL);
     if ( rc < 0 )
         goto fail2;
@@ -324,9 +327,9 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
 
     if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
     {
-        struct domain *d = s->domain;
+        struct domain *d = s->target;
 
-        rc = alloc_unbound_xen_event_channel(v->domain, 0, s->domid, NULL);
+        rc = alloc_unbound_xen_event_channel(v->domain, 0, domid, NULL);
         if ( rc < 0 )
             goto fail3;
 
@@ -434,7 +437,7 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
                                         bool is_default,
                                         bool handle_bufioreq)
 {
-    struct domain *d = s->domain;
+    struct domain *d = s->target;
     unsigned long ioreq_gfn = gfn_x(INVALID_GFN);
     unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
     int rc;
@@ -471,7 +474,7 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
 static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
                                          bool is_default)
 {
-    struct domain *d = s->domain;
+    struct domain *d = s->target;
     bool handle_bufioreq = !!s->bufioreq.va;
 
     if ( handle_bufioreq )
@@ -521,7 +524,7 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
         if ( rc )
             goto fail;
 
-        s->range[i] = rangeset_new(s->domain, name,
+        s->range[i] = rangeset_new(s->target, name,
                                    RANGESETF_prettyprint_hex);
 
         xfree(name);
@@ -545,7 +548,7 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
 static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
                                     bool is_default)
 {
-    struct domain *d = s->domain;
+    struct domain *d = s->target;
     struct hvm_ioreq_vcpu *sv;
     bool handle_bufioreq = !!s->bufioreq.va;
 
@@ -576,7 +579,7 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
 static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
                                      bool is_default)
 {
-    struct domain *d = s->domain;
+    struct domain *d = s->target;
     bool handle_bufioreq = !!s->bufioreq.va;
 
     spin_lock(&s->lock);
@@ -602,12 +605,17 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
                                  struct domain *d, bool is_default,
                                  int bufioreq_handling, ioservid_t id)
 {
+    struct domain *currd = current->domain;
     struct vcpu *v;
     int rc;
 
     s->id = id;
-    s->domain = d;
-    s->domid = current->domain->domain_id;
+    s->target = d;
+
+    if ( !get_domain(currd) )
+        return -EACCES;
+
+    s->emulator = currd;
 
     spin_lock_init(&s->lock);
     INIT_LIST_HEAD(&s->ioreq_vcpu_list);
@@ -641,6 +649,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
  fail_map:
     hvm_ioreq_server_free_rangesets(s, is_default);
 
+    put_domain(s->emulator);
     return rc;
 }
 
@@ -651,6 +660,8 @@ static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s,
     hvm_ioreq_server_remove_all_vcpus(s);
     hvm_ioreq_server_unmap_pages(s, is_default);
     hvm_ioreq_server_free_rangesets(s, is_default);
+
+    put_domain(s->emulator);
 }
 
 static ioservid_t next_ioservid(struct domain *d)
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 7f128c05ff..6e03d024c8 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -53,13 +53,11 @@ struct hvm_ioreq_vcpu {
 
 struct hvm_ioreq_server {
     struct list_head       list_entry;
-    struct domain          *domain;
+    struct domain          *target, *emulator;
 
     /* Lock to serialize toolstack modifications */
     spinlock_t             lock;
 
-    /* Domain id of emulating domain */
-    domid_t                domid;
     ioservid_t             id;
     struct hvm_ioreq_page  ioreq;
     struct list_head       ioreq_vcpu_list;
-- 
2.11.0

