
[Xen-devel] [PATCH v19 03/11] x86/hvm/ioreq: use gfn_t in struct hvm_ioreq_page



This patch adjusts the ioreq server code to use type-safe gfn_t values
where possible. No functional change.
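For readers unfamiliar with the idiom: gfn_t is one of Xen's TYPE_SAFE wrapper
types, so guest frame numbers can no longer be silently mixed up with raw
unsigned long values or other frame-number types. A minimal sketch of the
pattern is below; it is illustrative only (the real definitions come from
Xen's typesafe machinery in xen/include/xen/typesafe.h and xen/include/xen/mm.h,
which outside debug builds reduce the wrapper to a plain unsigned long):

    #include <stdbool.h>

    /* Illustrative stand-in for Xen's TYPE_SAFE(unsigned long, gfn). */
    typedef struct { unsigned long g; } gfn_t;

    static inline gfn_t _gfn(unsigned long g)   { return (gfn_t){ g }; }  /* wrap a raw value   */
    static inline unsigned long gfn_x(gfn_t g)  { return g.g; }           /* unwrap to raw      */
    static inline bool gfn_eq(gfn_t x, gfn_t y) { return gfn_x(x) == gfn_x(y); }

    #define INVALID_GFN _gfn(~0UL)

With the wrapper in place, comparisons such as iorp->gfn == gfn_x(INVALID_GFN)
become gfn_eq(iorp->gfn, INVALID_GFN), and conversions to and from raw values
are confined to the few call sites that genuinely need them (the HVM_PARAM_*
reads and prepare_ring_for_helper() in the hunks below).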

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

v18:
 - Trivial re-base.
---
 xen/arch/x86/hvm/ioreq.c         | 46 ++++++++++++++++++++--------------------
 xen/include/asm-x86/hvm/domain.h |  2 +-
 2 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index f5494a73c6..2512823de7 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -217,7 +217,7 @@ bool handle_hvm_io_completion(struct vcpu *v)
     return true;
 }
 
-static unsigned long hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
+static gfn_t hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
 {
     struct domain *d = s->target;
     unsigned int i;
@@ -227,20 +227,19 @@ static unsigned long hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
     for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
     {
         if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
-            return d->arch.hvm_domain.ioreq_gfn.base + i;
+            return _gfn(d->arch.hvm_domain.ioreq_gfn.base + i);
     }
 
-    return gfn_x(INVALID_GFN);
+    return INVALID_GFN;
 }
 
-static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s,
-                               unsigned long gfn)
+static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s, gfn_t gfn)
 {
     struct domain *d = s->target;
-    unsigned int i = gfn - d->arch.hvm_domain.ioreq_gfn.base;
+    unsigned int i = gfn_x(gfn) - d->arch.hvm_domain.ioreq_gfn.base;
 
     ASSERT(!IS_DEFAULT(s));
-    ASSERT(gfn != gfn_x(INVALID_GFN));
+    ASSERT(!gfn_eq(gfn, INVALID_GFN));
 
     set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
 }
@@ -249,7 +248,7 @@ static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
 {
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
 
-    if ( iorp->gfn == gfn_x(INVALID_GFN) )
+    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
         return;
 
     destroy_ring_for_helper(&iorp->va, iorp->page);
@@ -258,7 +257,7 @@ static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
     if ( !IS_DEFAULT(s) )
         hvm_free_ioreq_gfn(s, iorp->gfn);
 
-    iorp->gfn = gfn_x(INVALID_GFN);
+    iorp->gfn = INVALID_GFN;
 }
 
 static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
@@ -271,16 +270,17 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
         return -EINVAL;
 
     if ( IS_DEFAULT(s) )
-        iorp->gfn = buf ?
-                    d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN] :
-                    d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
+        iorp->gfn = _gfn(buf ?
+                         d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN] :
+                         d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN]);
     else
         iorp->gfn = hvm_alloc_ioreq_gfn(s);
 
-    if ( iorp->gfn == gfn_x(INVALID_GFN) )
+    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
         return -ENOMEM;
 
-    rc = prepare_ring_for_helper(d, iorp->gfn, &iorp->page, &iorp->va);
+    rc = prepare_ring_for_helper(d, gfn_x(iorp->gfn), &iorp->page,
+                                 &iorp->va);
 
     if ( rc )
         hvm_unmap_ioreq_gfn(s, buf);
@@ -316,10 +316,10 @@ static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
     struct domain *d = s->target;
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
 
-    if ( IS_DEFAULT(s) || iorp->gfn == gfn_x(INVALID_GFN) )
+    if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
         return;
 
-    if ( guest_physmap_remove_page(d, _gfn(iorp->gfn),
+    if ( guest_physmap_remove_page(d, iorp->gfn,
                                    _mfn(page_to_mfn(iorp->page)), 0) )
         domain_crash(d);
     clear_page(iorp->va);
@@ -331,15 +331,15 @@ static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
     int rc;
 
-    if ( IS_DEFAULT(s) || iorp->gfn == gfn_x(INVALID_GFN) )
+    if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
         return 0;
 
     clear_page(iorp->va);
 
-    rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
+    rc = guest_physmap_add_page(d, iorp->gfn,
                                 _mfn(page_to_mfn(iorp->page)), 0);
     if ( rc == 0 )
-        paging_mark_pfn_dirty(d, _pfn(iorp->gfn));
+        paging_mark_pfn_dirty(d, _pfn(gfn_x(iorp->gfn)));
 
     return rc;
 }
@@ -601,8 +601,8 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
     INIT_LIST_HEAD(&s->ioreq_vcpu_list);
     spin_lock_init(&s->bufioreq_lock);
 
-    s->ioreq.gfn = gfn_x(INVALID_GFN);
-    s->bufioreq.gfn = gfn_x(INVALID_GFN);
+    s->ioreq.gfn = INVALID_GFN;
+    s->bufioreq.gfn = INVALID_GFN;
 
     rc = hvm_ioreq_server_alloc_rangesets(s, id);
     if ( rc )
@@ -791,11 +791,11 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     if ( s->emulator != current->domain )
         goto out;
 
-    *ioreq_gfn = s->ioreq.gfn;
+    *ioreq_gfn = gfn_x(s->ioreq.gfn);
 
     if ( s->bufioreq.va != NULL )
     {
-        *bufioreq_gfn = s->bufioreq.gfn;
+        *bufioreq_gfn = gfn_x(s->bufioreq.gfn);
         *bufioreq_port = s->bufioreq_evtchn;
     }
 
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 2e4d85f6fe..9b73e80777 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -36,7 +36,7 @@
 #include <public/hvm/dm_op.h>
 
 struct hvm_ioreq_page {
-    unsigned long gfn;
+    gfn_t gfn;
     struct page_info *page;
     void *va;
 };
-- 
2.11.0

