[Xen-devel] [PATCH v8 03/11] x86/hvm/ioreq: use gfn_t in struct hvm_ioreq_page



This patch adjusts the ioreq server code to use type-safe gfn_t values
where possible. No functional change.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
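Note for readers less familiar with Xen's typesafe wrappers: the patch below replaces raw unsigned long frame numbers with gfn_t, and the accessors it uses (_gfn(), gfn_x(), gfn_eq(), INVALID_GFN) come from Xen's TYPE_SAFE machinery. The stand-alone sketch below is only a simplified approximation of that pattern for illustration; the definitions are stand-ins, not Xen's actual headers.

 #include <stdbool.h>
 #include <stdio.h>

 /* Simplified stand-in for Xen's TYPE_SAFE(unsigned long, gfn). */
 typedef struct { unsigned long gfn; } gfn_t;

 static inline gfn_t _gfn(unsigned long g) { return (gfn_t){ .gfn = g }; }
 static inline unsigned long gfn_x(gfn_t g) { return g.gfn; }
 static inline bool gfn_eq(gfn_t a, gfn_t b) { return gfn_x(a) == gfn_x(b); }

 #define INVALID_GFN _gfn(~0UL)

 int main(void)
 {
     gfn_t gfn = INVALID_GFN;

     /* gfn_t values are compared with gfn_eq(); a raw '==' on the struct
      * would not compile, which is the point of the wrapper. */
     if ( gfn_eq(gfn, INVALID_GFN) )
         gfn = _gfn(0x1234);   /* wrap a raw frame number (arbitrary value) */

     /* Unwrap with gfn_x() only at interfaces that still take an
      * unsigned long, e.g. prepare_ring_for_helper() in the patch below. */
     printf("gfn = %#lx\n", gfn_x(gfn));
     return 0;
 }

With the struct wrapper, mixing up a gfn with another frame-number type, or comparing with a stray '==', fails to compile rather than silently misbehaving, which is what "type-safe" buys here.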
 xen/arch/x86/hvm/ioreq.c         | 44 ++++++++++++++++++++--------------------
 xen/include/asm-x86/hvm/domain.h |  2 +-
 2 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 6ac4ba20ff..43b757b5b7 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -220,7 +220,7 @@ bool handle_hvm_io_completion(struct vcpu *v)
     return true;
 }
 
-static unsigned long hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
+static gfn_t hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
 {
     struct domain *d = s->domain;
     unsigned int i;
@@ -230,20 +230,19 @@ static unsigned long hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
     for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
     {
         if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
-            return d->arch.hvm_domain.ioreq_gfn.base + i;
+            return _gfn(d->arch.hvm_domain.ioreq_gfn.base + i);
     }
 
-    return gfn_x(INVALID_GFN);
+    return INVALID_GFN;
 }
 
-static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s,
-                               unsigned long gfn)
+static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s, gfn_t gfn)
 {
     struct domain *d = s->domain;
-    unsigned int i = gfn - d->arch.hvm_domain.ioreq_gfn.base;
+    unsigned int i = gfn_x(gfn) - d->arch.hvm_domain.ioreq_gfn.base;
 
     ASSERT(!IS_DEFAULT(s));
-    ASSERT(gfn != gfn_x(INVALID_GFN));
+    ASSERT(!gfn_eq(gfn, INVALID_GFN));
 
     set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
 }
@@ -252,7 +251,7 @@ static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
 {
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
 
-    if ( iorp->gfn == gfn_x(INVALID_GFN) )
+    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
         return;
 
     destroy_ring_for_helper(&iorp->va, iorp->page);
@@ -261,7 +260,7 @@ static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
     if ( !IS_DEFAULT(s) )
         hvm_free_ioreq_gfn(s, iorp->gfn);
 
-    iorp->gfn = gfn_x(INVALID_GFN);
+    iorp->gfn = INVALID_GFN;
 }
 
 static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
@@ -274,16 +273,17 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
         return -EINVAL;
 
     if ( IS_DEFAULT(s) )
-        iorp->gfn = buf ?
-                    d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN] :
-                    d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
+        iorp->gfn = _gfn(buf ?
+                         d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN] :
+                         d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN]);
     else
         iorp->gfn = hvm_alloc_ioreq_gfn(s);
 
-    if ( iorp->gfn == gfn_x(INVALID_GFN) )
+    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
         return -ENOMEM;
 
-    rc = prepare_ring_for_helper(d, iorp->gfn, &iorp->page, &iorp->va);
+    rc = prepare_ring_for_helper(d, gfn_x(iorp->gfn), &iorp->page,
+                                 &iorp->va);
 
     if ( rc )
         hvm_unmap_ioreq_gfn(s, buf);
@@ -322,10 +322,10 @@ static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
     struct domain *d = s->domain;
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
 
-    if ( IS_DEFAULT(s) || iorp->gfn == gfn_x(INVALID_GFN) )
+    if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
         return;
 
-    if ( guest_physmap_remove_page(d, _gfn(iorp->gfn),
+    if ( guest_physmap_remove_page(d, iorp->gfn,
                                    _mfn(page_to_mfn(iorp->page)), 0) )
         domain_crash(d);
     clear_page(iorp->va);
@@ -337,12 +337,12 @@ static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
     int rc;
 
-    if ( IS_DEFAULT(s) || iorp->gfn == gfn_x(INVALID_GFN) )
+    if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
         return 0;
 
     clear_page(iorp->va);
 
-    rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
+    rc = guest_physmap_add_page(d, iorp->gfn,
                                 _mfn(page_to_mfn(iorp->page)), 0);
     if ( rc == 0 )
         paging_mark_dirty(d, _mfn(page_to_mfn(iorp->page)));
@@ -603,8 +603,8 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
     INIT_LIST_HEAD(&s->ioreq_vcpu_list);
     spin_lock_init(&s->bufioreq_lock);
 
-    s->ioreq.gfn = gfn_x(INVALID_GFN);
-    s->bufioreq.gfn = gfn_x(INVALID_GFN);
+    s->ioreq.gfn = INVALID_GFN;
+    s->bufioreq.gfn = INVALID_GFN;
 
     rc = hvm_ioreq_server_alloc_rangesets(s, id);
     if ( rc )
@@ -770,11 +770,11 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
 
     ASSERT(!IS_DEFAULT(s));
 
-    *ioreq_gfn = s->ioreq.gfn;
+    *ioreq_gfn = gfn_x(s->ioreq.gfn);
 
     if ( s->bufioreq.va != NULL )
     {
-        *bufioreq_gfn = s->bufioreq.gfn;
+        *bufioreq_gfn = gfn_x(s->bufioreq.gfn);
         *bufioreq_port = s->bufioreq_evtchn;
     }
 
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index e17bbe4004..3bd9c5d7c0 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -36,7 +36,7 @@
 #include <public/hvm/dm_op.h>
 
 struct hvm_ioreq_page {
-    unsigned long gfn;
+    gfn_t gfn;
     struct page_info *page;
     void *va;
 };
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 

