|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v5 1/3] ioreq: Unify buf and non-buf ioreq page management
Switch the ioreq page mapping in hvm_map_ioreq_gfn() from
prepare_ring_for_helper() / __map_domain_page_global() to explicit
vmap(), aligning it with ioreq_server_alloc_mfn() which already
allocates domain-heap pages and will now also map them via vmap().
With both paths using vmap(), vmap_to_page() can recover the struct
page_info * uniformly during teardown, removing the need to cache the
page pointer in struct ioreq_page. So, drop the 'page' field from struct
ioreq_page and update all callers accordingly.
Signed-off-by: Julian Vetter <julian.vetter@xxxxxxxxxx>
---
Changes in v5:
- New patch that unifies the buf and non-buf code paths
---
xen/arch/x86/hvm/ioreq.c | 57 ++++++++++++++++++++++++++++++++--------
xen/common/ioreq.c | 36 +++++++++++++------------
xen/include/xen/ioreq.h | 1 -
3 files changed, 65 insertions(+), 29 deletions(-)
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index a5fa97e149..145dcba5c1 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -15,6 +15,7 @@
#include <xen/sched.h>
#include <xen/softirq.h>
#include <xen/trace.h>
+#include <xen/vmap.h>
#include <xen/vpci.h>
#include <asm/hvm/emulate.h>
@@ -128,8 +129,9 @@ static void hvm_unmap_ioreq_gfn(struct ioreq_server *s,
bool buf)
if ( gfn_eq(iorp->gfn, INVALID_GFN) )
return;
- destroy_ring_for_helper(&iorp->va, iorp->page);
- iorp->page = NULL;
+ put_page_and_type(vmap_to_page(iorp->va));
+ vunmap(iorp->va);
+ iorp->va = NULL;
hvm_free_ioreq_gfn(s, iorp->gfn);
iorp->gfn = INVALID_GFN;
@@ -139,9 +141,13 @@ static int hvm_map_ioreq_gfn(struct ioreq_server *s, bool
buf)
{
struct domain *d = s->target;
struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+ struct page_info *page;
+ p2m_type_t p2mt;
+ gfn_t base_gfn;
+ mfn_t mfn;
int rc;
- if ( iorp->page )
+ if ( iorp->va )
{
/*
* If a page has already been allocated (which will happen on
@@ -157,17 +163,45 @@ static int hvm_map_ioreq_gfn(struct ioreq_server *s, bool
buf)
if ( d->is_dying )
return -EINVAL;
- iorp->gfn = hvm_alloc_ioreq_gfn(s);
+ base_gfn = hvm_alloc_ioreq_gfn(s);
- if ( gfn_eq(iorp->gfn, INVALID_GFN) )
+ if ( gfn_eq(base_gfn, INVALID_GFN) )
return -ENOMEM;
- rc = prepare_ring_for_helper(d, gfn_x(iorp->gfn), &iorp->page,
- &iorp->va);
-
+ /*
+ * vmap() is used for the Xen-side mapping so that vmap_to_page() can
+ * recover the struct page_info * during teardown, consistent with
+ * ioreq_server_alloc_mfn().
+ */
+ rc = check_get_page_from_gfn(d, base_gfn, false, &p2mt, &page);
if ( rc )
- hvm_unmap_ioreq_gfn(s, buf);
+ {
+ if ( rc == -EAGAIN )
+ rc = -ENOENT;
+ goto fail;
+ }
+
+ if ( !get_page_type(page, PGT_writable_page) )
+ {
+ put_page(page);
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ mfn = page_to_mfn(page);
+ iorp->va = vmap(&mfn, 1);
+ if ( !iorp->va )
+ {
+ put_page_and_type(page);
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ iorp->gfn = base_gfn;
+ return 0;
+ fail:
+ hvm_free_ioreq_gfn(s, base_gfn);
return rc;
}
@@ -179,7 +213,7 @@ static void hvm_remove_ioreq_gfn(struct ioreq_server *s,
bool buf)
if ( gfn_eq(iorp->gfn, INVALID_GFN) )
return;
- if ( p2m_remove_page(d, iorp->gfn, page_to_mfn(iorp->page), 0) )
+ if ( p2m_remove_page(d, iorp->gfn, page_to_mfn(vmap_to_page(iorp->va)), 0)
)
domain_crash(d);
clear_page(iorp->va);
}
@@ -195,7 +229,8 @@ static int hvm_add_ioreq_gfn(struct ioreq_server *s, bool
buf)
clear_page(iorp->va);
- rc = p2m_add_page(d, iorp->gfn, page_to_mfn(iorp->page), 0, p2m_ram_rw);
+ rc = p2m_add_page(d, iorp->gfn, page_to_mfn(vmap_to_page(iorp->va)), 0,
+ p2m_ram_rw);
if ( rc == 0 )
paging_mark_pfn_dirty(d, _pfn(gfn_x(iorp->gfn)));
diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index f5fd30ce12..5b026fc1b2 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -17,11 +17,11 @@
*/
#include <xen/domain.h>
-#include <xen/domain_page.h>
#include <xen/event.h>
#include <xen/init.h>
#include <xen/ioreq.h>
#include <xen/irq.h>
+#include <xen/vmap.h>
#include <xen/lib.h>
#include <xen/paging.h>
#include <xen/sched.h>
@@ -262,8 +262,9 @@ static int ioreq_server_alloc_mfn(struct ioreq_server *s,
bool buf)
{
struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
struct page_info *page;
+ mfn_t mfn;
- if ( iorp->page )
+ if ( iorp->va )
{
/*
* If a guest frame has already been mapped (which may happen
@@ -291,11 +292,11 @@ static int ioreq_server_alloc_mfn(struct ioreq_server *s,
bool buf)
return -ENODATA;
}
- iorp->va = __map_domain_page_global(page);
+ mfn = page_to_mfn(page);
+ iorp->va = vmap(&mfn, 1);
if ( !iorp->va )
goto fail;
- iorp->page = page;
clear_page(iorp->va);
return 0;
@@ -309,14 +310,13 @@ static int ioreq_server_alloc_mfn(struct ioreq_server *s,
bool buf)
static void ioreq_server_free_mfn(struct ioreq_server *s, bool buf)
{
struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
- struct page_info *page = iorp->page;
+ struct page_info *page;
- if ( !page )
+ if ( !iorp->va )
return;
- iorp->page = NULL;
-
- unmap_domain_page_global(iorp->va);
+ page = vmap_to_page(iorp->va);
+ vunmap(iorp->va);
iorp->va = NULL;
put_page_alloc_ref(page);
@@ -333,7 +333,8 @@ bool is_ioreq_server_page(struct domain *d, const struct
page_info *page)
FOR_EACH_IOREQ_SERVER(d, id, s)
{
- if ( (s->ioreq.page == page) || (s->bufioreq.page == page) )
+ if ( (s->ioreq.va && vmap_to_page(s->ioreq.va) == page) ||
+ (s->bufioreq.va && vmap_to_page(s->bufioreq.va) == page) )
{
found = true;
break;
@@ -626,11 +627,12 @@ static void ioreq_server_deinit(struct ioreq_server *s)
/*
* NOTE: It is safe to call both arch_ioreq_server_unmap_pages() and
* ioreq_server_free_pages() in that order.
- * This is because the former will do nothing if the pages
- * are not mapped, leaving the page to be freed by the latter.
- * However if the pages are mapped then the former will set
- * the page_info pointer to NULL, meaning the latter will do
- * nothing.
+ * arch_ioreq_server_unmap_pages() handles the GFN-mapped path
+ * (iorp->gfn != INVALID_GFN) and clears iorp->va on completion,
+ * so ioreq_server_free_pages() will find iorp->va == NULL and
+ * do nothing. Conversely, pages allocated via the resource path
+ * have iorp->gfn == INVALID_GFN, so arch_ioreq_server_unmap_pages()
+ * is a no-op and ioreq_server_free_pages() handles the teardown.
*/
arch_ioreq_server_unmap_pages(s);
ioreq_server_free_pages(s);
@@ -819,12 +821,12 @@ int ioreq_server_get_frame(struct domain *d, ioservid_t
id,
if ( !HANDLE_BUFIOREQ(s) )
goto out;
- *mfn = page_to_mfn(s->bufioreq.page);
+ *mfn = page_to_mfn(vmap_to_page(s->bufioreq.va));
rc = 0;
break;
case XENMEM_resource_ioreq_server_frame_ioreq(0):
- *mfn = page_to_mfn(s->ioreq.page);
+ *mfn = page_to_mfn(vmap_to_page(s->ioreq.va));
rc = 0;
break;
diff --git a/xen/include/xen/ioreq.h b/xen/include/xen/ioreq.h
index e86f0869fa..d63fa4729e 100644
--- a/xen/include/xen/ioreq.h
+++ b/xen/include/xen/ioreq.h
@@ -25,7 +25,6 @@
struct ioreq_page {
gfn_t gfn;
- struct page_info *page;
void *va;
};
--
2.51.0
--
Julian Vetter | Vates Hypervisor & Kernel Developer
XCP-ng & Xen Orchestra - Vates solutions
web: https://vates.tech
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |